diff --git a/.bazelrc b/.bazelrc index 8f70af802..9a2f03564 100644 --- a/.bazelrc +++ b/.bazelrc @@ -1,3 +1,9 @@ +# Sadly, some Bazel rules we depend on have no support for bzlmod yet +# Here is an (incomplete) list of rules known to not support bzlmod. +# Please extend this list as you find more. +# - rules_nixpkgs: https://github.com/tweag/rules_nixpkgs/issues/181 +common --noenable_bzlmod + # Import bazelrc presets import %workspace%/bazel/bazelrc/bazel7.bazelrc import %workspace%/bazel/bazelrc/convenience.bazelrc @@ -48,6 +54,15 @@ common --crosstool_top=@local_config_cc//:toolchain # bazel config to explicitly disable stamping (hide version information at build time) common:nostamp --nostamp --workspace_status_command= +# bazel config to use (buildbuddy) remote cache +common:remote_cache --bes_results_url=https://app.buildbuddy.io/invocation/ +common:remote_cache --bes_backend=grpcs://remote.buildbuddy.io +common:remote_cache --remote_cache=grpcs://remote.buildbuddy.io +common:remote_cache --remote_timeout=3600 +common:remote_cache --experimental_remote_build_event_upload=minimal +common:remote_cache --nolegacy_important_outputs +common:remote_cache_readonly --noremote_upload_local_results # Uploads logs & artifacts without writing to cache + common:build_barn_rbe_ubuntu_22_04 --remote_timeout=3600 common:build_barn_rbe_ubuntu_22_04 --remote_executor=grpc://frontend.buildbarn:8980 # this maps to the kubernetes internal buildbarn/frontend service common:build_barn_rbe_ubuntu_22_04 --extra_execution_platforms=//bazel/rbe:ubuntu-act-22-04-platform diff --git a/.bazelversion b/.bazelversion index 93c8ddab9..66ce77b7e 100644 --- a/.bazelversion +++ b/.bazelversion @@ -1 +1 @@ -7.6.0 +7.0.0 diff --git a/.github/actions/artifact_delete/action.yml b/.github/actions/artifact_delete/action.yml deleted file mode 100644 index e2f001621..000000000 --- a/.github/actions/artifact_delete/action.yml +++ /dev/null @@ -1,17 +0,0 @@ -name: Delete artifact -description: Delete an artifact by name - -inputs: - name: - description: 'The name of the artifact.' - required: true - workflowID: - description: 'The ID of the workflow.' - required: true - -runs: - using: "composite" - steps: - - name: Delete artifact - shell: bash - run: ./.github/actions/artifact_delete/delete_artifact.sh ${{ inputs.workflowID }} ${{ inputs.name }} diff --git a/.github/actions/artifact_delete/delete_artifact.sh b/.github/actions/artifact_delete/delete_artifact.sh deleted file mode 100755 index 942304831..000000000 --- a/.github/actions/artifact_delete/delete_artifact.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env bash - -# get_artifact_id retrieves the artifact id of -# an artifact that was generated by a workflow. -# $1 should be the workflow run id. $2 should be the artifact name. -function get_artifact_id { - artifact_id="$(gh api \ - -H "Accept: application/vnd.github+json" \ - -H "X-GitHub-Api-Version: 2022-11-28" \ - --paginate \ - "/repos/edgelesssys/constellation/actions/runs/$1/artifacts" --jq ".artifacts |= map(select(.name==\"$2\")) | .artifacts[0].id" || exit 1)" - echo "$artifact_id" | tr -d "\n" -} - -# delete_artifact_by_id deletes an artifact by its artifact id. -# $1 should be the id of the artifact. 
-function delete_artifact_by_id { - gh api \ - --method DELETE \ - -H "Accept: application/vnd.github+json" \ - -H "X-GitHub-Api-Version: 2022-11-28" \ - "/repos/edgelesssys/constellation/actions/artifacts/$1" || exit 1 -} - -workflow_id="$1" -artifact_name="$2" - -if [[ -z $workflow_id ]] || [[ -z $artifact_name ]]; then - echo "Usage: delete_artifact.sh " - exit 1 -fi - -echo "[*] retrieving artifact ID" -artifact_id="$(get_artifact_id "$workflow_id" "$artifact_name")" - -echo "[*] deleting artifact with ID $artifact_id" -delete_artifact_by_id "$artifact_id" diff --git a/.github/actions/artifact_download/action.yml b/.github/actions/artifact_download/action.yml index e3cf3d1f8..edf875325 100644 --- a/.github/actions/artifact_download/action.yml +++ b/.github/actions/artifact_download/action.yml @@ -16,11 +16,11 @@ inputs: runs: using: "composite" steps: - - name: Install 7zip + - name: Install unzip uses: ./.github/actions/setup_bazel_nix with: nixTools: | - _7zz + unzip - name: Create temporary directory id: tempdir @@ -28,7 +28,7 @@ runs: run: echo "directory=$(mktemp -d)" >> "$GITHUB_OUTPUT" - name: Download the artifact - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 + uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 with: name: ${{ inputs.name }} path: ${{ steps.tempdir.outputs.directory }} @@ -37,4 +37,4 @@ runs: shell: bash run: | mkdir -p ${{ inputs.path }} - 7zz x -p'${{ inputs.encryptionSecret }}' -bso0 -bsp0 -t7z -o"${{ inputs.path }}" ${{ steps.tempdir.outputs.directory }}/archive.7z + unzip -P '${{ inputs.encryptionSecret }}' -qq -d ${{ inputs.path }} ${{ steps.tempdir.outputs.directory }}/archive.zip diff --git a/.github/actions/artifact_upload/action.yml b/.github/actions/artifact_upload/action.yml index 2ef3e85a8..11dd9a0bd 100644 --- a/.github/actions/artifact_upload/action.yml +++ b/.github/actions/artifact_upload/action.yml @@ -14,19 +14,15 @@ inputs: encryptionSecret: description: 'The secret to use for encrypting the files.' required: true - overwrite: - description: 'Overwrite an artifact with the same name.' - default: false - required: false runs: using: "composite" steps: - - name: Install 7zip + - name: Install zip uses: ./.github/actions/setup_bazel_nix with: nixTools: | - _7zz + zip - name: Create temporary directory id: tempdir @@ -36,10 +32,11 @@ runs: - name: Create archive shell: bash run: | - set -euo pipefail shopt -s extglob + paths="${{ inputs.path }}" paths=${paths%$'\n'} # Remove trailing newline + # Check if any file matches the given pattern(s). 
something_exists=false for pattern in ${paths} @@ -60,19 +57,15 @@ runs: for target in ${paths} do - if compgen -G "${target}" > /dev/null - then - pushd "$(dirname "${target}")" - 7zz a -p'${{ inputs.encryptionSecret }}' -bso0 -bsp0 -t7z -ms=on -mhe=on "${{ steps.tempdir.outputs.directory }}/archive.7z" "$(basename "${target}")" - popd - fi + pushd "$(dirname "${target}")" || exit 1 + zip -e -P '${{ inputs.encryptionSecret }}' -r "${{ steps.tempdir.outputs.directory }}/archive.zip" "$(basename "${target}")" + popd || exit 1 done - name: Upload archive as artifact - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: name: ${{ inputs.name }} - path: ${{ steps.tempdir.outputs.directory }}/archive.7z + path: ${{ steps.tempdir.outputs.directory }}/archive.zip retention-days: ${{ inputs.retention-days }} if-no-files-found: ignore - overwrite: ${{ inputs.overwrite }} diff --git a/.github/actions/build_cli/action.yml b/.github/actions/build_cli/action.yml index b74b67456..284b01e0f 100644 --- a/.github/actions/build_cli/action.yml +++ b/.github/actions/build_cli/action.yml @@ -75,9 +75,11 @@ runs: shell: bash run: bazel run //bazel/release:push + # TODO(3u13r): Replace with https://github.com/sigstore/sigstore-installer/tree/initial + # once it has the functionality - name: Install Cosign if: inputs.cosignPublicKey != '' && inputs.cosignPrivateKey != '' && inputs.cosignPassword != '' - uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3.8.2 + uses: sigstore/cosign-installer@e1523de7571e31dbe865fd2e80c5c7c23ae71eb4 # v3.4.0 - name: Install Rekor if: inputs.cosignPublicKey != '' && inputs.cosignPrivateKey != '' && inputs.cosignPassword != '' diff --git a/.github/actions/build_micro_service/action.yml b/.github/actions/build_micro_service/action.yml index 7fecf16a2..9ec569340 100644 --- a/.github/actions/build_micro_service/action.yml +++ b/.github/actions/build_micro_service/action.yml @@ -42,7 +42,7 @@ runs: - name: Docker metadata id: meta - uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5.7.0 + uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81 # v5.5.1 with: images: | ghcr.io/${{ github.repository }}/${{ inputs.name }} @@ -62,7 +62,7 @@ runs: - name: Build and push container image id: build-micro-service - uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0 + uses: docker/build-push-action@4a13e500e55cf31b7a5d59a38ab2040ab0f42f56 # v5.1.0 with: context: . 
file: ${{ inputs.dockerfile }} diff --git a/.github/actions/cdbg_deploy/action.yml b/.github/actions/cdbg_deploy/action.yml index a51c54b6a..347ea56a0 100644 --- a/.github/actions/cdbg_deploy/action.yml +++ b/.github/actions/cdbg_deploy/action.yml @@ -40,15 +40,8 @@ runs: if: inputs.cloudProvider == 'azure' shell: bash run: | - UAMI=$(yq eval ".provider.azure.userAssignedIdentity" constellation-conf.yaml) - PRINCIPAL_ID=$(az identity show --ids "$UAMI" | yq ".principalId") - if [ -z "$PRINCIPAL_ID" ]; then - echo "::error::PRINCIPAL_ID for \"$UAMI\" not found" - echo "::group::Available identities" - az identity list | yq ".[].id" - echo "::endgroup::" - exit 1 - fi + UAMI=$(yq eval ".provider.azure.userAssignedIdentity | upcase" constellation-conf.yaml) + PRINCIPAL_ID=$(az identity list | yq ".[] | select(.id | test(\"(?i)$UAMI\"; \"g\")) | .principalId") az role assignment create --role "Key Vault Secrets User" \ --assignee "$PRINCIPAL_ID" \ --scope /subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/e2e-test-creds/providers/Microsoft.KeyVault/vaults/opensearch-creds @@ -61,7 +54,7 @@ runs: - name: Login to AWS (IAM service principal) if: inputs.cloudProvider == 'aws' - uses: aws-actions/configure-aws-credentials@b47578312673ae6fa5b5096b330d9fbac3d116df # v4.2.1 + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 with: role-to-assume: arn:aws:iam::795746500882:role/GithubActionsE2EIAM aws-region: eu-central-1 @@ -80,7 +73,7 @@ runs: - name: Login to AWS (Cluster service principal) if: inputs.cloudProvider == 'aws' - uses: aws-actions/configure-aws-credentials@b47578312673ae6fa5b5096b330d9fbac3d116df # v4.2.1 + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 with: role-to-assume: arn:aws:iam::795746500882:role/GithubActionsE2ECluster aws-region: eu-central-1 @@ -91,11 +84,6 @@ runs: shell: bash run: | echo "::group::cdbg deploy" - on_error() { - echo "::error::cdbg deploy failed" - } - trap on_error ERR - chmod +x $GITHUB_WORKSPACE/build/cdbg cdbg deploy \ --bootstrapper "${{ github.workspace }}/build/bootstrapper" \ diff --git a/.github/actions/check_measurements_reproducibility/action.yml b/.github/actions/check_measurements_reproducibility/action.yml deleted file mode 100644 index 184e1221f..000000000 --- a/.github/actions/check_measurements_reproducibility/action.yml +++ /dev/null @@ -1,64 +0,0 @@ -name: Check measurements reproducibility -description: Check if the measurements of a given release are reproducible. - -inputs: - version: - type: string - description: The version of the measurements that are downloaded from the CDN. - required: true - ref: - type: string - description: The git ref to check out. You probably want this to be the tag of the release you are testing. 
- required: true - -runs: - using: "composite" - steps: - - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - with: - ref: ${{ inputs.ref }} - path: ./release - - - name: Set up bazel - uses: ./.github/actions/setup_bazel_nix - with: - useCache: "false" - nixTools: | - systemdUkify - jq - jd-diff-patch - moreutils - - - name: Allow unrestricted user namespaces - shell: bash - run: | - sudo sysctl --ignore --write kernel.apparmor_restrict_unprivileged_unconfined=0 - sudo sysctl --ignore --write kernel.apparmor_restrict_unprivileged_userns=0 - - - name: Build images - id: build-images - shell: bash - run: | - set -euo pipefail - - # Build required binaries - pushd release - bazel build //image/system:stable - echo "buildPath=$PWD/bazel-bin/image" | tee -a "$GITHUB_OUTPUT" - popd - - - name: Download measurements - shell: bash - run: | - curl -fsLO https://cdn.confidential.cloud/constellation/v2/ref/-/stream/stable/${{ inputs.version }}/image/measurements.json - - - name: Cleanup release measurements and generate our own - shell: bash - run: | - ${{ github.action_path }}/create_measurements.sh "${{ steps.build-images.outputs.buildPath }}" - - - name: Compare measurements - shell: bash - run: | - ${{ github.action_path }}/compare_measurements.sh "${{ steps.build-images.outputs.buildPath }}" diff --git a/.github/actions/check_measurements_reproducibility/compare_measurements.sh b/.github/actions/check_measurements_reproducibility/compare_measurements.sh deleted file mode 100755 index 5077a05f2..000000000 --- a/.github/actions/check_measurements_reproducibility/compare_measurements.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/env bash -# no -e since we need to collect errors later -# no -u since it interferes with checking associative arrays -set -o pipefail -shopt -s extglob - -declare -A errors - -for directory in "$1"/system/!(mkosi_wrapper.sh); do - dirname="$(basename "$directory")" - attestationVariant="$(echo "$dirname" | cut -d_ -f2)" - - echo "Their measurements for $attestationVariant:" - ts " " < "$attestationVariant"_their-measurements.json - echo "Own measurements for $attestationVariant:" - ts " " < "$attestationVariant"_own-measurements.json - - diff="$(jd ./"$attestationVariant"_their-measurements.json ./"$attestationVariant"_own-measurements.json)" - if [[ -n $diff ]]; then - errors["$attestationVariant"]="$diff" - fi -done - -for attestationVariant in "${!errors[@]}"; do - echo "Failed to reproduce measurements for $attestationVariant:" - echo "${errors["$attestationVariant"]}" | ts " " -done - -if [[ ${#errors[@]} -ne 0 ]]; then - exit 1 -fi diff --git a/.github/actions/check_measurements_reproducibility/create_measurements.sh b/.github/actions/check_measurements_reproducibility/create_measurements.sh deleted file mode 100755 index 4cabd5df0..000000000 --- a/.github/actions/check_measurements_reproducibility/create_measurements.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail -shopt -s extglob - -for directory in "$1"/system/!(mkosi_wrapper.sh); do - dirname="$(basename "$directory")" - csp="$(echo "$dirname" | cut -d_ -f1)" - attestationVariant="$(echo "$dirname" | cut -d_ -f2)" - - # This jq filter selects the measurements for the correct CSP and attestation variant - # and then removes all `warnOnly: true` measurements. 
- jq --arg attestation_variant "$attestationVariant" --arg csp "$csp" \ - ' - .list.[] - | select( - .attestationVariant == $attestation_variant - and (.csp | ascii_downcase) == $csp - ) - | .measurements - | to_entries - | map(select(.value.warnOnly | not)) - | from_entries - | del(.[] .warnOnly) - ' \ - measurements.json > "$attestationVariant"_their-measurements.json - - bazel run --run_under "sudo --preserve-env" //image/measured-boot/cmd -- "$directory/constellation" /dev/stdout | jq '.measurements' > ./"$attestationVariant"_own-measurements.json -done diff --git a/.github/actions/constellation_create/action.yml b/.github/actions/constellation_create/action.yml index caec827d6..a4c530c94 100644 --- a/.github/actions/constellation_create/action.yml +++ b/.github/actions/constellation_create/action.yml @@ -192,13 +192,6 @@ runs: run: | echo "flag=--force" | tee -a $GITHUB_OUTPUT - - name: Set conformance flag - id: set-conformance-flag - if: inputs.test == 'sonobuoy conformance' - shell: bash - run: | - echo "flag=--conformance" | tee -a $GITHUB_OUTPUT - - name: Constellation apply (Terraform) id: constellation-apply-terraform if: inputs.clusterCreation == 'terraform' @@ -211,7 +204,7 @@ runs: if: inputs.clusterCreation != 'terraform' shell: bash run: | - constellation apply --skip-phases=infrastructure --debug ${{ steps.set-force-flag.outputs.flag }} ${{ steps.set-conformance-flag.outputs.flag }} + constellation apply --skip-phases=infrastructure --debug ${{ steps.set-force-flag.outputs.flag }} - name: Get kubeconfig id: get-kubeconfig @@ -224,9 +217,31 @@ runs: env: KUBECONFIG: "${{ steps.get-kubeconfig.outputs.KUBECONFIG }}" JOINTIMEOUT: "1200" # 20 minutes timeout for all nodes to join - CONTROL_NODES_COUNT: "${{ inputs.controlNodesCount }}" - WORKER_NODES_COUNT: "${{ inputs.workerNodesCount }}" - run: ./.github/actions/constellation_create/wait-for-nodes.sh + run: | + echo "::group::Wait for nodes" + NODES_COUNT=$((${{ inputs.controlNodesCount }} + ${{ inputs.workerNodesCount }})) + JOINWAIT=0 + until [[ "$(kubectl get nodes -o json | jq '.items | length')" == "${NODES_COUNT}" ]] || [[ $JOINWAIT -gt $JOINTIMEOUT ]]; + do + echo "$(kubectl get nodes -o json | jq '.items | length')/"${NODES_COUNT}" nodes have joined.. waiting.." + JOINWAIT=$((JOINWAIT+30)) + sleep 30 + done + if [[ $JOINWAIT -gt $JOINTIMEOUT ]]; then + kubectl get nodes -o wide + echo "::error::Timed out waiting for nodes to join" + echo "::endgroup::" + exit 1 + fi + echo "$(kubectl get nodes -o json | jq '.items | length')/"${NODES_COUNT}" nodes have joined" + if ! 
kubectl wait --for=condition=ready --all nodes --timeout=20m; then + kubectl get pods -n kube-system + kubectl get events -n kube-system + echo "::error::kubectl wait timed out before all nodes became ready" + echo "::endgroup::" + exit 1 + fi + echo "::endgroup::" - name: Download boot logs if: always() @@ -257,9 +272,9 @@ runs: continue-on-error: true uses: ./.github/actions/artifact_upload with: - name: debug-logs-${{ inputs.artifactNameSuffix }} - path: | - *.log + name: serial-logs-${{ inputs.artifactNameSuffix }} + path: > + !(terraform).log encryptionSecret: ${{ inputs.encryptionSecret }} - name: Prepare terraform state folders @@ -268,12 +283,9 @@ runs: run: | mkdir to-zip cp -r constellation-terraform to-zip - # constellation-iam-terraform is optional - if [ -d constellation-iam-terraform ]; then - cp -r constellation-iam-terraform to-zip - fi - rm -f to-zip/constellation-terraform/plan.zip - rm -rf to-zip/*/.terraform + cp -r constellation-iam-terraform to-zip + rm to-zip/constellation-terraform/plan.zip + rm -rf to-zip/constellation-terraform/.terraform to-zip/constellation-iam-terraform/.terraform - name: Upload terraform state if: always() diff --git a/.github/actions/constellation_create/wait-for-nodes.sh b/.github/actions/constellation_create/wait-for-nodes.sh deleted file mode 100755 index 9fb9b36e4..000000000 --- a/.github/actions/constellation_create/wait-for-nodes.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/bin/bash - -# We don't want to abort the script if there's a transient error in kubectl. -set +e -set -uo pipefail - -NODES_COUNT=$((CONTROL_NODES_COUNT + WORKER_NODES_COUNT)) -JOINWAIT=0 - -# Reports how many nodes are registered and fulfill condition=ready. -num_nodes_ready() { - kubectl get nodes -o json | - jq '.items | map(select(.status.conditions[] | .type == "Ready" and .status == "True")) | length' -} - -# Reports how many API server pods are ready. -num_apiservers_ready() { - kubectl get pods -n kube-system -l component=kube-apiserver -o json | - jq '.items | map(select(.status.conditions[] | .type == "Ready" and .status == "True")) | length' -} - -# Prints node joining progress. -report_join_progress() { - echo -n "nodes_joined=$(kubectl get nodes -o json | jq '.items | length')/${NODES_COUNT} " - echo -n "nodes_ready=$(num_nodes_ready)/${NODES_COUNT} " - echo "api_servers_ready=$(num_apiservers_ready)/${CONTROL_NODES_COUNT} ..." -} - -# Indicates by exit code whether the cluster is ready, i.e. all nodes and API servers are ready. 
-cluster_ready() { - [[ "$(num_nodes_ready)" == "${NODES_COUNT}" && "$(num_apiservers_ready)" == "${CONTROL_NODES_COUNT}" ]] -} - -echo "::group::Wait for nodes" -until cluster_ready || [[ ${JOINWAIT} -gt ${JOINTIMEOUT} ]]; do - report_join_progress - JOINWAIT=$((JOINWAIT + 30)) - sleep 30 -done -report_join_progress -if [[ ${JOINWAIT} -gt ${JOINTIMEOUT} ]]; then - set -x - kubectl get nodes -o wide - kubectl get pods -n kube-system -o wide - kubectl get events -n kube-system - set +x - echo "::error::timeout reached before all nodes became ready" - echo "::endgroup::" - exit 1 -fi -echo "::endgroup::" diff --git a/.github/actions/constellation_destroy/action.yml b/.github/actions/constellation_destroy/action.yml index c09148efa..af172c0c1 100644 --- a/.github/actions/constellation_destroy/action.yml +++ b/.github/actions/constellation_destroy/action.yml @@ -24,7 +24,6 @@ runs: - name: Delete persistent volumes if: inputs.kubeconfig != '' shell: bash - continue-on-error: true env: KUBECONFIG: ${{ inputs.kubeconfig }} PV_DELETION_TIMEOUT: "120" # 2 minutes timeout for pv deletion @@ -35,14 +34,6 @@ runs: # Scrap namespaces that contain PVCs for namespace in `kubectl get namespace --no-headers=true -o custom-columns=":metadata.name"`; do if [[ `kubectl get pvc -n $namespace --no-headers=true -o custom-columns=":metadata.name" | wc -l` -gt 0 ]]; then - if [[ "${namespace}" == "default" ]]; then - kubectl delete all --all --namespace "default" --wait - continue - fi - if [[ "${namespace}" == "kube-system" ]]; then - kubectl delete pvc --all --namespace "kube-system" --wait - continue - fi kubectl delete namespace $namespace --wait fi done @@ -67,7 +58,7 @@ runs: - name: Login to AWS (Cluster role) if: inputs.cloudProvider == 'aws' - uses: aws-actions/configure-aws-credentials@b47578312673ae6fa5b5096b330d9fbac3d116df # v4.2.1 + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 with: role-to-assume: arn:aws:iam::795746500882:role/GithubActionsE2ECluster aws-region: eu-central-1 diff --git a/.github/actions/constellation_iam_create/action.yml b/.github/actions/constellation_iam_create/action.yml index 46c5ef939..aca32378e 100644 --- a/.github/actions/constellation_iam_create/action.yml +++ b/.github/actions/constellation_iam_create/action.yml @@ -14,10 +14,6 @@ inputs: namePrefix: description: "Name prefix to use for resources." required: true - additionalTags: - description: "Additional resource tags that will be written into the constellation configuration." - default: "" - required: false # # AWS specific inputs # @@ -27,9 +23,6 @@ inputs: # # Azure specific inputs # - azureSubscriptionID: - description: "Azure subscription ID to deploy Constellation in." - required: true azureRegion: description: "Azure region to deploy Constellation in." required: false @@ -42,15 +35,6 @@ inputs: gcpZone: description: "The GCP zone to deploy Constellation in." required: false - # - # STACKIT specific inputs - # - stackitZone: - description: "The STACKIT zone to deploy Constellation in." - required: false - stackitProjectID: - description: "The STACKIT project ID to deploy Constellation in." 
- required: false runs: using: "composite" @@ -64,14 +48,8 @@ runs: kubernetesFlag="--kubernetes=${{ inputs.kubernetesVersion }}" fi - # TODO(v2.17): Remove this fallback and always use --tags flag - tagsFlag="" - if constellation config generate --help | grep -q -- --tags; then - tagsFlag="--tags=\"${{ inputs.additionalTags }}\"" - fi - echo "flag=--update-config" | tee -a "$GITHUB_OUTPUT" - constellation config generate ${{ inputs.cloudProvider }} ${kubernetesFlag} --attestation ${{ inputs.attestationVariant }} ${tagsFlag} + constellation config generate ${{ inputs.cloudProvider }} ${kubernetesFlag} --attestation ${{ inputs.attestationVariant }} - name: Constellation iam create aws shell: bash @@ -88,21 +66,14 @@ runs: shell: bash if: inputs.cloudProvider == 'azure' run: | - extraFlags="" - - if [[ $(constellation iam create azure --help | grep -c -- --subscriptionID) -ne 0 ]]; then - extraFlags="--subscriptionID=${{ inputs.azureSubscriptionID }}" - fi - constellation iam create azure \ --region="${{ inputs.azureRegion }}" \ --resourceGroup="${{ inputs.namePrefix }}-rg" \ --servicePrincipal="${{ inputs.namePrefix }}-sp" \ --update-config \ --tf-log=DEBUG \ - --yes ${extraFlags} + --yes - # TODO: Replace deprecated --serviceAccountID with --prefix - name: Constellation iam create gcp shell: bash if: inputs.cloudProvider == 'gcp' @@ -114,13 +85,3 @@ runs: --update-config \ --tf-log=DEBUG \ --yes - - - name: Set STACKIT-specific configuration - shell: bash - if: inputs.cloudProvider == 'stackit' - env: - STACKIT_PROJECT_ID: ${{ inputs.stackitProjectID }} - run: | - yq eval -i "(.provider.openstack.stackitProjectID) = \"${STACKIT_PROJECT_ID}\"" constellation-conf.yaml - yq eval -i "(.provider.openstack.availabilityZone) = \"${{ inputs.stackitZone }}\"" constellation-conf.yaml - yq eval -i "(.nodeGroups.[].zone) = \"${{ inputs.stackitZone }}\"" constellation-conf.yaml diff --git a/.github/actions/constellation_iam_destroy/action.yml b/.github/actions/constellation_iam_destroy/action.yml index 98109d740..e89dd7c5d 100644 --- a/.github/actions/constellation_iam_destroy/action.yml +++ b/.github/actions/constellation_iam_destroy/action.yml @@ -23,7 +23,7 @@ runs: - name: Login to AWS (IAM role) if: inputs.cloudProvider == 'aws' - uses: aws-actions/configure-aws-credentials@b47578312673ae6fa5b5096b330d9fbac3d116df # v4.2.1 + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 with: role-to-assume: arn:aws:iam::795746500882:role/GithubActionsE2EIAM aws-region: eu-central-1 diff --git a/.github/actions/container_registry_login/action.yml b/.github/actions/container_registry_login/action.yml index 929af6361..fd3b7f230 100644 --- a/.github/actions/container_registry_login/action.yml +++ b/.github/actions/container_registry_login/action.yml @@ -17,7 +17,7 @@ runs: steps: - name: Use docker for logging in if: runner.os != 'macOS' - uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0 + uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0 with: registry: ${{ inputs.registry }} username: ${{ inputs.username }} diff --git a/.github/actions/container_sbom/action.yml b/.github/actions/container_sbom/action.yml index 0d259003d..5c2e501be 100644 --- a/.github/actions/container_sbom/action.yml +++ b/.github/actions/container_sbom/action.yml @@ -19,7 +19,7 @@ runs: steps: - name: Install Cosign if: inputs.cosignPublicKey != '' && inputs.cosignPrivateKey != '' && inputs.cosignPassword != '' - uses: 
sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3.8.2 + uses: sigstore/cosign-installer@e1523de7571e31dbe865fd2e80c5c7c23ae71eb4 # v3.4.0 - name: Download Syft & Grype uses: ./.github/actions/install_syft_grype @@ -36,7 +36,7 @@ runs: syft packages ${{ inputs.containerReference }} -o cyclonedx-json > container-image-predicate.json cosign attest ${{ inputs.containerReference }} --key env://COSIGN_PRIVATE_KEY --predicate container-image-predicate.json --type "https://cyclonedx.org/bom" > container-image.att.json cosign attach attestation ${{ inputs.containerReference }} --attestation container-image.att.json - # TODO: type should be auto-discovered after issue is resolved: + # TODO(3u13r): type should be auto-discovered after issue is resolved: # https://github.com/sigstore/cosign/issues/2264 cosign verify-attestation ${{ inputs.containerReference }} --type "https://cyclonedx.org/bom" --key env://COSIGN_PUBLIC_KEY grype ${{ inputs.containerReference }} --fail-on high --only-fixed --add-cpes-if-none diff --git a/.github/actions/deploy_logcollection/action.yml b/.github/actions/deploy_logcollection/action.yml index 78abb5146..5b3710aa4 100644 --- a/.github/actions/deploy_logcollection/action.yml +++ b/.github/actions/deploy_logcollection/action.yml @@ -67,7 +67,7 @@ runs: # Make sure that helm is installed # This is not always the case, e.g. on MacOS runners - name: Install Helm - uses: azure/setup-helm@b9e51907a09c216f16ebe8536097933489208112 # v4.3.0 + uses: azure/setup-helm@29960d0f5f19214b88e1d9ba750a9914ab0f1a2f # v4.0.0 with: version: v3.9.0 diff --git a/.github/actions/download_release_binaries/action.yml b/.github/actions/download_release_binaries/action.yml index 6b5604c24..7fceffb5f 100644 --- a/.github/actions/download_release_binaries/action.yml +++ b/.github/actions/download_release_binaries/action.yml @@ -5,51 +5,51 @@ runs: using: "composite" steps: - name: Download CLI binaries darwin-amd64 - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 + uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 with: name: constellation-darwin-amd64 - name: Download CLI binaries darwin-arm64 - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 + uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 with: name: constellation-darwin-arm64 - name: Download CLI binaries linux-amd64 - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 + uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 with: name: constellation-linux-amd64 - name: Download CLI binaries linux-arm64 - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 + uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 with: name: constellation-linux-arm64 - name: Download CLI binaries windows-amd64 - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 + uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 with: name: constellation-windows-amd64 - name: Download Terraform module - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 + uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 with: name: terraform-module - name: Download Terraform provider binary darwin-amd64 - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 + uses: 
actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 with: name: terraform-provider-constellation-darwin-amd64 - name: Download Terraform provider binary darwin-arm64 - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 + uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 with: name: terraform-provider-constellation-darwin-arm64 - name: Download Terraform provider binary linux-amd64 - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 + uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 with: name: terraform-provider-constellation-linux-amd64 - name: Download Terraform provider binary linux-arm64 - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 + uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 with: name: terraform-provider-constellation-linux-arm64 diff --git a/.github/actions/e2e_attestationconfigapi/action.yml b/.github/actions/e2e_attestationconfigapi/action.yml index 9ffad3040..56f0a94f7 100644 --- a/.github/actions/e2e_attestationconfigapi/action.yml +++ b/.github/actions/e2e_attestationconfigapi/action.yml @@ -2,9 +2,12 @@ name: E2E Attestationconfig API Test description: "Test the attestationconfig CLI is functional." inputs: - attestationVariant: - description: "attestation variant to run tests against" - default: "azure-sev-snp" + csp: + description: "Cloud provider to run tests against" + default: "azure" + buildBuddyApiKey: + description: "BuildBuddy API key for caching Bazel artifacts" + required: true cosignPrivateKey: description: "Cosign private key" required: true @@ -17,9 +20,12 @@ runs: steps: - name: Setup bazel uses: ./.github/actions/setup_bazel_nix + with: + useCache: "true" + buildBuddyApiKey: ${{ inputs.buildBuddyApiKey }} - name: Login to AWS - uses: aws-actions/configure-aws-credentials@b47578312673ae6fa5b5096b330d9fbac3d116df # v4.2.1 + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 with: role-to-assume: arn:aws:iam::795746500882:role/GithubTestResourceAPI aws-region: eu-west-1 @@ -30,4 +36,4 @@ runs: COSIGN_PRIVATE_KEY: ${{ inputs.cosignPrivateKey }} COSIGN_PASSWORD: ${{ inputs.cosignPassword }} run: | - bazel run //internal/api/attestationconfigapi/cli:cli_e2e_test -- ${{ inputs.attestationVariant }} + bazel run //internal/api/attestationconfigapi/cli:cli_e2e_test -- ${{ inputs.csp }} diff --git a/.github/actions/e2e_autoscaling/action.yml b/.github/actions/e2e_autoscaling/action.yml index 96c9907ed..bf787100b 100644 --- a/.github/actions/e2e_autoscaling/action.yml +++ b/.github/actions/e2e_autoscaling/action.yml @@ -82,30 +82,7 @@ runs: KUBECONFIG: ${{ inputs.kubeconfig }} run: | worker_count=${{ steps.worker_count.outputs.worker_count }} - - cat < - benchmarks/constellation-${{ inputs.attestationVariant }}.json + benchmarks/constellation-${{ inputs.cloudProvider }}.json name: "benchmarks-${{ inputs.artifactNameSuffix }}" encryptionSecret: ${{ inputs.encryptionSecret }} - name: Assume AWS role to retrieve and update benchmarks in S3 - uses: aws-actions/configure-aws-credentials@b47578312673ae6fa5b5096b330d9fbac3d116df # v4.2.1 + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 with: role-to-assume: arn:aws:iam::795746500882:role/GithubActionUpdateBenchmarks aws-region: us-east-2 @@ -179,10 +166,12 @@ runs: - name: Get previous benchmark records from S3 
shell: bash + env: + CSP: ${{ inputs.cloudProvider }} run: | - if aws s3 cp "${S3_PATH}/constellation-${{ inputs.attestationVariant }}.json" ./ --no-progress - then - mv "constellation-${{ inputs.attestationVariant }}.json" "benchmarks/constellation-${{ inputs.attestationVariant }}-previous.json" + aws s3 cp --recursive ${S3_PATH} ./ --no-progress + if [[ -f constellation-${CSP}.json ]]; then + mv constellation-${CSP}.json benchmarks/constellation-${CSP}-previous.json else echo "::warning::Couldn't retrieve previous benchmark records from s3" fi @@ -191,15 +180,15 @@ runs: shell: bash env: # Paths to benchmark results as JSON of the previous run and the current run - PREV_BENCH: benchmarks/constellation-${{ inputs.attestationVariant }}-previous.json - CURR_BENCH: benchmarks/constellation-${{ inputs.attestationVariant }}.json + PREV_BENCH: benchmarks/constellation-${{ inputs.cloudProvider }}-previous.json + CURR_BENCH: benchmarks/constellation-${{ inputs.cloudProvider }}.json run: | if [[ -f "$PREV_BENCH" ]]; then # Fails if the results are outside the threshold range python .github/actions/e2e_benchmark/evaluate/compare.py >> $GITHUB_STEP_SUMMARY fi - - name: Upload benchmark results to OpenSearch + - name: Upload benchmark results to opensearch if: (!env.ACT) shell: bash env: @@ -209,12 +198,14 @@ runs: run: | curl -XPOST \ -u "${OPENSEARCH_USER}:${OPENSEARCH_PWD}" \ - "${OPENSEARCH_DOMAIN}/benchmarks-${{ inputs.attestationVariant }}-$(date '+%Y')"/_doc \ - --data-binary @benchmarks/constellation-${{ inputs.attestationVariant }}.json \ + "${OPENSEARCH_DOMAIN}/benchmarks-${{ inputs.cloudProvider }}-$(date '+%Y')"/_doc \ + --data-binary @benchmarks/constellation-${{ inputs.cloudProvider }}.json \ -H 'Content-Type: application/json' - name: Update benchmark records in S3 if: github.ref_name == 'main' shell: bash + env: + CSP: ${{ inputs.cloudProvider }} run: | - aws s3 cp benchmarks/constellation-${{ inputs.attestationVariant }}.json ${S3_PATH}/constellation-${{ inputs.attestationVariant }}.json + aws s3 cp benchmarks/constellation-${CSP}.json ${S3_PATH}/constellation-${CSP}.json diff --git a/.github/actions/e2e_benchmark/evaluate/compare.py b/.github/actions/e2e_benchmark/evaluate/compare.py index 87faac09e..59d10a186 100644 --- a/.github/actions/e2e_benchmark/evaluate/compare.py +++ b/.github/actions/e2e_benchmark/evaluate/compare.py @@ -94,18 +94,18 @@ class BenchmarkComparer: raise ValueError('Failed reading benchmark file: {e}'.format(e=e)) try: - name = bench_curr['attestationVariant'] + name = bench_curr['provider'] except KeyError: raise ValueError( - 'Current benchmark record file does not contain attestationVariant.') + 'Current benchmark record file does not contain provider.') try: - prev_name = bench_prev['attestationVariant'] + prev_name = bench_prev['provider'] except KeyError: raise ValueError( - 'Previous benchmark record file does not contain attestationVariant.') + 'Previous benchmark record file does not contain provider.') if name != prev_name: raise ValueError( - 'Cloud attestationVariants of previous and current benchmark data do not match.') + 'Cloud providers of previous and current benchmark data do not match.') if 'fio' not in bench_prev.keys() or 'fio' not in bench_curr.keys(): raise ValueError('Benchmarks do not both contain fio records.') diff --git a/.github/actions/e2e_benchmark/evaluate/parse.py b/.github/actions/e2e_benchmark/evaluate/parse.py index 8d9353343..fedce5c70 100644 --- a/.github/actions/e2e_benchmark/evaluate/parse.py +++ 
b/.github/actions/e2e_benchmark/evaluate/parse.py @@ -7,7 +7,7 @@ from datetime import datetime from evaluators import fio, knb -def configure() -> Tuple[str, str, str, str, str | None, str, str, str, str]: +def configure() -> Tuple[str, str, str, str | None, str, str, str, str]: """Read the benchmark data paths. Expects ENV vars (required): @@ -25,29 +25,27 @@ def configure() -> Tuple[str, str, str, str, str | None, str, str, str, str]: """ base_path = os.environ.get('BENCH_RESULTS', None) csp = os.environ.get('CSP', None) - attestation_variant = os.environ.get('ATTESTATION_VARIANT', None) out_dir = os.environ.get('BDIR', None) - if not base_path or not csp or not out_dir or not attestation_variant: + if not base_path or not csp or not out_dir: raise TypeError( - 'ENV variables BENCH_RESULTS, CSP, BDIR, ATTESTATION_VARIANT are required.') + 'ENV variables BENCH_RESULTS, CSP, BDIR are required.') ext_provider_name = os.environ.get('EXT_NAME', None) commit_hash = os.environ.get('GITHUB_SHA', 'N/A') commit_ref = os.environ.get('GITHUB_REF_NAME', 'N/A') actor = os.environ.get('GITHUB_ACTOR', 'N/A') workflow = os.environ.get('GITHUB_WORKFLOW', 'N/A') - return base_path, csp, attestation_variant, out_dir, ext_provider_name, commit_hash, commit_ref, actor, workflow + return base_path, csp, out_dir, ext_provider_name, commit_hash, commit_ref, actor, workflow class BenchmarkParser: - def __init__(self, base_path, csp, attestation_variant, out_dir, ext_provider_name=None, commit_hash="N/A", commit_ref="N/A", actor="N/A", workflow="N/A"): + def __init__(self, base_path, csp, out_dir, ext_provider_name=None, commit_hash="N/A", commit_ref="N/A", actor="N/A", workflow="N/A"): self.base_path = base_path self.csp = csp - self.attestation_variant = attestation_variant self.out_dir = out_dir self.ext_provider_name = ext_provider_name if not self.ext_provider_name: - self.ext_provider_name = f'constellation-{attestation_variant}' + self.ext_provider_name = f'constellation-{csp}' self.commit_hash = commit_hash self.commit_ref = commit_ref self.actor = actor @@ -90,7 +88,6 @@ class BenchmarkParser: }, '@timestamp': str(timestamp), 'provider': self.ext_provider_name, - 'attestationVariant': self.attestation_variant, 'fio': {}, 'knb': {}} @@ -104,8 +101,8 @@ class BenchmarkParser: def main(): - base_path, csp, attestation_variant, out_dir, ext_provider_name, commit_hash, commit_ref, actor, workflow = configure() - p = BenchmarkParser(base_path, csp, attestation_variant, out_dir, ext_provider_name, + base_path, csp, out_dir, ext_provider_name, commit_hash, commit_ref, actor, workflow = configure() + p = BenchmarkParser(base_path, csp, out_dir, ext_provider_name, commit_hash, commit_ref, actor, workflow) p.parse() diff --git a/.github/actions/e2e_benchmark/evaluate/requirements.txt b/.github/actions/e2e_benchmark/evaluate/requirements.txt index c2208b0c2..7f2f1bd57 100644 --- a/.github/actions/e2e_benchmark/evaluate/requirements.txt +++ b/.github/actions/e2e_benchmark/evaluate/requirements.txt @@ -1,3 +1,3 @@ -numpy ==2.3.0 -matplotlib ==3.10.3 -Pillow ==11.3.0 \ No newline at end of file +numpy ==1.26.4 +matplotlib ==3.8.3 +Pillow ==10.2.0 \ No newline at end of file diff --git a/.github/actions/e2e_benchmark/fio.ini b/.github/actions/e2e_benchmark/fio.ini index 33960341a..c956bc7b4 100644 --- a/.github/actions/e2e_benchmark/fio.ini +++ b/.github/actions/e2e_benchmark/fio.ini @@ -7,7 +7,7 @@ size=10Gi time_based=1 group_reporting thread -cpus_allowed=0 +cpus_allowed=1 [read_iops] diff --git 
a/.github/actions/e2e_cleanup_timeframe/action.yml b/.github/actions/e2e_cleanup_timeframe/action.yml deleted file mode 100644 index c77be2431..000000000 --- a/.github/actions/e2e_cleanup_timeframe/action.yml +++ /dev/null @@ -1,62 +0,0 @@ -name: E2E cleanup over timeframe -description: Clean up old terraform resources of E2E tests - -inputs: - ghToken: - description: 'The github token that is used with the github CLI.' - required: true - encryptionSecret: - description: 'The secret to use for decrypting the artifacts.' - required: true - azure_credentials: - description: "Credentials authorized to create Constellation on Azure." - required: true - openStackCloudsYaml: - description: "The contents of ~/.config/openstack/clouds.yaml" - required: false - stackitUat: - description: "The UAT for STACKIT" - required: false - -runs: - using: "composite" - steps: - - name: Authenticate AWS - uses: aws-actions/configure-aws-credentials@b47578312673ae6fa5b5096b330d9fbac3d116df # v4.2.1 - with: - role-to-assume: arn:aws:iam::795746500882:role/GithubActionsE2EDestroy - aws-region: eu-central-1 - - - name: Authenticate Azure - uses: ./.github/actions/login_azure - with: - azure_credentials: ${{ inputs.azure_credentials }} - - - name: Authenticate GCP - uses: ./.github/actions/login_gcp - with: - service_account: "destroy-e2e@constellation-e2e.iam.gserviceaccount.com" - - - name: Login to OpenStack - uses: ./.github/actions/login_openstack - with: - clouds_yaml: ${{ inputs.openStackCloudsYaml }} - - - name: Login to STACKIT - uses: ./.github/actions/login_stackit - with: - serviceAccountToken: ${{ inputs.stackitUat }} - - - name: Install tools - uses: ./.github/actions/setup_bazel_nix - with: - nixTools: | - _7zz - terraform - - - name: Run cleanup - run: ./.github/actions/e2e_cleanup_timeframe/e2e-cleanup.sh - shell: bash - env: - GH_TOKEN: ${{ inputs.ghToken }} - ENCRYPTION_SECRET: ${{ inputs.encryptionSecret }} diff --git a/.github/actions/e2e_cleanup_timeframe/e2e-cleanup.sh b/.github/actions/e2e_cleanup_timeframe/e2e-cleanup.sh deleted file mode 100755 index 0796ae1a0..000000000 --- a/.github/actions/e2e_cleanup_timeframe/e2e-cleanup.sh +++ /dev/null @@ -1,108 +0,0 @@ -#!/bin/bash - -# get_e2e_test_ids_on_date gets all workflow IDs of workflows that contain "e2e" on a specific date. -function get_e2e_test_ids_on_date { - ids="$(gh run list --created "$1" --status failure --json createdAt,workflowName,databaseId --jq '.[] | select(.workflowName | (contains("e2e") or contains("Release")) and (contains("MiniConstellation") | not)) | .databaseId' -L1000 -R edgelesssys/constellation || exit 1)" - echo "${ids}" -} - -# download_tfstate_artifact downloads all artifacts matching the pattern terraform-state-* from a given workflow ID. -function download_tfstate_artifact { - gh run download "$1" -p "terraform-state-*" -R edgelesssys/constellation > /dev/null -} - -# delete_terraform_resources runs terraform destroy on the given folder. -function delete_terraform_resources { - delete_err=0 - if pushd "${1}/${2}"; then - # Workaround for cleaning up Azure resources - # We include a data source that is only used to generate output - # If this data source is deleted before we call terraform destroy, - # terraform will first try to evaluate the data source and fail, - # causing the destroy to fail as well. 
- sed -i '/data "azurerm_user_assigned_identity" "uaid" {/,/}/d' main.tf - sed -i '/output "user_assigned_identity_client_id" {/,/}/d' outputs.tf - - terraform init > /dev/null || delete_err=1 # first, install plugins - terraform destroy -auto-approve || delete_err=1 - popd || exit 1 - fi - return "${delete_err}" -} - -# check if the password for artifact decryption was given -if [[ -z ${ENCRYPTION_SECRET} ]]; then - echo "ENCRYPTION_SECRET is not set. Please set an environment variable with that secret." - exit 1 -fi - -artifact_pwd=${ENCRYPTION_SECRET} - -shopt -s nullglob - -start_date=$(date "+%Y-%m-%d") -end_date=$(date --date "-4 day" "+%Y-%m-%d") -dates_to_clean=() - -# get all dates of the last week -while [[ ${end_date} != "${start_date}" ]]; do - dates_to_clean+=("${end_date}") - end_date=$(date --date "${end_date} +1 day" "+%Y-%m-%d") -done - -echo "[*] retrieving run IDs for cleanup" -database_ids=() -for d in "${dates_to_clean[@]}"; do - echo " retrieving run IDs from $d" - mapfile -td " " tmp < <(get_e2e_test_ids_on_date "$d") - database_ids+=("${tmp[*]}") -done - -# cleanup database_ids -mapfile -t database_ids < <(echo "${database_ids[@]}") -mapfile -td " " database_ids < <(echo "${database_ids[@]}") - -echo "[*] downloading terraform state artifacts" -for id in "${database_ids[@]}"; do - if [[ ${id} == *[^[:space:]]* ]]; then - echo " downloading from workflow ${id}" - download_tfstate_artifact "${id}" - fi -done - -echo "[*] extracting artifacts" -for directory in ./terraform-state-*; do - echo " extracting ${directory}" - - # extract and decrypt the artifact - 7zz x -t7z -p"${artifact_pwd}" -o"${directory}" "${directory}/archive.7z" > /dev/null || exit 1 -done - -# create terraform caching directory -mkdir "${HOME}/tf_plugin_cache" -export TF_PLUGIN_CACHE_DIR="${HOME}/tf_plugin_cache" -echo "[*] created terraform cache directory ${TF_PLUGIN_CACHE_DIR}" - -echo "[*] deleting resources" -error_occurred=0 -for directory in ./terraform-state-*; do - echo " deleting resources in ${directory}" - if ! delete_terraform_resources "${directory}" "constellation-terraform"; then - echo "[!] deleting resources failed" - error_occurred=1 - fi - echo " deleting IAM configuration in ${directory}" - if ! delete_terraform_resources "${directory}" "constellation-iam-terraform"; then - echo "[!] deleting IAM resources failed" - error_occurred=1 - fi - echo " deleting directory ${directory}" - rm -rf "${directory}" -done - -if [[ ${error_occurred} -ne 0 ]]; then - echo "[!] Errors occurred during resource deletion." - exit 1 -fi - -exit 0 diff --git a/.github/actions/e2e_emergency_ssh/action.yml b/.github/actions/e2e_emergency_ssh/action.yml deleted file mode 100644 index 27b3e8b13..000000000 --- a/.github/actions/e2e_emergency_ssh/action.yml +++ /dev/null @@ -1,70 +0,0 @@ -name: Emergency ssh -description: "Verify that an emergency ssh connection can be established." - -inputs: - kubeconfig: - description: "The kubeconfig file for the cluster." 
- required: true - -runs: - using: "composite" - steps: - - name: Test emergency ssh - shell: bash - env: - KUBECONFIG: ${{ inputs.kubeconfig }} - run: | - set -euo pipefail - - # Activate emergency ssh access to the cluster - pushd ./constellation-terraform - echo "emergency_ssh = true" >> terraform.tfvars - terraform apply -auto-approve - lb="$(terraform output -raw loadbalancer_address)" - popd - - lb_ip="$(gethostip $lb | awk '{print $2}')" - echo "Resolved ip of load balancer: $lb_ip" - - # write ssh config - cat > ssh_config <> $GITHUB_PATH export PATH="$PATH:$(pwd)" constellation version + # Do not spam license server from pipeline + sudo sh -c 'echo "127.0.0.1 license.confidential.cloud" >> /etc/hosts' - name: Build Terraform provider binary if: inputs.clusterCreation == 'terraform' && inputs.cliVersion == '' @@ -229,7 +220,7 @@ runs: - name: Login to AWS (IAM role) if: inputs.cloudProvider == 'aws' - uses: aws-actions/configure-aws-credentials@b47578312673ae6fa5b5096b330d9fbac3d116df # v4.2.1 + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 with: role-to-assume: arn:aws:iam::795746500882:role/GithubActionsE2EIAM aws-region: eu-central-1 @@ -242,30 +233,12 @@ runs: with: azure_credentials: ${{ inputs.azureIAMCreateCredentials }} - - name: Login to OpenStack - if: inputs.cloudProvider == 'stackit' - uses: ./.github/actions/login_openstack - with: - clouds_yaml: ${{inputs.openStackCloudsYaml }} - - - name: Login to STACKIT - if: inputs.cloudProvider == 'stackit' - uses: ./.github/actions/login_stackit - with: - serviceAccountToken: ${{ inputs.stackitUat }} - - name: Create prefix id: create-prefix shell: bash run: | uuid=$(uuidgen | tr "[:upper:]" "[:lower:]") uuid=${uuid%%-*} - - # GCP has a 6 character limit the additional uuid prefix since the full prefix length has a maximum of 24 - if [[ ${{ inputs.cloudProvider }} == 'gcp' ]]; then - uuid=${uuid:0:6} - fi - echo "uuid=${uuid}" | tee -a $GITHUB_OUTPUT echo "prefix=e2e-${{ github.run_id }}-${{ github.run_attempt }}-${uuid}" | tee -a $GITHUB_OUTPUT @@ -275,7 +248,7 @@ runs: with: attestationVariant: ${{ inputs.attestationVariant }} - - name: Create Constellation config and IAM + - name: Create IAM configuration id: constellation-iam-create uses: ./.github/actions/constellation_iam_create with: @@ -283,14 +256,10 @@ runs: attestationVariant: ${{ inputs.attestationVariant }} namePrefix: ${{ steps.create-prefix.outputs.prefix }} awsZone: ${{ inputs.regionZone || 'us-east-2c' }} - azureSubscriptionID: ${{ inputs.azureSubscriptionID }} azureRegion: ${{ inputs.regionZone || steps.pick-az-region.outputs.region }} gcpProjectID: ${{ inputs.gcpProject }} gcpZone: ${{ inputs.regionZone || 'europe-west3-b' }} - stackitZone: ${{ inputs.regionZone || 'eu01-2' }} - stackitProjectID: ${{ inputs.stackitProjectID }} kubernetesVersion: ${{ inputs.kubernetesVersion }} - additionalTags: "workflow=${{ github.run_id }}" - name: Login to GCP (Cluster service account) if: inputs.cloudProvider == 'gcp' @@ -300,7 +269,7 @@ runs: - name: Login to AWS (Cluster role) if: inputs.cloudProvider == 'aws' - uses: aws-actions/configure-aws-credentials@b47578312673ae6fa5b5096b330d9fbac3d116df # v4.2.1 + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 with: role-to-assume: arn:aws:iam::795746500882:role/GithubActionsE2ECluster aws-region: eu-central-1 @@ -362,7 +331,7 @@ runs: if: (inputs.test == 'nop') || (inputs.test == 'upgrade') shell: bash run: | - echo "This test has a 
nop payload. It doesn't run any tests." + echo "::warning::This test has a nop payload. It doesn't run any tests." echo "Sleeping for 30 seconds to allow logs to propagate to the log collection service." sleep 30 @@ -379,21 +348,12 @@ runs: if: inputs.test == 'sonobuoy full' uses: ./.github/actions/e2e_sonobuoy with: - # TODO: Remove E2E_SKIP once AB#2174 is resolved + # TODO(3u13r): Remove E2E_SKIP once AB#2174 is resolved sonobuoyTestSuiteCmd: '--plugin e2e --plugin-env e2e.E2E_FOCUS="\[Conformance\]" --plugin-env e2e.E2E_SKIP="for service with type clusterIP|HostPort validates that there is no conflict between pods with same hostPort but different hostIP and protocol|Services should serve endpoints on same port and different protocols" --plugin https://raw.githubusercontent.com/vmware-tanzu/sonobuoy-plugins/102cd62a4091f80a795189f64ccc20738f931ef0/cis-benchmarks/kube-bench-plugin.yaml --plugin https://raw.githubusercontent.com/vmware-tanzu/sonobuoy-plugins/102cd62a4091f80a795189f64ccc20738f931ef0/cis-benchmarks/kube-bench-master-plugin.yaml' kubeconfig: ${{ steps.constellation-create.outputs.kubeconfig }} artifactNameSuffix: ${{ steps.create-prefix.outputs.prefix }} encryptionSecret: ${{ inputs.encryptionSecret }} - - name: Run sonobuoy conformance - if: inputs.test == 'sonobuoy conformance' - uses: ./.github/actions/e2e_sonobuoy - with: - sonobuoyTestSuiteCmd: "--plugin e2e --mode certified-conformance" - kubeconfig: ${{ steps.constellation-create.outputs.kubeconfig }} - artifactNameSuffix: ${{ steps.create-prefix.outputs.prefix }} - encryptionSecret: ${{ inputs.encryptionSecret }} - - name: Run autoscaling test if: inputs.test == 'autoscaling' uses: ./.github/actions/e2e_autoscaling @@ -405,7 +365,6 @@ runs: uses: ./.github/actions/e2e_lb with: kubeconfig: ${{ steps.constellation-create.outputs.kubeconfig }} - cloudProvider: ${{ inputs.cloudProvider }} - name: Run Performance Benchmark if: inputs.test == 'perf-bench' @@ -453,10 +412,5 @@ runs: kubeconfig: ${{ steps.constellation-create.outputs.kubeconfig }} s3AccessKey: ${{ inputs.s3AccessKey }} s3SecretKey: ${{ inputs.s3SecretKey }} + buildBuddyApiKey: ${{ inputs.buildBuddyApiKey }} githubToken: ${{ inputs.githubToken }} - - - name: Run emergency ssh test - if: inputs.test == 'emergency ssh' - uses: ./.github/actions/e2e_emergency_ssh - with: - kubeconfig: ${{ steps.constellation-create.outputs.kubeconfig }} diff --git a/.github/actions/e2e_verify/action.yml b/.github/actions/e2e_verify/action.yml index 6803124f6..aca4fdceb 100644 --- a/.github/actions/e2e_verify/action.yml +++ b/.github/actions/e2e_verify/action.yml @@ -66,46 +66,45 @@ runs: forwarderPID=$! 
sleep 5 - case "${{ inputs.attestationVariant }}" - in - "azure-sev-snp"|"azure-tdx"|"aws-sev-snp"|"gcp-sev-snp") - echo "Extracting TCB versions for API update" - constellation verify --cluster-id "${clusterID}" --node-endpoint localhost:9090 -o json > "attestation-report-${node}.json" - ;; - *) - constellation verify --cluster-id "${clusterID}" --node-endpoint localhost:9090 - ;; - esac + if [[ ${{ inputs.attestationVariant }} == "azure-sev-snp" ]] || [[ ${{ inputs.attestationVariant }} == "aws-sev-snp" ]]; then + echo "Extracting TCB versions for API update" + constellation verify --cluster-id "${clusterID}" --node-endpoint localhost:9090 -o json > "snp-report-${node}.json" + else + constellation verify --cluster-id "${clusterID}" --node-endpoint localhost:9090 + fi kill $forwarderPID done - name: Login to AWS if: github.ref_name == 'main' - uses: aws-actions/configure-aws-credentials@b47578312673ae6fa5b5096b330d9fbac3d116df # v4.2.1 + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 with: role-to-assume: arn:aws:iam::795746500882:role/GitHubConstellationImagePipeline aws-region: eu-central-1 - name: Upload extracted TCBs - if: github.ref_name == 'main' && (inputs.attestationVariant == 'azure-sev-snp' || inputs.attestationVariant == 'azure-tdx' || inputs.attestationVariant == 'aws-sev-snp' || inputs.attestationVariant == 'gcp-sev-snp') + if: github.ref_name == 'main' && (inputs.attestationVariant == 'azure-sev-snp' || inputs.attestationVariant == 'aws-sev-snp') shell: bash env: COSIGN_PASSWORD: ${{ inputs.cosignPassword }} COSIGN_PRIVATE_KEY: ${{ inputs.cosignPrivateKey }} run: | - reports=attestation-report-*.json + if [[ ${{ inputs.attestationVariant }} == "aws-sev-snp" ]] && constellation version | grep -q "v2.13."; then + echo "Skipping TCB upload for AWS on CLI v2.13" + exit 0 + fi - # bazel run changes the working directory - # convert the relative paths to absolute paths to avoid issues - absolute_reports="" - for report in ${reports}; do - absolute_reports="${absolute_reports} $(realpath "${report}")" + reports=(snp-report-*.json) + if [ -z ${#reports[@]} ]; then + exit 1 + fi + + attestationVariant=${{ inputs.attestationVariant }} + cloudProvider=${attestationVariant%%-*} + + for file in "${reports[@]}"; do + path=$(realpath "${file}") + cat "${path}" + bazel run //internal/api/attestationconfigapi/cli -- upload "${cloudProvider}" snp-report "${path}" done - - report=$(bazel run //internal/api/attestationconfigapi/cli -- compare ${{ inputs.attestationVariant }} ${absolute_reports}) - - path=$(realpath "${report}") - cat "${path}" - - bazel run //internal/api/attestationconfigapi/cli -- upload ${{ inputs.attestationVariant }} attestation-report "${path}" diff --git a/.github/actions/find_latest_image/action.yml b/.github/actions/find_latest_image/action.yml index 2495df405..2e061933f 100644 --- a/.github/actions/find_latest_image/action.yml +++ b/.github/actions/find_latest_image/action.yml @@ -26,25 +26,23 @@ runs: steps: - name: Checkout head if: inputs.imageVersion == '' && inputs.git-ref == 'head' - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ !github.event.pull_request.head.repo.fork && github.head_ref || '' }} - name: Checkout ref if: inputs.imageVersion == '' && inputs.git-ref != 'head' - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: 
actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ inputs.git-ref }} - name: Login to AWS if: inputs.imageVersion == '' - uses: aws-actions/configure-aws-credentials@b47578312673ae6fa5b5096b330d9fbac3d116df # v4.2.1 + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 with: role-to-assume: arn:aws:iam::795746500882:role/GithubConstellationVersionsAPIRead aws-region: eu-central-1 - - uses: ./.github/actions/setup_bazel_nix - - name: Find latest image id: find-latest-image if: inputs.imageVersion == '' diff --git a/.github/actions/gcpccm_vers_to_build/findvers.sh b/.github/actions/gcpccm_vers_to_build/findvers.sh index 1b148d4f4..dd35c582a 100755 --- a/.github/actions/gcpccm_vers_to_build/findvers.sh +++ b/.github/actions/gcpccm_vers_to_build/findvers.sh @@ -82,4 +82,4 @@ for major in "${allMajorVersions[@]}"; do done # Print one elem per line | quote elems | create array | remove empty elems and print compact. -printf '%s\n' "${versionsToBuild[@]}" | jq -R | jq -s | jq -c 'map(select(length > 0))' +printf '%s' "${versionsToBuild[@]}" | jq -R | jq -s | jq -c 'map(select(length > 0))' diff --git a/.github/actions/login_azure/action.yml b/.github/actions/login_azure/action.yml index 50ab87771..14d1a83f2 100644 --- a/.github/actions/login_azure/action.yml +++ b/.github/actions/login_azure/action.yml @@ -10,6 +10,6 @@ runs: # As described at: # https://github.com/Azure/login#configure-deployment-credentials - name: Login to Azure - uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2.3.0 + uses: azure/login@cb79c773a3cfa27f31f25eb3f677781210c9ce3d # v1.6.1 with: creds: ${{ inputs.azure_credentials }} diff --git a/.github/actions/login_gcp/action.yml b/.github/actions/login_gcp/action.yml index c32249c73..36584ed69 100644 --- a/.github/actions/login_gcp/action.yml +++ b/.github/actions/login_gcp/action.yml @@ -20,11 +20,11 @@ runs: echo "GOOGLE_CLOUD_PROJECT=" >> "$GITHUB_ENV" - name: Authorize GCP access - uses: google-github-actions/auth@ba79af03959ebeac9769e648f473a284504d9193 # v2.1.10 + uses: google-github-actions/auth@a6e2e39c0a0331da29f7fd2c2a20a427e8d3ad1f # v2.1.1 with: - workload_identity_provider: projects/1052692473304/locations/global/workloadIdentityPools/constellation-ci-pool/providers/constellation-ci-provider + workload_identity_provider: projects/796962942582/locations/global/workloadIdentityPools/constellation-ci-pool/providers/constellation-ci-provider service_account: ${{ inputs.service_account }} # Even if preinstalled in Github Actions runner image, this setup does some magic authentication required for gsutil. - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@77e7a554d41e2ee56fc945c52dfd3f33d12def9a # v2.1.4 + uses: google-github-actions/setup-gcloud@98ddc00a17442e89a24bbf282954a3b65ce6d200 # v2.1.0 diff --git a/.github/actions/login_stackit/action.yml b/.github/actions/login_stackit/action.yml deleted file mode 100644 index a7ff58425..000000000 --- a/.github/actions/login_stackit/action.yml +++ /dev/null @@ -1,16 +0,0 @@ -name: STACKIT login -description: "Login to STACKIT" -inputs: - serviceAccountToken: - description: "Credentials authorized to create Constellation on STACKIT." 
- required: true -runs: - using: "composite" - steps: - - name: Login to STACKIT - env: - UAT: ${{ inputs.serviceAccountToken }} - shell: bash - run: | - mkdir -p ~/.stackit - echo "${UAT}" > ~/.stackit/credentials.json diff --git a/.github/actions/notify_e2e_failure/action.yml b/.github/actions/notify_e2e_failure/action.yml index 56463a23f..b6c86f113 100644 --- a/.github/actions/notify_e2e_failure/action.yml +++ b/.github/actions/notify_e2e_failure/action.yml @@ -36,6 +36,12 @@ runs: shell: bash run: echo "CURRENT_DATE=$(date +'%Y-%m-%d %H:%M:%S')" >> $GITHUB_ENV + - name: Encode URI component + uses: Ablestor/encode-uri-component-action@790ea01bcf2d5ca4d0dbe8c15351a87b47f22f61 # v1.3 + id: encode-uri-component + with: + string: ${{ inputs.test }} + - name: Create body template id: body-template shell: bash @@ -63,15 +69,13 @@ runs: fi } - e2eTestPayload=$(echo "${{ inputs.test }}" | jq -R -r @uri) - q=$(echo "(filters:!( $(queryGen cloud.provider "${{ inputs.provider }}") $(queryGen metadata.github.ref-stream "${{ inputs.refStream }}") $(queryGen metadata.github.kubernetes-version "${{ inputs.kubernetesVersion }}") $(queryGen metadata.github.attestation-variant "${{ inputs.attestationVariant }}") $(queryGen metadata.github.cluster-creation "${{ inputs.clusterCreation }}") - $(queryGen metadata.github.e2e-test-payload "${e2eTestPayload}") + $(queryGen metadata.github.e2e-test-payload "${{ steps.encode-uri-component.outputs.string }}") (query:(match_phrase:(metadata.github.run-id:${{ github.run_id }}))) ))" | tr -d "\t\n ") diff --git a/.github/actions/notify_stackit/action.yml b/.github/actions/notify_stackit/action.yml deleted file mode 100644 index 2e64fdac5..000000000 --- a/.github/actions/notify_stackit/action.yml +++ /dev/null @@ -1,19 +0,0 @@ -name: Notify STACKIT -description: "Notify STACKIT about test failure" -inputs: - slackToken: - description: "Slack access token." - required: true -runs: - using: "composite" - steps: - - name: Notify STACKIT - env: - SLACK_TOKEN: ${{ inputs.slackToken }} - shell: bash - run: | - curl -X POST \ - -H "Authorization: Bearer $SLACK_TOKEN" \ - -H "Content-type: application/json; charset=utf-8" \ - -d "{\"channel\":\"C0827BT59SM\",\"text\":\"E2E test failed: $GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID\"}" \ - https://slack.com/api/chat.postMessage diff --git a/.github/actions/notify_teams/README.md b/.github/actions/notify_teams/README.md deleted file mode 100644 index 5fb6d724b..000000000 --- a/.github/actions/notify_teams/README.md +++ /dev/null @@ -1,27 +0,0 @@ -# notify Teams action - -This action is used to send a message to our Teams channel in case of a failure in the CI/CD pipeline. -The action will automatically choose an engineer to assign to the issue and tag them in the message. - -Engineers are identified by their GitHub username and bound to a Microsoft Teams ID in `.attachments[0].content.msteams.entities`. -To add a new engineer, add a new entry to the entity list in the format: - -```json -{ - "type": "mention", - "text": "${github_username}", - "mentioned": { - "id": "${msteams_id}", - "name": "${name}" - } -} -``` - -Where `${github_username}` is the GitHub username of the engineer, `${msteams_id}` is the Microsoft Teams ID of the engineer, and `${name}` is the name of the engineer. -To find the Microsoft Teams ID use the following command: - -```bash -az ad user show --id ${email} --query id -``` - -Where `${email}` is the email address of the engineer. 
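The deleted README above describes the two pieces needed to register a new engineer for Teams notifications: the mention entity in `.attachments[0].content.msteams.entities` and the `az ad user show` lookup for the Microsoft Teams ID. As a minimal sketch (not part of the patch itself), assuming the Azure CLI is logged into the right tenant and `jq` is available, both steps could be combined into one hypothetical helper; the argument names and example values are placeholders:

```bash
#!/usr/bin/env bash
# Hypothetical helper: emit a Teams mention entity for one engineer.
# Assumes `az` is authenticated and `jq` is installed.
set -euo pipefail

github_username="$1" # e.g. "daniel-weisse"
email="$2"           # engineer's e-mail address (placeholder)
display_name="$3"    # e.g. "Daniel Weisse"

# Look up the Microsoft Teams (Entra ID) object ID, as the README suggests.
msteams_id="$(az ad user show --id "${email}" --query id --output tsv)"

# Print the entity in the shape expected by the payload template.
jq -n \
  --arg text "${github_username}" \
  --arg id "${msteams_id}" \
  --arg name "${display_name}" \
  '{type: "mention", text: $text, mentioned: {id: $id, name: $name}}'
```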
diff --git a/.github/actions/notify_teams/action.yml b/.github/actions/notify_teams/action.yml index e94a266a9..956a3a889 100644 --- a/.github/actions/notify_teams/action.yml +++ b/.github/actions/notify_teams/action.yml @@ -25,7 +25,7 @@ runs: continue-on-error: true shell: bash run: | - cp .github/actions/notify_teams/teams_payload_template.json teams_payload.json + cp .github/teams_payload_template.json teams_payload.json # Add workflow name to the notification yq -oj -iP '.attachments[0].content.body[0].columns[1].items[0].text = "${{ inputs.title }}"' teams_payload.json diff --git a/.github/actions/pick_assignee/action.yml b/.github/actions/pick_assignee/action.yml index ed9607e77..3e9cd4d64 100644 --- a/.github/actions/pick_assignee/action.yml +++ b/.github/actions/pick_assignee/action.yml @@ -14,6 +14,9 @@ runs: shell: bash run: | possibleAssignees=( + "elchead" + "malt3" + "3u13r" "daniel-weisse" "msanft" "burgerdev" diff --git a/.github/actions/publish_helmchart/action.yml b/.github/actions/publish_helmchart/action.yml index 3c26fbad7..3328d658f 100644 --- a/.github/actions/publish_helmchart/action.yml +++ b/.github/actions/publish_helmchart/action.yml @@ -13,7 +13,7 @@ runs: using: "composite" steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: repository: edgelesssys/helm ref: main @@ -29,7 +29,7 @@ runs: echo version=$(yq eval ".version" ${{ inputs.chartPath }}/Chart.yaml) | tee -a $GITHUB_OUTPUT - name: Create pull request - uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8 + uses: peter-evans/create-pull-request@b1ddad2c994a25fbc81a28b3ec0e368bb2021c50 # v6.0.0 with: path: helm branch: "release/s3proxy/${{ steps.update-chart-version.outputs.version }}" diff --git a/.github/actions/select_image/action.yml b/.github/actions/select_image/action.yml index 6af36480d..391814256 100644 --- a/.github/actions/select_image/action.yml +++ b/.github/actions/select_image/action.yml @@ -3,22 +3,22 @@ description: Resolve string presets and shortpaths to shortpaths only inputs: osImage: - description: "Shortpath, main-debug, main-nightly, or release-stable" + description: "Shortpath or main-debug or release-stable" required: true outputs: osImage: - description: "Shortpath of input string, original input if that was already a shortpath" + description: "Shortpath of for input string, original input if that was already a shortpath" value: ${{ steps.set-output.outputs.osImage }} isDebugImage: - description: "Input is a debug image or not" + description: "Input represents a debug image or not" value: ${{ steps.set-output.outputs.isDebugImage }} runs: using: "composite" steps: - name: Login to AWS - uses: aws-actions/configure-aws-credentials@b47578312673ae6fa5b5096b330d9fbac3d116df # v4.2.1 + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 with: role-to-assume: arn:aws:iam::795746500882:role/GithubConstellationVersionsAPIRead aws-region: eu-central-1 @@ -27,7 +27,7 @@ runs: id: input-is-preset shell: bash run: | - if [[ "${{ inputs.osImage }}" == "ref/main/stream/debug/?" || "${{ inputs.osImage }}" == "ref/main/stream/nightly/?" || "${{ inputs.osImage }}" == "ref/release/stream/stable/?" ]]; then + if [[ "${{ inputs.osImage }}" == "ref/main/stream/debug/?" || "${{ inputs.osImage }}" == "ref/release/stream/stable/?" 
]]; then echo "result=true" | tee -a "$GITHUB_OUTPUT" else echo "result=false" | tee -a "$GITHUB_OUTPUT" @@ -43,10 +43,6 @@ runs: echo "ref=$(echo $REFSTREAM | cut -d/ -f2)" | tee -a "$GITHUB_OUTPUT" echo "stream=$(echo $REFSTREAM | cut -d/ -f4)" | tee -a "$GITHUB_OUTPUT" - - name: Setup Bazel & Nix - if: steps.input-is-preset.outputs.result == 'true' - uses: ./.github/actions/setup_bazel_nix - - name: Find latest image if: steps.input-is-preset.outputs.result == 'true' id: find-latest-image diff --git a/.github/actions/setup_bazel_nix/action.yml b/.github/actions/setup_bazel_nix/action.yml index b560ac8f6..97fa8a756 100644 --- a/.github/actions/setup_bazel_nix/action.yml +++ b/.github/actions/setup_bazel_nix/action.yml @@ -3,9 +3,12 @@ description: Setup Bazel and Nix for CI builds and tests inputs: useCache: - description: "Cache Bazel artifacts. Use 'rbe' to enable with remote execution, and 'false' to disable." + description: "Cache Bazel artifacts. Use 'true' to enable with rw, 'readonly' to download, 'rbe' to enable with remote execution, 'log' to disable cache but upload logs, and 'false' to disable." default: "false" required: true + buildBuddyApiKey: + description: "BuildBuddy API key for caching Bazel artifacts" + required: false rbePlatform: description: "RBE platform to use. If empty, RBE will not be used." required: false @@ -22,8 +25,12 @@ runs: shell: bash run: | echo "::group::Check inputs" - if [[ "${{ inputs.useCache }}" != "rbe" && "${{ inputs.useCache }}" != "false" ]]; then - echo "Invalid value for 'useCache' input: '${{ inputs.useCache }}'. Must be 'rbe', or 'false'." + if [[ "${{ inputs.useCache }}" != "true" && "${{ inputs.useCache }}" != "readonly" && "${{ inputs.useCache }}" != "rbe" && "${{ inputs.useCache }}" != "logs" && "${{ inputs.useCache }}" != "false" ]]; then + echo "Invalid value for 'useCache' input: '${{ inputs.useCache }}'. Must be 'true', 'readonly', or 'false'." + exit 1 + fi + if [[ "${{ inputs.useCache }}" == "true" || "${{ inputs.useCache }}" == "readonly" || "${{ inputs.useCache }}" == "logs" ]] && [[ -z "${{ inputs.buildBuddyApiKey }}" ]]; then + echo "BuildBuddy API key is required when cache is enabled." 
exit 1 fi if [[ "${{ inputs.useCache }}" == "rbe" && -z "${{ inputs.rbePlatform }}" ]]; then @@ -75,7 +82,6 @@ runs: echo "$RUNNER_ARCH not supported" exit 1 fi - echo "nixVersion=$(cat "${{ github.workspace }}/.nixversion")" | tee -a "$GITHUB_OUTPUT" echo "::endgroup::" - name: Install current Bash on macOS @@ -114,9 +120,7 @@ - name: Install nix if: steps.check_inputs.outputs.nixPreinstalled == 'false' - uses: cachix/install-nix-action@17fe5fb4a23ad6cbbe47d6b3f359611ad276644c # v31 - with: - install_url: "https://releases.nixos.org/nix/nix-${{ steps.check_inputs.outputs.nixVersion }}/install" + uses: cachix/install-nix-action@6004951b182f8860210c8d6f0d808ec5b1a33d28 # v25 - name: Set $USER if not set shell: bash @@ -178,6 +182,57 @@ EOF echo "::endgroup::" + - name: Configure Bazel (rw) + if: inputs.useCache == 'true' || inputs.useCache == 'readonly' + shell: bash + env: + BUILDBUDDY_ORG_API_KEY: ${{ inputs.buildBuddyApiKey }} + WORKSPACE: ${{ github.workspace }} + run: | + echo "::group::Configure Bazel" + cat <<EOF >> "${WORKSPACE}/.bazeloverwriterc" + common --bes_results_url=https://app.buildbuddy.io/invocation/ + common --bes_backend=grpcs://remote.buildbuddy.io + common --remote_cache=grpcs://remote.buildbuddy.io + common --remote_header=x-buildbuddy-api-key=${BUILDBUDDY_ORG_API_KEY} + cquery --bes_results_url= + cquery --bes_backend= + cquery --remote_cache= + query --bes_results_url= + query --bes_backend= + query --remote_cache= + EOF + echo "::endgroup::" + + - name: Configure Bazel (readonly) + if: inputs.useCache == 'readonly' + shell: bash + env: + WORKSPACE: ${{ github.workspace }} + run: | + echo "::group::Configure Bazel (readonly)" + echo "common --remote_upload_local_results=false" >> "${WORKSPACE}/.bazeloverwriterc" + echo "::endgroup::" + + - name: Configure Bazel (logs) + if: inputs.useCache == 'logs' + shell: bash + env: + BUILDBUDDY_ORG_API_KEY: ${{ inputs.buildBuddyApiKey }} + WORKSPACE: ${{ github.workspace }} + run: | + echo "::group::Configure Bazel" + cat <<EOF >> "${WORKSPACE}/.bazeloverwriterc" + common --bes_results_url=https://app.buildbuddy.io/invocation/ + common --bes_backend=grpcs://remote.buildbuddy.io + common --remote_header=x-buildbuddy-api-key=${BUILDBUDDY_ORG_API_KEY} + cquery --bes_results_url= + cquery --bes_backend= + query --bes_results_url= + query --bes_backend= + EOF + echo "::endgroup::" + - name: Configure Bazel (rbe) if: inputs.useCache == 'rbe' shell: bash @@ -192,6 +247,24 @@ common --repo_env=GOPROXY=http://goproxy:3000 EOF echo "::endgroup::" + - name: Configure Bazel (rbe logs) + if: inputs.useCache == 'rbe' && inputs.buildBuddyApiKey != '' + shell: bash + env: + BUILDBUDDY_ORG_API_KEY: ${{ inputs.buildBuddyApiKey }} + WORKSPACE: ${{ github.workspace }} + run: | + echo "::group::Configure Bazel" + cat <<EOF >> "${WORKSPACE}/.bazeloverwriterc" + common --bes_results_url=https://app.buildbuddy.io/invocation/ + common --bes_backend=grpcs://remote.buildbuddy.io + common --remote_header=x-buildbuddy-api-key=${BUILDBUDDY_ORG_API_KEY} + cquery --bes_results_url= + cquery --bes_backend= + query --bes_results_url= + query --bes_backend= + EOF + echo "::endgroup::" - name: Disable disk cache on GitHub Actions runners if: startsWith(runner.name , 'GitHub Actions') @@ -221,7 +294,7 @@ { tools, repository, rev }: let repoFlake = builtins.getFlake ("github:" + repository + "/" + rev); - nixpkgs = repoFlake.inputs.nixpkgs; + nixpkgs = repoFlake.inputs.nixpkgsUnstable; pkgs = import nixpkgs { system = builtins.currentSystem; }; toolPkgs = 
map (p: pkgs.${p}) tools; in diff --git a/.github/actions/terraform_apply/action.yml b/.github/actions/terraform_apply/action.yml index edf4fb26f..f66b18ace 100644 --- a/.github/actions/terraform_apply/action.yml +++ b/.github/actions/terraform_apply/action.yml @@ -26,12 +26,6 @@ runs: "gcpSEVES") attestationVariant="gcp-sev-es" ;; - "gcpSEVSNP") - attestationVariant="gcp-sev-snp" - ;; - "qemuVTPM") - attestationVariant="qemu-vtpm" - ;; *) echo "Unknown attestation variant: $(yq '.attestation | keys | .[0]' constellation-conf.yaml)" exit 1 @@ -47,7 +41,7 @@ runs: } random = { source = "hashicorp/random" - version = "3.7.2" + version = "3.6.0" } } } @@ -109,16 +103,6 @@ runs: project_id = "$(yq '.infrastructure.gcp.projectID' constellation-state.yaml)" service_account_key = sensitive("$(cat $(yq '.provider.gcp.serviceAccountKeyPath' constellation-conf.yaml) | base64 -w0)") } - openstack = { - cloud = "stackit" - clouds_yaml_path = "~/.config/openstack/clouds.yaml" - floating_ip_pool_id = "970ace5c-458f-484a-a660-0903bcfd91ad" - deploy_yawol_load_balancer = true - yawol_image_id = "bcd6c13e-75d1-4c3f-bf0f-8f83580cc1be" - yawol_flavor_id = "3b11b27e-6c73-470d-b595-1d85b95a8cdf" - network_id = "$(yq '.infrastructure.networkID' constellation-state.yaml)" - subnet_id = "$(yq '.infrastructure.subnetID' constellation-state.yaml)" - } network_config = { ip_cidr_node = "$(yq '.infrastructure.ipCidrNode' constellation-state.yaml)" ip_cidr_service = "$(yq '.serviceCIDR' constellation-conf.yaml)" diff --git a/.github/actions/update_tfstate/action.yml b/.github/actions/update_tfstate/action.yml deleted file mode 100644 index 59aab2f04..000000000 --- a/.github/actions/update_tfstate/action.yml +++ /dev/null @@ -1,64 +0,0 @@ -name: Update TFState -description: "Update the terraform state artifact. We use this to either delete an artifact if the e2e test was cleaned up successfully or to update the artifact with the latest terraform state." - -inputs: - name: - description: "The name of the artifact that contains the tfstate." - required: true - runID: - description: "The ID of your current run (github.run_id)." - required: true - encryptionSecret: - description: "The encryption secret for the artifacts." - required: true - -runs: - using: "composite" - steps: - - name: Check if uploaded tfstate can be deleted - if: always() - shell: bash - run: | - if [[ ! -d constellation-terraform ]] && [[ ! 
-d constellation-iam-terraform ]]; then - echo "DELETE_TF_STATE=true" >> "$GITHUB_ENV" - else - echo "DELETE_TF_STATE=false" >> "$GITHUB_ENV" - fi - - - name: Delete tfstate artifact if necessary - if: always() && env.DELETE_TF_STATE == 'true' - uses: ./.github/actions/artifact_delete - with: - name: ${{ inputs.name }} - workflowID: ${{ inputs.runID }} - - - name: Prepare left over terraform state folders - if: always() && env.DELETE_TF_STATE == 'false' - shell: bash - run: | - rm -rf to-zip/* - mkdir -p to-zip - - to_upload="" - if [[ -d constellation-terraform ]]; then - cp -r constellation-terraform to-zip - rm -f to-zip/constellation-terraform/plan.zip - rm -rf to-zip/constellation-terraform/.terraform - to_upload+="to-zip/constellation-terraform" - fi - if [[ -d constellation-iam-terraform ]]; then - cp -r constellation-iam-terraform to-zip - rm -rf to-zip/constellation-iam-terraform/.terraform - to_upload+=" to-zip/constellation-iam-terraform" - fi - echo "TO_UPLOAD=$to_upload" >> "$GITHUB_ENV" - - - name: Update tfstate - if: always() && env.TO_UPLOAD != '' - uses: ./.github/actions/artifact_upload - with: - name: ${{ inputs.name }} - path: > - ${{ env.TO_UPLOAD }} - encryptionSecret: ${{ inputs.encryptionSecret }} - overwrite: true diff --git a/.github/actions/upload_terraform_module/action.yml b/.github/actions/upload_terraform_module/action.yml index 140844fdd..2a40a322a 100644 --- a/.github/actions/upload_terraform_module/action.yml +++ b/.github/actions/upload_terraform_module/action.yml @@ -15,7 +15,7 @@ runs: zip -r terraform-module.zip terraform-module - name: Upload artifact - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: name: terraform-module path: terraform-module.zip @@ -23,4 +23,4 @@ runs: - name: Cleanup Terraform module dir shell: bash run: | - rm -rf terraform-module terraform-module.zip + rm -r terraform-module terraform-module.zip diff --git a/.github/actions/versionsapi/Dockerfile b/.github/actions/versionsapi/Dockerfile new file mode 100644 index 000000000..b1018466a --- /dev/null +++ b/.github/actions/versionsapi/Dockerfile @@ -0,0 +1,21 @@ +FROM golang:1.22.1@sha256:34ce21a9696a017249614876638ea37ceca13cdd88f582caad06f87a8aa45bf3 as builder + +# Download project root dependencies +WORKDIR /workspace +COPY go.mod go.mod +COPY go.sum go.sum +# cache deps before building and copying source so that we don't need to re-download as much +# and so that source changes don't invalidate our downloaded layer +RUN go mod download + +COPY . . + +# Build +WORKDIR /workspace/internal/api/versionsapi/cli +RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o versionsapi . + +FROM scratch as release + +COPY --from=builder /workspace/internal/api/versionsapi/cli/versionsapi . + +CMD ["/notIntendedToBeExecuted"] diff --git a/.github/actions/versionsapi/action.yml b/.github/actions/versionsapi/action.yml index 817064334..fd236dec1 100644 --- a/.github/actions/versionsapi/action.yml +++ b/.github/actions/versionsapi/action.yml @@ -52,12 +52,19 @@ outputs: runs: using: composite steps: + - name: Get versionsapi binary + shell: bash + # TODO: This should probably be `bazel run`. + run: | + containerID=$(docker create "ghcr.io/edgelesssys/constellation/versionsapi-ci-cli:latest") + docker cp ${containerID}:/versionsapi . 
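The "Get versionsapi binary" step added above pulls the prebuilt CLI out of the `versionsapi-ci-cli` container image without ever starting it, by creating a stopped container and copying the file with `docker cp`. A self-contained sketch of that extract-without-running pattern might look as follows; this is only an illustration of the technique (the cleanup trap and `chmod` are additions, not part of the workflow step):

```bash
#!/usr/bin/env bash
# Sketch: copy one file out of a container image without running the image.
set -euo pipefail

image="ghcr.io/edgelesssys/constellation/versionsapi-ci-cli:latest"

# Create a container from the image but never start it.
container_id="$(docker create "${image}")"
# Always remove the container again, even if the copy fails.
trap 'docker rm -f "${container_id}" >/dev/null' EXIT

# Copy the binary from the container's filesystem to the working directory.
docker cp "${container_id}:/versionsapi" ./versionsapi
chmod +x ./versionsapi
```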
+ - name: Run versionsapi id: run shell: bash run: | out=$( - bazel run //internal/api/versionsapi/cli:cli -- \ + ./versionsapi \ ${{ inputs.command }} \ ${{ inputs.ref != '' && format('--ref="{0}"', inputs.ref) || '' }} \ ${{ inputs.stream != '' && format('--stream="{0}"', inputs.stream) || '' }} \ diff --git a/.github/actions/notify_teams/teams_payload_template.json b/.github/teams_payload_template.json similarity index 76% rename from .github/actions/notify_teams/teams_payload_template.json rename to .github/teams_payload_template.json index 0354bc07b..145d6a28c 100644 --- a/.github/actions/notify_teams/teams_payload_template.json +++ b/.github/teams_payload_template.json @@ -1,5 +1,5 @@ { - "type": "AdaptiveCard", + "type": "message", "attachments": [ { "contentType": "application/vnd.microsoft.card.adaptive", @@ -11,6 +11,14 @@ "msteams": { "width": "Full", "entities": [ + { + "type": "mention", + "text": "elchead", + "mentioned": { + "id": "3931943b-8d4b-4300-ac7e-bbb06c4da27f", + "name": "Adrian Stobbe" + } + }, { "type": "mention", "text": "msanft", @@ -19,6 +27,14 @@ "name": "Moritz Sanft" } }, + { + "type": "mention", + "text": "3u13r", + "mentioned": { + "id": "26869b29-b0d6-48f8-a9ed-7a6374410a53", + "name": "Leonard Cohnen" + } + }, { "type": "mention", "text": "daniel-weisse", @@ -37,10 +53,18 @@ }, { "type": "mention", - "text": "burgerdev", + "text": "derpsteb", "mentioned": { - "id": "c9efc581-58ca-4da6-93ce-79f69f89deeb", - "name": "Markus Rudy" + "id": "a9a34611-9a38-4c00-a8a2-f87d94c2bf7d", + "name": "Otto Bittner" + } + }, + { + "type": "mention", + "text": "malt3", + "mentioned": { + "id": "3012fe21-cff7-499d-88cf-48cf12f2e90c", + "name": "Malte Poll" } } ] diff --git a/.github/workflows/assign_reviewer.yml b/.github/workflows/assign_reviewer.yml deleted file mode 100644 index ed87296d8..000000000 --- a/.github/workflows/assign_reviewer.yml +++ /dev/null @@ -1,36 +0,0 @@ -name: Assign Reviewer - -on: - pull_request: - types: - - opened - - reopened - - edited - - synchronize - - review_request_removed - - labeled - -permissions: - pull-requests: write - -jobs: - assign_reviewer: - runs-on: ubuntu-latest - if: contains(github.event.pull_request.labels.*.name, 'dependencies') && toJson(github.event.pull_request.requested_reviewers) == '[]' && github.event.pull_request.user.login == 'renovate[bot]' - steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - name: Pick assignee - id: pick-assignee - uses: ./.github/actions/pick_assignee - - name: Assign reviewer - env: - GH_TOKEN: ${{ github.token }} - PR: ${{ github.event.pull_request.number }} - ASSIGNEE: ${{ steps.pick-assignee.outputs.assignee }} - run: | - gh api \ - --method POST \ - -H "Accept: application/vnd.github+json" \ - -H "X-GitHub-Api-Version: 2022-11-28" \ - "/repos/edgelesssys/constellation/pulls/${PR}/requested_reviewers" \ - -f "reviewers[]=${ASSIGNEE}" diff --git a/.github/workflows/aws-snp-launchmeasurement.yml b/.github/workflows/aws-snp-launchmeasurement.yml index d2483d71c..49479bcda 100644 --- a/.github/workflows/aws-snp-launchmeasurement.yml +++ b/.github/workflows/aws-snp-launchmeasurement.yml @@ -8,20 +8,26 @@ on: jobs: run: - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 steps: - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ github.head_ref }} path: constellation + - name: Install necessary tools + run: | + sudo apt-get 
update + sudo apt-get install -y python3 python3-pip + sudo python3 -m pip install --user --require-hashes -r constellation/.github/workflows/aws-snp-launchmeasurements-requirements.txt + - name: Install Nix - uses: cachix/install-nix-action@17fe5fb4a23ad6cbbe47d6b3f359611ad276644c # v31 + uses: cachix/install-nix-action@6004951b182f8860210c8d6f0d808ec5b1a33d28 # v25 - name: Download Firmware release id: download-firmware - uses: robinraju/release-downloader@daf26c55d821e836577a15f77d86ddc078948b05 # v1.12 + uses: robinraju/release-downloader@368754b9c6f47c345fcfbf42bcb577c2f0f5f395 # v1.9 with: repository: aws/uefi latest: true @@ -44,7 +50,7 @@ jobs: echo "ovmfPath=${ovmfPath}" | tee -a "$GITHUB_OUTPUT" popd || exit 1 - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: repository: virtee/sev-snp-measure-go.git ref: e42b6f8991ed5a671d5d1e02a6b61f6373f9f8d8 diff --git a/.github/workflows/aws-snp-launchmeasurements-requirements.txt b/.github/workflows/aws-snp-launchmeasurements-requirements.txt new file mode 100644 index 000000000..6d4195056 --- /dev/null +++ b/.github/workflows/aws-snp-launchmeasurements-requirements.txt @@ -0,0 +1,106 @@ +# +# This file is autogenerated by pip-compile with Python 3.11 +# by the following command: +# +# pip-compile --generate-hashes --output-file=aws-snp-launchmeasurements-requirements.txt input.txt +# +cffi==1.16.0 \ + --hash=sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc \ + --hash=sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a \ + --hash=sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417 \ + --hash=sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab \ + --hash=sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520 \ + --hash=sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36 \ + --hash=sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743 \ + --hash=sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8 \ + --hash=sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed \ + --hash=sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684 \ + --hash=sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56 \ + --hash=sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324 \ + --hash=sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d \ + --hash=sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235 \ + --hash=sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e \ + --hash=sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088 \ + --hash=sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000 \ + --hash=sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7 \ + --hash=sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e \ + --hash=sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673 \ + --hash=sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c \ + --hash=sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe \ + --hash=sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2 \ + --hash=sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098 \ + --hash=sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8 
\ + --hash=sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a \ + --hash=sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0 \ + --hash=sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b \ + --hash=sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896 \ + --hash=sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e \ + --hash=sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9 \ + --hash=sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2 \ + --hash=sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b \ + --hash=sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6 \ + --hash=sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404 \ + --hash=sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f \ + --hash=sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0 \ + --hash=sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4 \ + --hash=sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc \ + --hash=sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936 \ + --hash=sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba \ + --hash=sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872 \ + --hash=sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb \ + --hash=sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614 \ + --hash=sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1 \ + --hash=sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d \ + --hash=sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969 \ + --hash=sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b \ + --hash=sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4 \ + --hash=sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627 \ + --hash=sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956 \ + --hash=sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357 + # via cryptography +cryptography==42.0.4 \ + --hash=sha256:01911714117642a3f1792c7f376db572aadadbafcd8d75bb527166009c9f1d1b \ + --hash=sha256:0e89f7b84f421c56e7ff69f11c441ebda73b8a8e6488d322ef71746224c20fce \ + --hash=sha256:12d341bd42cdb7d4937b0cabbdf2a94f949413ac4504904d0cdbdce4a22cbf88 \ + --hash=sha256:15a1fb843c48b4a604663fa30af60818cd28f895572386e5f9b8a665874c26e7 \ + --hash=sha256:1cdcdbd117681c88d717437ada72bdd5be9de117f96e3f4d50dab3f59fd9ab20 \ + --hash=sha256:1df6fcbf60560d2113b5ed90f072dc0b108d64750d4cbd46a21ec882c7aefce9 \ + --hash=sha256:3c6048f217533d89f2f8f4f0fe3044bf0b2090453b7b73d0b77db47b80af8dff \ + --hash=sha256:3e970a2119507d0b104f0a8e281521ad28fc26f2820687b3436b8c9a5fcf20d1 \ + --hash=sha256:44a64043f743485925d3bcac548d05df0f9bb445c5fcca6681889c7c3ab12764 \ + --hash=sha256:4e36685cb634af55e0677d435d425043967ac2f3790ec652b2b88ad03b85c27b \ + --hash=sha256:5f8907fcf57392cd917892ae83708761c6ff3c37a8e835d7246ff0ad251d9298 \ + --hash=sha256:69b22ab6506a3fe483d67d1ed878e1602bdd5912a134e6202c1ec672233241c1 \ + --hash=sha256:6bfadd884e7280df24d26f2186e4e07556a05d37393b0f220a840b083dc6a824 \ + --hash=sha256:6d0fbe73728c44ca3a241eff9aefe6496ab2656d6e7a4ea2459865f2e8613257 \ + --hash=sha256:6ffb03d419edcab93b4b19c22ee80c007fb2d708429cecebf1dd3258956a563a \ + 
--hash=sha256:810bcf151caefc03e51a3d61e53335cd5c7316c0a105cc695f0959f2c638b129 \ + --hash=sha256:831a4b37accef30cccd34fcb916a5d7b5be3cbbe27268a02832c3e450aea39cb \ + --hash=sha256:887623fe0d70f48ab3f5e4dbf234986b1329a64c066d719432d0698522749929 \ + --hash=sha256:a0298bdc6e98ca21382afe914c642620370ce0470a01e1bef6dd9b5354c36854 \ + --hash=sha256:a1327f280c824ff7885bdeef8578f74690e9079267c1c8bd7dc5cc5aa065ae52 \ + --hash=sha256:c1f25b252d2c87088abc8bbc4f1ecbf7c919e05508a7e8628e6875c40bc70923 \ + --hash=sha256:c3a5cbc620e1e17009f30dd34cb0d85c987afd21c41a74352d1719be33380885 \ + --hash=sha256:ce8613beaffc7c14f091497346ef117c1798c202b01153a8cc7b8e2ebaaf41c0 \ + --hash=sha256:d2a27aca5597c8a71abbe10209184e1a8e91c1fd470b5070a2ea60cafec35bcd \ + --hash=sha256:dad9c385ba8ee025bb0d856714f71d7840020fe176ae0229de618f14dae7a6e2 \ + --hash=sha256:db4b65b02f59035037fde0998974d84244a64c3265bdef32a827ab9b63d61b18 \ + --hash=sha256:e09469a2cec88fb7b078e16d4adec594414397e8879a4341c6ace96013463d5b \ + --hash=sha256:e53dc41cda40b248ebc40b83b31516487f7db95ab8ceac1f042626bc43a2f992 \ + --hash=sha256:f1e85a178384bf19e36779d91ff35c7617c885da487d689b05c1366f9933ad74 \ + --hash=sha256:f47be41843200f7faec0683ad751e5ef11b9a56a220d57f300376cd8aba81660 \ + --hash=sha256:fb0cef872d8193e487fc6bdb08559c3aa41b659a7d9be48b2e10747f47863925 \ + --hash=sha256:ffc73996c4fca3d2b6c1c8c12bfd3ad00def8621da24f547626bf06441400449 + # via sev-snp-measure +pycparser==2.21 \ + --hash=sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9 \ + --hash=sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206 + # via cffi +sev-snp-measure==0.0.9 \ + --hash=sha256:32ac67a0db6b639186116d8806a730aac4743584e6ca810c65e8fc57b875f87d \ + --hash=sha256:a1796822e15430c2db7749d1da269819b8cec1330600bb5589ed0ed61400dc41 + # via -r input.txt +types-cryptography==3.3.23.2 \ + --hash=sha256:09cc53f273dd4d8c29fa7ad11fefd9b734126d467960162397bc5e3e604dea75 \ + --hash=sha256:b965d548f148f8e87f353ccf2b7bd92719fdf6c845ff7cedf2abb393a0643e4f + # via sev-snp-measure diff --git a/.github/workflows/build-binaries.yml b/.github/workflows/build-binaries.yml index a9ed3b89b..f2a5a8642 100644 --- a/.github/workflows/build-binaries.yml +++ b/.github/workflows/build-binaries.yml @@ -22,7 +22,7 @@ jobs: runs-on: [arc-runner-set] steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ !github.event.pull_request.head.repo.fork && github.head_ref || '' }} @@ -31,6 +31,7 @@ jobs: with: useCache: "rbe" rbePlatform: "ubuntu-22.04" + buildBuddyApiKey: ${{ secrets.BUILDBUDDY_ORG_API_KEY }} - name: Build all shell: bash diff --git a/.github/workflows/build-ccm-gcp.yml b/.github/workflows/build-ccm-gcp.yml index b84514a1c..52d33a5af 100644 --- a/.github/workflows/build-ccm-gcp.yml +++ b/.github/workflows/build-ccm-gcp.yml @@ -13,30 +13,30 @@ on: jobs: find-ccm-versions: - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 outputs: versions: ${{ steps.find-versions.outputs.versions }} latest: ${{ steps.find-latest.outputs.latest }} steps: - name: Checkout Constellation - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Checkout kubernetes/cloud-provider-gcp - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: repository: 
"kubernetes/cloud-provider-gcp" path: "cloud-provider-gcp" fetch-depth: 0 - name: Setup Go environment - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: - go-version: "1.24.3" + go-version: "1.22.1" cache: false - name: Install Crane run: | - go install github.com/google/go-containerregistry/cmd/crane@c195f151efe3369874c72662cd69ad43ee485128 # v0.20.2 + go install github.com/google/go-containerregistry/cmd/crane@latest - name: Find versions id: find-versions @@ -54,7 +54,7 @@ jobs: build-ccm-gcp: # matrix cannot handle empty lists if: needs.find-ccm-versions.outputs.versions != '[]' - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 permissions: contents: read packages: write @@ -65,10 +65,10 @@ jobs: version: ${{ fromJson(needs.find-ccm-versions.outputs.versions) }} steps: - name: Checkout Constellation - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Checkout kubernetes/cloud-provider-gcp - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: repository: "kubernetes/cloud-provider-gcp" path: "cloud-provider-gcp" @@ -76,7 +76,7 @@ jobs: - name: Docker meta id: meta - uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5.7.0 + uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81 # v5.5.1 with: images: | ghcr.io/edgelesssys/cloud-provider-gcp @@ -113,7 +113,7 @@ jobs: - name: Build and push container image id: build - uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0 + uses: docker/build-push-action@4a13e500e55cf31b7a5d59a38ab2040ab0f42f56 # v5.1.0 with: context: ./cloud-provider-gcp push: ${{ github.ref_name == 'main' }} diff --git a/.github/workflows/build-gcp-guest-agent.yml b/.github/workflows/build-gcp-guest-agent.yml index 4fab1d2c4..9c1ca24c6 100644 --- a/.github/workflows/build-gcp-guest-agent.yml +++ b/.github/workflows/build-gcp-guest-agent.yml @@ -10,7 +10,7 @@ env: jobs: build-gcp-guest-agent: - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 permissions: contents: read packages: write @@ -69,7 +69,7 @@ jobs: - name: Checkout GoogleCloudPlatform/guest-agent if: steps.needs-build.outputs.out == 'true' - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: repository: "GoogleCloudPlatform/guest-agent" ref: refs/tags/${{ steps.latest-release.outputs.latest }} @@ -77,7 +77,7 @@ jobs: - name: Checkout Constellation if: steps.needs-build.outputs.out == 'true' - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: path: "constellation" ref: ${{ !github.event.pull_request.head.repo.fork && github.head_ref || '' }} @@ -85,7 +85,7 @@ jobs: - name: Docker meta id: meta if: steps.needs-build.outputs.out == 'true' - uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5.7.0 + uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81 # v5.5.1 with: images: | ${{ env.REGISTRY }}/edgelesssys/gcp-guest-agent @@ -114,7 +114,7 @@ jobs: - name: Build and push container image if: steps.needs-build.outputs.out == 'true' id: build - uses: 
docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0 + uses: docker/build-push-action@4a13e500e55cf31b7a5d59a38ab2040ab0f42f56 # v5.1.0 with: context: ./guest-agent file: ./constellation/3rdparty/gcp-guest-agent/Dockerfile diff --git a/.github/workflows/build-libvirt-container.yml b/.github/workflows/build-libvirt-container.yml index 625d6939d..da69a774a 100644 --- a/.github/workflows/build-libvirt-container.yml +++ b/.github/workflows/build-libvirt-container.yml @@ -13,17 +13,18 @@ on: jobs: build-container: - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 permissions: contents: read packages: write steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Setup bazel uses: ./.github/actions/setup_bazel_nix with: + useCache: "false" nixTools: | crane gzip diff --git a/.github/workflows/build-logcollector-images.yml b/.github/workflows/build-logcollector-images.yml index 15517975f..6c7b6a9f2 100644 --- a/.github/workflows/build-logcollector-images.yml +++ b/.github/workflows/build-logcollector-images.yml @@ -13,14 +13,14 @@ on: jobs: build-logcollector-debugd-images: - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 permissions: contents: read packages: write steps: - name: Check out repository id: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ !github.event.pull_request.head.repo.fork && github.head_ref || '' }} diff --git a/.github/workflows/build-os-image-scheduled.yml b/.github/workflows/build-os-image-scheduled.yml index e42c2ebfa..5e3d79c45 100644 --- a/.github/workflows/build-os-image-scheduled.yml +++ b/.github/workflows/build-os-image-scheduled.yml @@ -4,15 +4,15 @@ on: workflow_dispatch: schedule: - cron: "0 21 * * 2" # At 21:00 on Tuesday. + - cron: "10 21 * * 2" # At 21:10 on Tuesday. - cron: "20 21 * * 2" # At 21:20 on Tuesday. - - cron: "40 21 * * 2" # At 21:40 on Tuesday. - cron: "0 21 * * 4" # At 21:00 on Thursday. + - cron: "10 21 * * 4" # At 21:10 on Thursday. - cron: "20 21 * * 4" # At 21:20 on Thursday. - - cron: "40 21 * * 4" # At 21:40 on Thursday. jobs: stream: - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 outputs: stream: ${{ steps.stream.outputs.stream }} steps: @@ -28,10 +28,10 @@ jobs: "0 21 * * 4" | "0 21 * * 2") echo "stream=debug" | tee -a "$GITHUB_OUTPUT" ;; - "20 21 * * 4" | "20 21 * * 2") + "10 21 * * 4" | "10 21 * * 2") echo "stream=console" | tee -a "$GITHUB_OUTPUT" ;; - "40 21 * * 4" | "40 21 * * 2") + "20 21 * * 4" | "20 21 * * 2") echo "stream=nightly" | tee -a "$GITHUB_OUTPUT" ;; *) @@ -54,20 +54,22 @@ jobs: update-code: # On nightly stream only. 
- if: needs.stream.outputs.stream == 'nightly' - needs: ["build-image", "stream"] - runs-on: ubuntu-24.04 + if: | + github.event_name == 'workflow_dispatch' || + github.event.schedule == '20 21 * * 4' || + github.event.schedule == '20 21 * * 2' + needs: build-image + runs-on: ubuntu-22.04 steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ github.head_ref }} - token: ${{ secrets.CI_COMMIT_PUSH_PR }} - name: Setup Go environment - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: - go-version: "1.24.3" + go-version: "1.22.1" cache: false - name: Determine version @@ -97,7 +99,7 @@ jobs: run: rm -f internal/attestation/measurements/measurement-generator/generate - name: Create pull request - uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8 + uses: peter-evans/create-pull-request@b1ddad2c994a25fbc81a28b3ec0e368bb2021c50 # v6.0.0 with: branch: "image/automated/update-measurements-${{ github.run_number }}" base: main @@ -109,7 +111,6 @@ jobs: It updates the hardcoded measurements and the image version (for QEMU/MiniConstellation). commit-message: "image: update measurements and image version" committer: edgelessci - author: edgelessci labels: no changelog # We need to push changes using a token, otherwise triggers like on:push and on:pull_request won't work. token: ${{ !github.event.pull_request.head.repo.fork && secrets.CI_COMMIT_PUSH_PR || '' }} @@ -117,10 +118,10 @@ jobs: notify-failure: if: failure() needs: [ "stream", "build-image", "update-code" ] - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ github.head_ref }} diff --git a/.github/workflows/build-os-image.yml b/.github/workflows/build-os-image.yml index 50783089a..262be65e0 100644 --- a/.github/workflows/build-os-image.yml +++ b/.github/workflows/build-os-image.yml @@ -47,7 +47,7 @@ on: jobs: build-settings: name: "Determine build settings" - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 outputs: ref: ${{ steps.ref.outputs.ref }} stream: ${{ steps.stream.outputs.stream }} @@ -59,7 +59,7 @@ jobs: cliApiBasePath: ${{ steps.image-version.outputs.cliApiBasePath }} steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ inputs.ref || github.head_ref }} @@ -138,7 +138,7 @@ jobs: contents: read steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ inputs.ref || github.head_ref }} @@ -147,7 +147,7 @@ jobs: useCache: "false" - name: Login to AWS - uses: aws-actions/configure-aws-credentials@b47578312673ae6fa5b5096b330d9fbac3d116df # v4.2.1 + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 with: role-to-assume: arn:aws:iam::795746500882:role/GitHubConstellationImagePipeline aws-region: eu-central-1 @@ -167,12 +167,6 @@ jobs: with: clouds_yaml: ${{ secrets.STACKIT_IMAGE_UPLOAD_CLOUDS_YAML }} - - name: Allow unrestricted user namespaces - shell: bash - run: | - sudo sysctl 
--ignore --write kernel.apparmor_restrict_unprivileged_unconfined=0 - sudo sysctl --ignore --write kernel.apparmor_restrict_unprivileged_userns=0 - - name: Build and upload id: build shell: bash diff --git a/.github/workflows/build-versionsapi-ci-image.yml b/.github/workflows/build-versionsapi-ci-image.yml new file mode 100644 index 000000000..8a8295e9d --- /dev/null +++ b/.github/workflows/build-versionsapi-ci-image.yml @@ -0,0 +1,32 @@ +name: Build and upload versionsapi CI image + +on: + workflow_dispatch: + push: + branches: + - main + paths: + - "internal/api/versionsapi/**" + - ".github/workflows/build-versionsapi-ci-image.yml" + - ".github/actions/versionsapi/**" + - "go.mod" + +jobs: + build-versionsapi-ci-cli: + runs-on: ubuntu-22.04 + permissions: + contents: read + packages: write + steps: + - name: Check out repository + id: checkout + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + ref: ${{ !github.event.pull_request.head.repo.fork && github.head_ref || '' }} + + - name: Build and upload container image + uses: ./.github/actions/build_micro_service + with: + name: versionsapi-ci-cli + dockerfile: .github/actions/versionsapi/Dockerfile + githubToken: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/check-links.yml b/.github/workflows/check-links.yml index 598a64445..bf1a84c17 100644 --- a/.github/workflows/check-links.yml +++ b/.github/workflows/check-links.yml @@ -17,15 +17,15 @@ on: jobs: linkChecker: - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ !github.event.pull_request.head.repo.fork && github.head_ref || '' }} - name: Link Checker - uses: lycheeverse/lychee-action@82202e5e9c2f4ef1a55a3d02563e1cb6041e5332 # v2.4.1 + uses: lycheeverse/lychee-action@c053181aa0c3d17606addfe97a9075a32723548a # v1.9.3 with: args: "--config ./.lychee.toml './**/*.md' './**/*.html'" fail: true diff --git a/.github/workflows/check-measurements-reproducibility.yml b/.github/workflows/check-measurements-reproducibility.yml deleted file mode 100644 index 3a91eda81..000000000 --- a/.github/workflows/check-measurements-reproducibility.yml +++ /dev/null @@ -1,27 +0,0 @@ -name: Check measurements reproducibility -on: - workflow_dispatch: - inputs: - version: - type: string - description: The version of the measurements that are downloaded from the CDN. - required: true - ref: - type: string - description: The git ref to check out. You probably want this to be the tag of the release you are testing. - required: true - -jobs: - check-reproducibility: - runs-on: ubuntu-24.04 - steps: - - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - with: - ref: ${{ inputs.ref || github.ref }} - - - name: Check reproducibility - uses: ./.github/actions/check_measurements_reproducibility - with: - version: ${{ github.event.inputs.version }} - ref: ${{ github.event.inputs.ref }} diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 8c77ddacb..de17bf19c 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -17,7 +17,7 @@ on: jobs: codeql: name: CodeQL - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 env: # Force CodeQL to run the extraction on the files compiled by our custom # build command, as opposed to letting the autobuilder figure it out. 
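The comment retained in `codeql.yml` above explains that CodeQL is forced to extract exactly the files compiled by the project's own build command rather than letting the autobuilder guess. The workflow does this through the `github/codeql-action` steps plus a custom build step; purely as a hedged illustration of the same idea with the standalone CodeQL CLI (not the mechanism the workflow uses), a database can be created by tracing an explicit build command:

```bash
#!/usr/bin/env bash
# Illustration only: build a CodeQL database for Go code by tracing an
# explicit build command. Database path and build command are assumptions.
set -euo pipefail

codeql database create codeql-db \
  --language=go \
  --source-root=. \
  --command="bazel build //..."
```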
@@ -34,17 +34,17 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Setup Go environment if: matrix.language == 'go' - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: - go-version: "1.24.3" + go-version: "1.22.1" cache: false - name: Initialize CodeQL - uses: github/codeql-action/init@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3.28.18 + uses: github/codeql-action/init@cf7e9f23492505046de9a37830c3711dd0f25bb3 # v2.16.2 with: languages: ${{ matrix.language }} @@ -63,6 +63,6 @@ jobs: echo "::endgroup::" - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3.28.18 + uses: github/codeql-action/analyze@cf7e9f23492505046de9a37830c3711dd0f25bb3 # v2.16.2 with: category: "/language:${{ matrix.language }}" diff --git a/.github/workflows/docs-vale.yml b/.github/workflows/docs-vale.yml index bb6331ce5..9feacba51 100644 --- a/.github/workflows/docs-vale.yml +++ b/.github/workflows/docs-vale.yml @@ -13,20 +13,15 @@ on: jobs: vale: - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ !github.event.pull_request.head.repo.fork && github.head_ref || '' }} - # Work around https://github.com/errata-ai/vale-action/issues/128. - - run: | - venv="$HOME/.local/share/venv" - python3 -m venv "$venv" - echo "$venv/bin" >> "$GITHUB_PATH" + - name: Vale - uses: errata-ai/vale-action@2690bc95f0ed3cb5220492575af09c51b04fbea9 # tag=reviewdog + uses: errata-ai/vale-action@3f7188c866bcb3259339a09f517d7c4a8838303c # tag=reviewdog with: files: docs/docs fail_on_error: true - version: 3.9.3 diff --git a/.github/workflows/draft-release.yml b/.github/workflows/draft-release.yml index 84b696afb..fa0821e3d 100644 --- a/.github/workflows/draft-release.yml +++ b/.github/workflows/draft-release.yml @@ -50,7 +50,7 @@ on: jobs: build-cli: - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 strategy: fail-fast: false matrix: @@ -72,7 +72,7 @@ jobs: steps: - name: Checkout id: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ inputs.ref || github.head_ref }} @@ -92,8 +92,8 @@ jobs: cosignPassword: ${{ inputs.key == 'release' && secrets.COSIGN_PASSWORD || secrets.COSIGN_DEV_PASSWORD }} - name: Upload CLI as artifact (unix) - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 - if: ${{ matrix.os != 'windows' }} + uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 + if : ${{ matrix.os != 'windows' }} with: name: constellation-${{ matrix.os }}-${{ matrix.arch }} path: | @@ -101,8 +101,8 @@ jobs: build/constellation-${{ matrix.os }}-${{ matrix.arch }}.sig - name: Upload CLI as artifact (windows) - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 - if: ${{ matrix.os == 'windows' }} + uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 + if : ${{ matrix.os == 'windows' }} with: name: constellation-${{ matrix.os }}-${{ matrix.arch }} path: | @@ -110,7 +110,7 @@ jobs: build/constellation-${{ matrix.os }}-${{ matrix.arch 
}}.exe.sig build-terraform-provider: - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 strategy: fail-fast: false matrix: @@ -133,7 +133,7 @@ jobs: steps: - name: Checkout id: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ inputs.ref || github.head_ref }} @@ -149,27 +149,27 @@ jobs: targetArch: ${{ matrix.arch }} - name: Upload Terraform Provider Binary as artifact (unix) - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 - if: ${{ matrix.os != 'windows' }} + uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 + if : ${{ matrix.os != 'windows' }} with: name: terraform-provider-constellation-${{ matrix.os }}-${{ matrix.arch }} path: | build/terraform-provider-constellation-${{ matrix.os }}-${{ matrix.arch }} - name: Upload Terraform Provider Binary as artifact (windows) - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 - if: ${{ matrix.os == 'windows' }} + uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 + if : ${{ matrix.os == 'windows' }} with: name: terraform-provider-constellation-${{ matrix.os }}-${{ matrix.arch }} path: | build/terraform-provider-constellation-${{ matrix.os }}-${{ matrix.arch }}.exe upload-terraform-module: - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 steps: - name: Checkout id: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ inputs.ref || github.head_ref }} @@ -177,7 +177,7 @@ jobs: uses: ./.github/actions/upload_terraform_module push-containers: - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 if: inputs.pushContainers permissions: actions: read @@ -187,7 +187,7 @@ jobs: steps: - name: Checkout id: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ inputs.ref || github.head_ref }} @@ -208,7 +208,7 @@ jobs: run: bazel run //bazel/release:push provenance-subjects: - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 needs: - build-cli - signed-sbom @@ -219,7 +219,7 @@ jobs: steps: - name: Checkout id: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ inputs.ref || github.head_ref }} @@ -227,7 +227,7 @@ jobs: uses: ./.github/actions/download_release_binaries - name: Download CLI SBOM - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 + uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 with: name: constellation.spdx.sbom @@ -252,16 +252,16 @@ jobs: echo provenance-subjects="${HASHESB64}" >> "$GITHUB_OUTPUT" signed-sbom: - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 steps: - name: Checkout id: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ inputs.ref || github.head_ref }} - name: Install Cosign - uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3.8.2 + uses: sigstore/cosign-installer@e1523de7571e31dbe865fd2e80c5c7c23ae71eb4 # v3.4.0 - name: Download Syft & Grype uses: ./.github/actions/install_syft_grype @@ -296,13 +296,13 @@ jobs: COSIGN_PASSWORD: 
${{ inputs.key == 'release' && secrets.COSIGN_PASSWORD || secrets.COSIGN_DEV_PASSWORD }} - name: Upload Constellation CLI SBOM - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: name: constellation.spdx.sbom path: constellation.spdx.sbom - name: Upload Constellation CLI SBOM's signature - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: name: constellation.spdx.sbom.sig path: constellation.spdx.sbom.sig @@ -316,14 +316,14 @@ jobs: - provenance-subjects # This must not be pinned to digest. See: # https://github.com/slsa-framework/slsa-github-generator#referencing-slsa-builders-and-generators - uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0 + uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.10.0 with: base64-subjects: "${{ needs.provenance-subjects.outputs.provenance-subjects }}" provenance-verify: - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 env: - SLSA_VERIFIER_VERSION: "2.7.0" + SLSA_VERIFIER_VERSION: "2.5.1" needs: - build-cli - provenance @@ -332,7 +332,7 @@ jobs: steps: - name: Checkout id: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ inputs.ref || github.head_ref }} @@ -340,12 +340,14 @@ jobs: uses: ./.github/actions/download_release_binaries - name: Download CLI SBOM - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 + uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 with: name: constellation.spdx.sbom - name: Download provenance - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 + # Need to use the same major version as slsa-github-generator to find uploaded artifacts + # https://github.com/slsa-framework/slsa-github-generator/issues/3068 + uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 with: name: ${{ needs.provenance.outputs.provenance-name }} @@ -395,7 +397,7 @@ jobs: release: permissions: contents: write - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 needs: - build-cli - provenance @@ -405,7 +407,7 @@ jobs: steps: - name: Checkout id: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ inputs.ref || github.head_ref }} @@ -418,17 +420,19 @@ jobs: uses: ./.github/actions/download_release_binaries - name: Download CLI SBOM - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 + uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 with: name: constellation.spdx.sbom - name: Download Constellation CLI SBOM's signature - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 + uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 with: name: constellation.spdx.sbom.sig - name: Download Constellation provenance - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 + # Need to use the same major version as slsa-github-generator to find uploaded artifacts + # https://github.com/slsa-framework/slsa-github-generator/issues/3068 + uses: 
actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 with: name: ${{ needs.provenance.outputs.provenance-name }} @@ -472,7 +476,7 @@ jobs: - name: Create release with artifacts id: create-release # GitHub endorsed release project. See: https://github.com/actions/create-release - uses: softprops/action-gh-release@da05d552573ad5aba039eaac05058a918a7bf631 # v2.2.2 + uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 # v0.1.15 with: draft: true generate_release_notes: true @@ -487,7 +491,7 @@ jobs: terraform-module.zip - name: Create Terraform provider release with artifcats - uses: softprops/action-gh-release@da05d552573ad5aba039eaac05058a918a7bf631 # v2.2.2 + uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 # v0.1.15 with: draft: true generate_release_notes: false diff --git a/.github/workflows/e2e-attestationconfigapi.yml b/.github/workflows/e2e-attestationconfigapi.yml index 3c3d233c1..a3605dafc 100644 --- a/.github/workflows/e2e-attestationconfigapi.yml +++ b/.github/workflows/e2e-attestationconfigapi.yml @@ -10,6 +10,11 @@ on: - "internal/api/**" - ".github/workflows/e2e-attestationconfigapi.yml" - "go.mod" + pull_request: + paths: + - "internal/api/**" + - ".github/workflows/e2e-attestationconfigapi.yml" + - "go.mod" jobs: e2e-api: @@ -17,8 +22,8 @@ jobs: fail-fast: false max-parallel: 1 matrix: - attestationVariant: ["azure-sev-snp", "azure-tdx", "aws-sev-snp", "gcp-sev-snp"] - runs-on: ubuntu-24.04 + csp: ["azure", "aws"] + runs-on: ubuntu-22.04 permissions: id-token: write contents: read @@ -26,7 +31,7 @@ jobs: steps: - name: Checkout id: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: # Don't trigger in forks, use head on pull requests, use default otherwise. 
ref: ${{ !github.event.pull_request.head.repo.fork && github.head_ref || github.event.pull_request.head.sha || '' }} @@ -34,6 +39,7 @@ jobs: - name: Run Attestationconfig API E2E uses: ./.github/actions/e2e_attestationconfigapi with: + buildBuddyApiKey: ${{ secrets.BUILDBUDDY_ORG_API_KEY }} cosignPrivateKey: ${{ secrets.COSIGN_DEV_PRIVATE_KEY }} cosignPassword: ${{ secrets.COSIGN_DEV_PASSWORD }} - attestationVariant: ${{ matrix.attestationVariant }} + csp: ${{ matrix.csp }} diff --git a/.github/workflows/e2e-cleanup.yml b/.github/workflows/e2e-cleanup.yml deleted file mode 100644 index 67e5bf0ff..000000000 --- a/.github/workflows/e2e-cleanup.yml +++ /dev/null @@ -1,26 +0,0 @@ -name: e2e cleanup - -on: - schedule: - - cron: "0 0 * * *" # At 00:00 every day - workflow_dispatch: - - -jobs: - cleanup: - runs-on: ubuntu-latest - permissions: - actions: read - id-token: write - steps: - - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - - name: Cleanup - uses: ./.github/actions/e2e_cleanup_timeframe - with: - ghToken: ${{ secrets.GITHUB_TOKEN }} - encryptionSecret: ${{ secrets.ARTIFACT_ENCRYPT_PASSWD }} - azure_credentials: ${{ secrets.AZURE_E2E_DESTROY_CREDENTIALS }} - openStackCloudsYaml: ${{ secrets.STACKIT_CI_CLOUDS_YAML }} - stackitUat: ${{ secrets.STACKIT_CI_UAT }} diff --git a/.github/workflows/e2e-mini.yml b/.github/workflows/e2e-mini.yml index bf0cf1cad..3861a7a7c 100644 --- a/.github/workflows/e2e-mini.yml +++ b/.github/workflows/e2e-mini.yml @@ -20,7 +20,7 @@ on: jobs: e2e-mini: - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 environment: e2e permissions: id-token: write @@ -29,12 +29,12 @@ jobs: steps: - name: Checkout id: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ inputs.ref || github.event.workflow_run.head_branch || github.head_ref }} - name: Azure login OIDC - uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2.3.0 + uses: azure/login@cb79c773a3cfa27f31f25eb3f677781210c9ce3d # v1.6.1 with: client-id: ${{ secrets.AZURE_E2E_MINI_CLIENT_ID }} tenant-id: ${{ secrets.AZURE_TENANT_ID }} @@ -46,6 +46,6 @@ jobs: azureClientID: ${{ secrets.AZURE_E2E_MINI_CLIENT_ID }} azureSubscriptionID: ${{ secrets.AZURE_SUBSCRIPTION_ID }} azureTenantID: ${{ secrets.AZURE_TENANT_ID }} - azureIAMCredentials: ${{ secrets.AZURE_E2E_IAM_CREDENTIALS }} + buildBuddyApiKey: ${{ secrets.BUILDBUDDY_ORG_API_KEY }} registry: ghcr.io githubToken: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/e2e-test-daily.yml b/.github/workflows/e2e-test-daily.yml index f7ea0ad11..c36923a97 100644 --- a/.github/workflows/e2e-test-daily.yml +++ b/.github/workflows/e2e-test-daily.yml @@ -12,7 +12,7 @@ jobs: matrix: refStream: ["ref/main/stream/debug/?", "ref/release/stream/stable/?"] name: Find latest image - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 permissions: id-token: write contents: read @@ -21,7 +21,7 @@ jobs: image-release-stable: ${{ steps.relabel-output.outputs.image-release-stable }} steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ !github.event.pull_request.head.repo.fork && github.head_ref || '' }} @@ -45,21 +45,20 @@ jobs: fail-fast: false max-parallel: 5 matrix: - kubernetesVersion: ["v1.31"] # This should correspond to the current default k8s minor. 
- attestationVariant: ["gcp-sev-es", "gcp-sev-snp", "azure-sev-snp", "azure-tdx", "aws-sev-snp"] + kubernetesVersion: ["1.28"] # should be default + attestationVariant: ["gcp-sev-es", "azure-sev-snp", "azure-tdx", "aws-sev-snp"] refStream: ["ref/main/stream/debug/?", "ref/release/stream/stable/?"] test: ["sonobuoy quick"] - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 permissions: id-token: write checks: write contents: read packages: write - actions: write needs: [find-latest-image] steps: - name: Check out repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: fetch-depth: 0 ref: ${{ !github.event.pull_request.head.repo.fork && github.head_ref || '' }} @@ -90,7 +89,7 @@ jobs: gcpIAMCreateServiceAccount: "iam-e2e@constellation-e2e.iam.gserviceaccount.com" kubernetesVersion: ${{ matrix.kubernetesVersion }} test: ${{ matrix.test }} - azureSubscriptionID: ${{ secrets.AZURE_SUBSCRIPTION_ID }} + buildBuddyApiKey: ${{ secrets.BUILDBUDDY_ORG_API_KEY }} azureClusterCreateCredentials: ${{ secrets.AZURE_E2E_CLUSTER_CREDENTIALS }} azureIAMCreateCredentials: ${{ secrets.AZURE_E2E_IAM_CREDENTIALS }} registry: ghcr.io @@ -122,16 +121,6 @@ jobs: azureCredentials: ${{ secrets.AZURE_E2E_IAM_CREDENTIALS }} gcpServiceAccount: "iam-e2e@constellation-e2e.iam.gserviceaccount.com" - - name: Update tfstate - if: always() - env: - GH_TOKEN: ${{ github.token }} - uses: ./.github/actions/update_tfstate - with: - name: terraform-state-${{ steps.e2e_test.outputs.namePrefix }} - runID: ${{ github.run_id }} - encryptionSecret: ${{ secrets.ARTIFACT_ENCRYPT_PASSWD }} - - name: Notify about failure if: | failure() && @@ -150,7 +139,7 @@ jobs: e2e-mini: name: Run miniconstellation E2E test - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 environment: e2e permissions: id-token: write @@ -159,12 +148,12 @@ jobs: steps: - name: Checkout id: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ !github.event.pull_request.head.repo.fork && github.head_ref || '' }} - name: Azure login OIDC - uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2.3.0 + uses: azure/login@cb79c773a3cfa27f31f25eb3f677781210c9ce3d # v1.6.1 with: client-id: ${{ secrets.AZURE_E2E_MINI_CLIENT_ID }} tenant-id: ${{ secrets.AZURE_TENANT_ID }} @@ -176,7 +165,7 @@ jobs: azureClientID: ${{ secrets.AZURE_E2E_MINI_CLIENT_ID }} azureSubscriptionID: ${{ secrets.AZURE_SUBSCRIPTION_ID }} azureTenantID: ${{ secrets.AZURE_TENANT_ID }} - azureIAMCredentials: ${{ secrets.AZURE_E2E_IAM_CREDENTIALS }} + buildBuddyApiKey: ${{ secrets.BUILDBUDDY_ORG_API_KEY }} registry: ghcr.io githubToken: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/e2e-test-internal-lb.yml b/.github/workflows/e2e-test-internal-lb.yml index ab36cec4a..6e87bd30d 100644 --- a/.github/workflows/e2e-test-internal-lb.yml +++ b/.github/workflows/e2e-test-internal-lb.yml @@ -11,20 +11,19 @@ on: description: "Which attestation variant to use." 
type: choice options: - - "aws-sev-snp" + - "gcp-sev-es" - "azure-sev-snp" - "azure-tdx" - - "gcp-sev-es" - - "gcp-sev-snp" + - "aws-sev-snp" default: "azure-sev-snp" required: true runner: description: "Architecture of the runner that executes the CLI" type: choice options: - - "ubuntu-24.04" - - "macos-latest" - default: "ubuntu-24.04" + - "ubuntu-22.04" + - "macos-12" + default: "ubuntu-22.04" test: description: "The test to run." type: choice @@ -41,6 +40,7 @@ on: required: true kubernetesVersion: description: "Kubernetes version to create the cluster from." + default: "1.28" required: true cliVersion: description: "Version of a released CLI to download. Leave empty to build the CLI from the checked out ref." diff --git a/.github/workflows/e2e-test-marketplace-image.yml b/.github/workflows/e2e-test-marketplace-image.yml index 28e8e9310..94e790cbb 100644 --- a/.github/workflows/e2e-test-marketplace-image.yml +++ b/.github/workflows/e2e-test-marketplace-image.yml @@ -11,20 +11,19 @@ on: description: "Which attestation variant to use." type: choice options: - - "aws-sev-snp" + - "gcp-sev-es" - "azure-sev-snp" - "azure-tdx" - - "gcp-sev-es" - - "gcp-sev-snp" + - "aws-sev-snp" default: "azure-sev-snp" required: true runner: description: "Architecture of the runner that executes the CLI" type: choice options: - - "ubuntu-24.04" - - "macos-latest" - default: "ubuntu-24.04" + - "ubuntu-22.04" + - "macos-12" + default: "ubuntu-22.04" test: description: "The test to run." type: choice @@ -41,6 +40,7 @@ on: required: true kubernetesVersion: description: "Kubernetes version to create the cluster from." + default: "1.28" required: true cliVersion: description: "Version of a released CLI to download. Leave empty to build the CLI from the checked out ref." diff --git a/.github/workflows/e2e-test-provider-example.yml b/.github/workflows/e2e-test-provider-example.yml index 43eacd005..5359358c8 100644 --- a/.github/workflows/e2e-test-provider-example.yml +++ b/.github/workflows/e2e-test-provider-example.yml @@ -31,7 +31,6 @@ on: - "azure-sev-snp" - "azure-tdx" - "gcp-sev-es" - - "gcp-sev-snp" default: "azure-sev-snp" required: true workflow_call: @@ -63,7 +62,7 @@ on: jobs: provider-example-test: - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 permissions: id-token: write contents: read @@ -71,7 +70,7 @@ jobs: steps: - name: Checkout id: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 with: ref: ${{ inputs.ref || github.head_ref }} @@ -113,6 +112,8 @@ jobs: - name: Setup bazel uses: ./.github/actions/setup_bazel_nix with: + useCache: "true" + buildBuddyApiKey: ${{ secrets.BUILDBUDDY_ORG_API_KEY }} nixTools: terraform - name: Create prefix @@ -154,7 +155,7 @@ jobs: - name: Login to AWS (IAM + Cluster role) if: steps.determine.outputs.cloudProvider == 'aws' - uses: aws-actions/configure-aws-credentials@b47578312673ae6fa5b5096b330d9fbac3d116df # v4.2.1 + uses: aws-actions/configure-aws-credentials@5fd3084fc36e372ff1fff382a39b10d03659f355 # v2.2.0 with: role-to-assume: arn:aws:iam::795746500882:role/GithubActionsE2ETerraform aws-region: eu-central-1 @@ -264,21 +265,11 @@ jobs: run: | region=$(echo ${{ inputs.regionZone || 'europe-west3-b' }} | rev | cut -c 3- | rev) - case "${{ inputs.attestationVariant }}" in - "gcp-sev-snp") - cc_tech="SEV_SNP" - ;; - *) - cc_tech="SEV" - ;; - esac - cat >> _override.tf <> _override.tf <> _override.tf <> /etc/hosts' terraform init if [[ "${{ 
inputs.attestationVariant }}" == "azure-sev-snp" ]]; then - timeout 1h terraform apply -target module.azure_iam -auto-approve - timeout 1h terraform apply -target module.azure_infrastructure -auto-approve + terraform apply -target module.azure_iam -auto-approve + terraform apply -target module.azure_infrastructure -auto-approve ${{ github.workspace }}/build/constellation maa-patch "$(terraform output -raw maa_url)" - timeout 1h terraform apply -target constellation_cluster.azure_example -auto-approve + terraform apply -target constellation_cluster.azure_example -auto-approve else - timeout 1h terraform apply -auto-approve + terraform apply -auto-approve fi - name: Cleanup Terraform Cluster on failure @@ -353,7 +330,7 @@ jobs: shell: bash run: | terraform init - terraform destroy -auto-approve -lock=false + terraform destroy -auto-approve - name: Add Provider to local Terraform registry # needed if release version was used before if: inputs.providerVersion != '' @@ -407,7 +384,7 @@ jobs: shell: bash run: | terraform init --upgrade - timeout 1h terraform apply -auto-approve + terraform apply -auto-approve - name: Assert upgrade successful working-directory: ${{ github.workspace }}/cluster @@ -415,7 +392,7 @@ jobs: IMAGE: ${{ inputs.toImage && inputs.toImage || steps.find-latest-image.outputs.image }} KUBERNETES: ${{ inputs.toKubernetes }} MICROSERVICES: ${{ steps.build.outputs.build_version }} - WORKERNODES: 1 + WORKERNODES: 1 CONTROLNODES: 1 run: | terraform output -raw kubeconfig > constellation-admin.conf @@ -466,20 +443,20 @@ jobs: yq e '.nodeGroups.control_plane_default.zone = "eu-central-1a"' -i constellation-conf.yaml yq e '.nodeGroups.worker_default.zone = "eu-central-1a"' -i constellation-conf.yaml fi - KUBECONFIG=${{ github.workspace }}/cluster/constellation-admin.conf bazel run --test_timeout=14400 //e2e/provider-upgrade:provider-upgrade_test -- --want-worker "$WORKERNODES" --want-control "$CONTROLNODES" --cli "${{ github.workspace }}/build/constellation" "$IMAGE_FLAG" "$KUBERNETES_FLAG" "$MICROSERVICES_FLAG" + KUBECONFIG=${{ github.workspace }}/cluster/constellation-admin.conf bazel run //e2e/provider-upgrade:provider-upgrade_test -- --want-worker "$WORKERNODES" --want-control "$CONTROLNODES" --cli "${{ github.workspace }}/build/constellation" "$IMAGE_FLAG" "$KUBERNETES_FLAG" "$MICROSERVICES_FLAG" - name: Destroy Terraform Cluster - # outcome is part of the steps context (https://docs.github.com/en/actions/learn-github-actions/contexts#steps-context) + # outcome is part of the steps context (https://docs.github.com/en/actions/learn-github-actions/contexts#steps-context) if: always() && steps.apply_terraform.outcome != 'skipped' working-directory: ${{ github.workspace }}/cluster shell: bash run: | terraform init - terraform destroy -auto-approve -lock=false + terraform destroy -auto-approve - name: Notify about failure if: | - (failure() || cancelled()) && + failure() && github.ref == 'refs/heads/main' && github.event_name == 'schedule' continue-on-error: true diff --git a/.github/workflows/e2e-test-release.yml b/.github/workflows/e2e-test-release.yml index 4b8f5beb0..d71d5d263 100644 --- a/.github/workflows/e2e-test-release.yml +++ b/.github/workflows/e2e-test-release.yml @@ -39,98 +39,219 @@ jobs: fail-fast: false max-parallel: 9 matrix: - test: - - "sonobuoy full" - - "verify" - - "recover" - - "lb" - - "autoscaling" - - "perf-bench" - - "malicious join" - - attestationVariant: - - "gcp-sev-es" - - "gcp-sev-snp" - - "azure-sev-snp" - - "azure-tdx" - - "aws-sev-snp" - 
kubernetes-version: ["v1.32"] - clusterCreation: ["cli"] - runner: ["ubuntu-24.04"] include: + # + # Tests on ubuntu runner + # + # sonobuoy full test on all k8s versions - test: "sonobuoy full" attestationVariant: "gcp-sev-es" - kubernetes-version: "v1.31" - runner: "ubuntu-24.04" - clusterCreation: "cli" - - test: "sonobuoy full" - attestationVariant: "gcp-sev-snp" - kubernetes-version: "v1.31" - runner: "ubuntu-24.04" + kubernetes-version: "v1.29" + runner: "ubuntu-22.04" clusterCreation: "cli" - test: "sonobuoy full" attestationVariant: "azure-sev-snp" - kubernetes-version: "v1.31" - runner: "ubuntu-24.04" + kubernetes-version: "v1.29" + runner: "ubuntu-22.04" clusterCreation: "cli" - test: "sonobuoy full" attestationVariant: "azure-tdx" - kubernetes-version: "v1.31" - runner: "ubuntu-24.04" + kubernetes-version: "v1.29" + runner: "ubuntu-22.04" clusterCreation: "cli" - test: "sonobuoy full" attestationVariant: "aws-sev-snp" - kubernetes-version: "v1.31" - runner: "ubuntu-24.04" + kubernetes-version: "v1.29" + runner: "ubuntu-22.04" + clusterCreation: "cli" + + + - test: "sonobuoy full" + attestationVariant: "gcp-sev-es" + kubernetes-version: "v1.28" + runner: "ubuntu-22.04" + clusterCreation: "cli" + - test: "sonobuoy full" + attestationVariant: "azure-sev-snp" + kubernetes-version: "v1.28" + runner: "ubuntu-22.04" + clusterCreation: "cli" + - test: "sonobuoy full" + attestationVariant: "azure-tdx" + kubernetes-version: "v1.28" + runner: "ubuntu-22.04" + clusterCreation: "cli" + - test: "sonobuoy full" + attestationVariant: "aws-sev-snp" + kubernetes-version: "v1.28" + runner: "ubuntu-22.04" clusterCreation: "cli" - test: "sonobuoy full" attestationVariant: "gcp-sev-es" - kubernetes-version: "v1.30" - runner: "ubuntu-24.04" - clusterCreation: "cli" - - test: "sonobuoy full" - attestationVariant: "gcp-sev-snp" - kubernetes-version: "v1.30" - runner: "ubuntu-24.04" + kubernetes-version: "v1.27" + runner: "ubuntu-22.04" clusterCreation: "cli" - test: "sonobuoy full" attestationVariant: "azure-sev-snp" - kubernetes-version: "v1.30" - runner: "ubuntu-24.04" + kubernetes-version: "v1.27" + runner: "ubuntu-22.04" clusterCreation: "cli" - test: "sonobuoy full" attestationVariant: "azure-tdx" - kubernetes-version: "v1.30" - runner: "ubuntu-24.04" + kubernetes-version: "v1.27" + runner: "ubuntu-22.04" clusterCreation: "cli" - test: "sonobuoy full" attestationVariant: "aws-sev-snp" - kubernetes-version: "v1.30" - runner: "ubuntu-24.04" + kubernetes-version: "v1.27" + runner: "ubuntu-22.04" + clusterCreation: "cli" + + # verify test on latest k8s version + - test: "verify" + attestationVariant: "gcp-sev-es" + kubernetes-version: "v1.29" + runner: "ubuntu-22.04" + clusterCreation: "cli" + - test: "verify" + attestationVariant: "azure-sev-snp" + kubernetes-version: "v1.29" + runner: "ubuntu-22.04" + clusterCreation: "cli" + - test: "verify" + attestationVariant: "azure-tdx" + kubernetes-version: "v1.29" + runner: "ubuntu-22.04" + clusterCreation: "cli" + - test: "verify" + attestationVariant: "aws-sev-snp" + kubernetes-version: "v1.29" + runner: "ubuntu-22.04" + clusterCreation: "cli" + + # recover test on latest k8s version + - test: "recover" + attestationVariant: "gcp-sev-es" + kubernetes-version: "v1.29" + runner: "ubuntu-22.04" + clusterCreation: "cli" + - test: "recover" + attestationVariant: "azure-sev-snp" + kubernetes-version: "v1.29" + runner: "ubuntu-22.04" + clusterCreation: "cli" + - test: "recover" + attestationVariant: "azure-tdx" + kubernetes-version: "v1.29" + runner: 
"ubuntu-22.04" + clusterCreation: "cli" + - test: "recover" + attestationVariant: "aws-sev-snp" + kubernetes-version: "v1.29" + runner: "ubuntu-22.04" + clusterCreation: "cli" + + # lb test on latest k8s version + - test: "lb" + attestationVariant: "gcp-sev-es" + kubernetes-version: "v1.29" + runner: "ubuntu-22.04" + clusterCreation: "cli" + - test: "lb" + attestationVariant: "azure-sev-snp" + kubernetes-version: "v1.29" + runner: "ubuntu-22.04" + clusterCreation: "cli" + - test: "lb" + attestationVariant: "azure-tdx" + kubernetes-version: "v1.29" + runner: "ubuntu-22.04" + clusterCreation: "cli" + - test: "lb" + attestationVariant: "aws-sev-snp" + kubernetes-version: "v1.29" + runner: "ubuntu-22.04" + clusterCreation: "cli" + + # autoscaling test on latest k8s version + - test: "autoscaling" + attestationVariant: "gcp-sev-es" + kubernetes-version: "v1.29" + runner: "ubuntu-22.04" + clusterCreation: "cli" + - test: "autoscaling" + attestationVariant: "azure-sev-snp" + kubernetes-version: "v1.29" + runner: "ubuntu-22.04" + clusterCreation: "cli" + - test: "autoscaling" + attestationVariant: "azure-tdx" + kubernetes-version: "v1.29" + runner: "ubuntu-22.04" + clusterCreation: "cli" + - test: "autoscaling" + attestationVariant: "aws-sev-snp" + kubernetes-version: "v1.29" + runner: "ubuntu-22.04" + clusterCreation: "cli" + + # perf-bench test on latest k8s version, not supported on AWS + - test: "perf-bench" + attestationVariant: "gcp-sev-es" + kubernetes-version: "v1.29" + runner: "ubuntu-22.04" + clusterCreation: "cli" + - test: "perf-bench" + attestationVariant: "azure-sev-snp" + kubernetes-version: "v1.29" + runner: "ubuntu-22.04" clusterCreation: "cli" # s3proxy test on latest k8s version - test: "s3proxy" + refStream: "ref/main/stream/debug/?" attestationVariant: "gcp-sev-es" - kubernetes-version: "v1.32" - runner: "ubuntu-24.04" + kubernetes-version: "v1.29" + runner: "ubuntu-22.04" + clusterCreation: "cli" + + # malicious join test on latest k8s version + - test: "malicious join" + refStream: "ref/main/stream/debug/?" + attestationVariant: "gcp-sev-es" + kubernetes-version: "v1.29" + clusterCreation: "cli" + - test: "malicious join" + refStream: "ref/main/stream/debug/?" + attestationVariant: "azure-sev-snp" + kubernetes-version: "v1.29" + clusterCreation: "cli" + - test: "malicious join" + refStream: "ref/main/stream/debug/?" + attestationVariant: "azure-tdx" + kubernetes-version: "v1.29" + clusterCreation: "cli" + - test: "malicious join" + refStream: "ref/main/stream/debug/?" 
+ attestationVariant: "aws-sev-snp" + kubernetes-version: "v1.29" clusterCreation: "cli" # # Tests on macOS runner # # Skipping verify test on MacOS since the runner uses a different version of sed - # TODO: Update verify test to work on MacOS runners + # TODO(3u13r): Update verify test to work on MacOS runners # - test: "verify" # attestationVariant: "azure-sev-snp" - # kubernetes-version: "v1.31" - # runner: "macos-latest" + # kubernetes-version: "v1.29" + # runner: "macos-12" - test: "recover" attestationVariant: "gcp-sev-es" - kubernetes-version: "v1.32" - runner: "macos-latest" + kubernetes-version: "v1.29" + runner: "macos-12" clusterCreation: "cli" runs-on: ${{ matrix.runner }} permissions: @@ -138,7 +259,6 @@ jobs: checks: write contents: read packages: write - actions: write steps: - name: Install the basics tools (macOS) if: runner.os == 'macOS' @@ -146,7 +266,7 @@ jobs: run: brew install coreutils kubectl bash - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: fetch-depth: 0 ref: ${{ inputs.ref || github.head_ref }} @@ -162,7 +282,7 @@ jobs: - name: Set up gcloud CLI (macOS) if: steps.split-attestationVariant.outputs.provider == 'gcp' && runner.os == 'macOS' - uses: google-github-actions/setup-gcloud@77e7a554d41e2ee56fc945c52dfd3f33d12def9a # v2.1.4 + uses: google-github-actions/setup-gcloud@98ddc00a17442e89a24bbf282954a3b65ce6d200 # v2.1.0 - name: Run E2E test id: e2e_test @@ -184,7 +304,7 @@ jobs: gcpClusterCreateServiceAccount: "infrastructure-e2e@constellation-e2e.iam.gserviceaccount.com" gcpIAMCreateServiceAccount: "iam-e2e@constellation-e2e.iam.gserviceaccount.com" test: ${{ matrix.test }} - azureSubscriptionID: ${{ secrets.AZURE_SUBSCRIPTION_ID }} + buildBuddyApiKey: ${{ secrets.BUILDBUDDY_ORG_API_KEY }} azureClusterCreateCredentials: ${{ secrets.AZURE_E2E_CLUSTER_CREDENTIALS }} azureIAMCreateCredentials: ${{ secrets.AZURE_E2E_IAM_CREDENTIALS }} registry: ghcr.io @@ -214,23 +334,13 @@ jobs: azureCredentials: ${{ secrets.AZURE_E2E_IAM_CREDENTIALS }} gcpServiceAccount: "iam-e2e@constellation-e2e.iam.gserviceaccount.com" - - name: Update tfstate - if: always() - env: - GH_TOKEN: ${{ github.token }} - uses: ./.github/actions/update_tfstate - with: - name: terraform-state-${{ steps.e2e_test.outputs.namePrefix }} - runID: ${{ github.run_id }} - encryptionSecret: ${{ secrets.ARTIFACT_ENCRYPT_PASSWD }} - e2e-upgrade: strategy: fail-fast: false max-parallel: 1 matrix: - fromVersion: ["v2.23.1"] - attestationVariant: ["gcp-sev-snp", "azure-sev-snp", "azure-tdx", "aws-sev-snp"] + fromVersion: ["v2.15.1"] + attestationVariant: ["gcp-sev-es", "azure-sev-snp", "azure-tdx", "aws-sev-snp"] name: Run upgrade tests secrets: inherit permissions: @@ -238,7 +348,6 @@ jobs: contents: read checks: write packages: write - actions: write uses: ./.github/workflows/e2e-upgrade.yml with: fromVersion: ${{ matrix.fromVersion }} diff --git a/.github/workflows/e2e-test-stackit.yml b/.github/workflows/e2e-test-stackit.yml deleted file mode 100644 index 1977d09cc..000000000 --- a/.github/workflows/e2e-test-stackit.yml +++ /dev/null @@ -1,153 +0,0 @@ -name: e2e test STACKIT - -on: - workflow_dispatch: - schedule: - - cron: "0 0 * * *" # Every day at midnight. 
- -jobs: - find-latest-image: - name: Find latest image - runs-on: ubuntu-24.04 - permissions: - id-token: write - contents: read - outputs: - image-release-stable: ${{ steps.relabel-output.outputs.image-release-stable }} - steps: - - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - with: - ref: ${{ !github.event.pull_request.head.repo.fork && github.head_ref || '' }} - - - name: Select relevant image - id: select-image-action - uses: ./.github/actions/select_image - with: - osImage: "ref/release/stream/stable/?" - - - name: Relabel output - id: relabel-output - shell: bash - run: | - ref=$(echo 'ref/release/stream/stable/?' | cut -d/ -f2) - stream=$(echo 'ref/release/stream/stable/?' | cut -d/ -f4) - - echo "image-$ref-$stream=${{ steps.select-image-action.outputs.osImage }}" | tee -a "$GITHUB_OUTPUT" - - e2e-stackit: - strategy: - fail-fast: false - max-parallel: 6 - matrix: - kubernetesVersion: [ "1.29", "1.30", "1.31" ] - clusterCreation: [ "cli", "terraform" ] - test: [ "sonobuoy quick" ] - runs-on: ubuntu-24.04 - permissions: - id-token: write - checks: write - contents: read - packages: write - actions: write - needs: [find-latest-image] - steps: - - name: Check out repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - with: - fetch-depth: 0 - ref: ${{ !github.event.pull_request.head.repo.fork && github.head_ref || '' }} - - - name: Setup bazel - uses: ./.github/actions/setup_bazel_nix - with: - nixTools: terraform - - - name: Run E2E test - id: e2e_test - uses: ./.github/actions/e2e_test - with: - workerNodesCount: "1" - controlNodesCount: "1" - cloudProvider: stackit - attestationVariant: qemu-vtpm - osImage: ${{ needs.find-latest-image.outputs.image-release-stable }} - isDebugImage: false - cliVersion: ${{ needs.find-latest-image.outputs.image-release-stable || '' }} - kubernetesVersion: ${{ matrix.kubernetesVersion }} - awsOpenSearchDomain: ${{ secrets.AWS_OPENSEARCH_DOMAIN }} - awsOpenSearchUsers: ${{ secrets.AWS_OPENSEARCH_USER }} - awsOpenSearchPwd: ${{ secrets.AWS_OPENSEARCH_PWD }} - gcpProject: constellation-e2e - gcpClusterCreateServiceAccount: "infrastructure-e2e@constellation-e2e.iam.gserviceaccount.com" - gcpIAMCreateServiceAccount: "iam-e2e@constellation-e2e.iam.gserviceaccount.com" - test: ${{ matrix.test }} - azureSubscriptionID: ${{ secrets.AZURE_SUBSCRIPTION_ID }} - azureClusterCreateCredentials: ${{ secrets.AZURE_E2E_CLUSTER_CREDENTIALS }} - azureIAMCreateCredentials: ${{ secrets.AZURE_E2E_IAM_CREDENTIALS }} - registry: ghcr.io - githubToken: ${{ secrets.GITHUB_TOKEN }} - cosignPassword: ${{ secrets.COSIGN_PASSWORD }} - cosignPrivateKey: ${{ secrets.COSIGN_PRIVATE_KEY }} - fetchMeasurements: false - clusterCreation: ${{ matrix.clusterCreation }} - s3AccessKey: ${{ secrets.AWS_ACCESS_KEY_ID_S3PROXY }} - s3SecretKey: ${{ secrets.AWS_SECRET_ACCESS_KEY_S3PROXY }} - encryptionSecret: ${{ secrets.ARTIFACT_ENCRYPT_PASSWD }} - openStackCloudsYaml: ${{ secrets.STACKIT_CI_CLOUDS_YAML }} - stackitUat: ${{ secrets.STACKIT_CI_UAT }} - stackitProjectID: ${{ secrets.STACKIT_CI_PROJECT_ID }} - - - name: Always terminate cluster - if: always() - uses: ./.github/actions/constellation_destroy - with: - kubeconfig: ${{ steps.e2e_test.outputs.kubeconfig }} - clusterCreation: ${{ matrix.clusterCreation }} - cloudProvider: stackit - azureClusterDeleteCredentials: ${{ secrets.AZURE_E2E_CLUSTER_CREDENTIALS }} - gcpClusterDeleteServiceAccount: "infrastructure-e2e@constellation-e2e.iam.gserviceaccount.com" - - - 
name: Always delete IAM configuration - if: always() - uses: ./.github/actions/constellation_iam_destroy - with: - cloudProvider: stackit - azureCredentials: ${{ secrets.AZURE_E2E_IAM_CREDENTIALS }} - gcpServiceAccount: "iam-e2e@constellation-e2e.iam.gserviceaccount.com" - - - name: Update tfstate - if: always() - env: - GH_TOKEN: ${{ github.token }} - uses: ./.github/actions/update_tfstate - with: - name: terraform-state-${{ steps.e2e_test.outputs.namePrefix }} - runID: ${{ github.run_id }} - encryptionSecret: ${{ secrets.ARTIFACT_ENCRYPT_PASSWD }} - - - name: Notify about failure - if: | - failure() && - github.ref == 'refs/heads/main' && - github.event_name == 'schedule' - continue-on-error: true - uses: ./.github/actions/notify_e2e_failure - with: - projectWriteToken: ${{ secrets.PROJECT_WRITE_TOKEN }} - refStream: "ref/release/stream/stable/?" - test: ${{ matrix.test }} - kubernetesVersion: ${{ matrix.kubernetesVersion }} - provider: stackit - attestationVariant: qemu-vtpm - clusterCreation: ${{ matrix.clusterCreation }} - - - name: Notify STACKIT - if: | - failure() && - github.ref == 'refs/heads/main' && - github.event_name == 'schedule' - continue-on-error: true - uses: ./.github/actions/notify_stackit - with: - slackToken: ${{ secrets.SLACK_TOKEN }} diff --git a/.github/workflows/e2e-test-terraform-provider.yml b/.github/workflows/e2e-test-terraform-provider.yml index c7aa5a0e5..f62b204d1 100644 --- a/.github/workflows/e2e-test-terraform-provider.yml +++ b/.github/workflows/e2e-test-terraform-provider.yml @@ -11,20 +11,19 @@ on: description: "Which attestation variant to use." type: choice options: - - "aws-sev-snp" + - "gcp-sev-es" - "azure-sev-snp" - "azure-tdx" - - "gcp-sev-es" - - "gcp-sev-snp" + - "aws-sev-snp" default: "azure-sev-snp" required: true runner: description: "Architecture of the runner that executes the CLI" type: choice options: - - "ubuntu-24.04" - - "macos-latest" - default: "ubuntu-24.04" + - "ubuntu-22.04" + - "macos-12" + default: "ubuntu-22.04" test: description: "The test to run." type: choice @@ -41,6 +40,7 @@ on: required: true kubernetesVersion: description: "Kubernetes version to create the cluster from." + default: "1.28" required: true releaseVersion: description: "Version of a released provider to download. Leave empty to build the provider from the checked out ref." diff --git a/.github/workflows/e2e-test-weekly.yml b/.github/workflows/e2e-test-weekly.yml index dbc33d9fd..8e4459e8c 100644 --- a/.github/workflows/e2e-test-weekly.yml +++ b/.github/workflows/e2e-test-weekly.yml @@ -10,9 +10,9 @@ jobs: strategy: fail-fast: false matrix: - refStream: ["ref/main/stream/nightly/?", "ref/main/stream/debug/?", "ref/release/stream/stable/?"] + refStream: ["ref/main/stream/nightly/?","ref/main/stream/debug/?", "ref/release/stream/stable/?"] name: Find latest image - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 permissions: id-token: write contents: read @@ -22,7 +22,7 @@ jobs: image-main-nightly: ${{ steps.relabel-output.outputs.image-main-nightly }} steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ !github.event.pull_request.head.repo.fork && github.head_ref || '' }} @@ -51,261 +51,184 @@ jobs: # Tests on main-debug refStream # - # Emergency SSH test on latest k8s version - - test: "emergency ssh" - refStream: "ref/main/stream/debug/?" 
- attestationVariant: "gcp-sev-es" - kubernetes-version: "v1.32" - clusterCreation: "cli" - - test: "emergency ssh" - refStream: "ref/main/stream/debug/?" - attestationVariant: "gcp-sev-snp" - kubernetes-version: "v1.32" - clusterCreation: "cli" - - test: "emergency ssh" - refStream: "ref/main/stream/debug/?" - attestationVariant: "azure-sev-snp" - kubernetes-version: "v1.32" - clusterCreation: "cli" - - test: "emergency ssh" - refStream: "ref/main/stream/debug/?" - attestationVariant: "azure-tdx" - kubernetes-version: "v1.32" - clusterCreation: "cli" - - test: "emergency ssh" - refStream: "ref/main/stream/debug/?" - attestationVariant: "aws-sev-snp" - kubernetes-version: "v1.32" - clusterCreation: "cli" - # Sonobuoy full test on latest k8s version - test: "sonobuoy full" refStream: "ref/main/stream/debug/?" attestationVariant: "gcp-sev-es" - kubernetes-version: "v1.32" - clusterCreation: "cli" - - test: "sonobuoy full" - refStream: "ref/main/stream/debug/?" - attestationVariant: "gcp-sev-snp" - kubernetes-version: "v1.32" + kubernetes-version: "v1.29" clusterCreation: "cli" - test: "sonobuoy full" refStream: "ref/main/stream/debug/?" attestationVariant: "azure-sev-snp" - kubernetes-version: "v1.32" + kubernetes-version: "v1.29" clusterCreation: "cli" - test: "sonobuoy full" refStream: "ref/main/stream/debug/?" attestationVariant: "azure-tdx" - kubernetes-version: "v1.32" + kubernetes-version: "v1.29" clusterCreation: "cli" - test: "sonobuoy full" refStream: "ref/main/stream/debug/?" attestationVariant: "aws-sev-snp" - kubernetes-version: "v1.32" - clusterCreation: "cli" - - # Sonobuoy conformance test - - test: "sonobuoy conformance" - refStream: "ref/main/stream/debug/?" - attestationVariant: "gcp-sev-snp" - kubernetes-version: "v1.32" + kubernetes-version: "v1.29" clusterCreation: "cli" # Sonobuoy quick test on all but the latest k8s versions - test: "sonobuoy quick" refStream: "ref/main/stream/debug/?" attestationVariant: "gcp-sev-es" - kubernetes-version: "v1.31" - clusterCreation: "cli" - - test: "sonobuoy quick" - refStream: "ref/main/stream/debug/?" - attestationVariant: "gcp-sev-snp" - kubernetes-version: "v1.31" + kubernetes-version: "v1.28" clusterCreation: "cli" - test: "sonobuoy quick" refStream: "ref/main/stream/debug/?" attestationVariant: "azure-sev-snp" - kubernetes-version: "v1.31" + kubernetes-version: "v1.28" clusterCreation: "cli" - test: "sonobuoy quick" refStream: "ref/main/stream/debug/?" attestationVariant: "azure-tdx" - kubernetes-version: "v1.31" + kubernetes-version: "v1.28" clusterCreation: "cli" - test: "sonobuoy quick" refStream: "ref/main/stream/debug/?" attestationVariant: "aws-sev-snp" - kubernetes-version: "v1.31" + kubernetes-version: "v1.28" clusterCreation: "cli" - test: "sonobuoy quick" refStream: "ref/main/stream/debug/?" attestationVariant: "gcp-sev-es" - kubernetes-version: "v1.30" - clusterCreation: "cli" - - test: "sonobuoy quick" - refStream: "ref/main/stream/debug/?" - attestationVariant: "gcp-sev-snp" - kubernetes-version: "v1.30" + kubernetes-version: "v1.27" clusterCreation: "cli" - test: "sonobuoy quick" refStream: "ref/main/stream/debug/?" attestationVariant: "azure-sev-snp" - kubernetes-version: "v1.30" + kubernetes-version: "v1.27" clusterCreation: "cli" - test: "sonobuoy quick" refStream: "ref/main/stream/debug/?" attestationVariant: "azure-tdx" - kubernetes-version: "v1.30" + kubernetes-version: "v1.27" clusterCreation: "cli" - test: "sonobuoy quick" refStream: "ref/main/stream/debug/?" 
attestationVariant: "aws-sev-snp" - kubernetes-version: "v1.30" + kubernetes-version: "v1.27" clusterCreation: "cli" + # verify test on latest k8s version - test: "verify" refStream: "ref/main/stream/debug/?" attestationVariant: "gcp-sev-es" - kubernetes-version: "v1.32" - clusterCreation: "cli" - - test: "verify" - refStream: "ref/main/stream/debug/?" - attestationVariant: "gcp-sev-snp" - kubernetes-version: "v1.32" + kubernetes-version: "v1.29" clusterCreation: "cli" - test: "verify" refStream: "ref/main/stream/debug/?" attestationVariant: "azure-sev-snp" - kubernetes-version: "v1.32" + kubernetes-version: "v1.29" azureSNPEnforcementPolicy: "equal" # This run checks for unknown ID Key disgests. clusterCreation: "cli" - test: "verify" refStream: "ref/main/stream/debug/?" attestationVariant: "azure-tdx" - kubernetes-version: "v1.32" + kubernetes-version: "v1.29" clusterCreation: "cli" - test: "verify" attestationVariant: "aws-sev-snp" refStream: "ref/main/stream/debug/?" - kubernetes-version: "v1.32" + kubernetes-version: "v1.29" clusterCreation: "cli" # recover test on latest k8s version - test: "recover" refStream: "ref/main/stream/debug/?" attestationVariant: "gcp-sev-es" - kubernetes-version: "v1.32" - clusterCreation: "cli" - - test: "recover" - refStream: "ref/main/stream/debug/?" - attestationVariant: "gcp-sev-snp" - kubernetes-version: "v1.32" + kubernetes-version: "v1.29" clusterCreation: "cli" - test: "recover" refStream: "ref/main/stream/debug/?" attestationVariant: "azure-sev-snp" - kubernetes-version: "v1.32" + kubernetes-version: "v1.29" clusterCreation: "cli" - test: "recover" refStream: "ref/main/stream/debug/?" attestationVariant: "azure-tdx" - kubernetes-version: "v1.32" + kubernetes-version: "v1.29" clusterCreation: "cli" - test: "recover" refStream: "ref/main/stream/debug/?" attestationVariant: "aws-sev-snp" - kubernetes-version: "v1.32" + kubernetes-version: "v1.29" clusterCreation: "cli" # lb test on latest k8s version - test: "lb" refStream: "ref/main/stream/debug/?" attestationVariant: "gcp-sev-es" - kubernetes-version: "v1.32" - clusterCreation: "cli" - - test: "lb" - refStream: "ref/main/stream/debug/?" - attestationVariant: "gcp-sev-snp" - kubernetes-version: "v1.32" + kubernetes-version: "v1.29" clusterCreation: "cli" - test: "lb" refStream: "ref/main/stream/debug/?" attestationVariant: "azure-sev-snp" - kubernetes-version: "v1.32" + kubernetes-version: "v1.29" clusterCreation: "cli" - test: "lb" refStream: "ref/main/stream/debug/?" attestationVariant: "azure-tdx" - kubernetes-version: "v1.32" + kubernetes-version: "v1.29" clusterCreation: "cli" - test: "lb" refStream: "ref/main/stream/debug/?" attestationVariant: "aws-sev-snp" - kubernetes-version: "v1.32" + kubernetes-version: "v1.29" clusterCreation: "cli" # autoscaling test on latest k8s version - test: "autoscaling" refStream: "ref/main/stream/debug/?" attestationVariant: "gcp-sev-es" - kubernetes-version: "v1.32" - clusterCreation: "cli" - - test: "autoscaling" - refStream: "ref/main/stream/debug/?" - attestationVariant: "gcp-sev-snp" - kubernetes-version: "v1.32" + kubernetes-version: "v1.29" clusterCreation: "cli" - test: "autoscaling" refStream: "ref/main/stream/debug/?" attestationVariant: "azure-sev-snp" - kubernetes-version: "v1.32" + kubernetes-version: "v1.29" clusterCreation: "cli" - test: "autoscaling" refStream: "ref/main/stream/debug/?" 
attestationVariant: "azure-tdx" - kubernetes-version: "v1.32" + kubernetes-version: "v1.29" clusterCreation: "cli" - test: "autoscaling" refStream: "ref/main/stream/debug/?" attestationVariant: "aws-sev-snp" - kubernetes-version: "v1.32" + kubernetes-version: "v1.29" clusterCreation: "cli" - # perf-bench test on latest k8s version + # perf-bench test on latest k8s version, not supported on AWS - test: "perf-bench" - refStream: "ref/main/stream/nightly/?" + refStream: "ref/main/stream/debug/?" attestationVariant: "gcp-sev-es" - kubernetes-version: "v1.32" + kubernetes-version: "v1.29" clusterCreation: "cli" - test: "perf-bench" - refStream: "ref/main/stream/nightly/?" - attestationVariant: "gcp-sev-snp" - kubernetes-version: "v1.32" - clusterCreation: "cli" - - test: "perf-bench" - refStream: "ref/main/stream/nightly/?" + refStream: "ref/main/stream/debug/?" attestationVariant: "azure-sev-snp" - kubernetes-version: "v1.32" - clusterCreation: "cli" - - test: "perf-bench" - refStream: "ref/main/stream/nightly/?" - attestationVariant: "azure-tdx" - kubernetes-version: "v1.32" - clusterCreation: "cli" - - test: "perf-bench" - refStream: "ref/main/stream/nightly/?" - attestationVariant: "aws-sev-snp" - kubernetes-version: "v1.32" + kubernetes-version: "v1.29" clusterCreation: "cli" + # TODO: check what needs to be done for perf-bench on Azure TDX + #- test: "perf-bench" + # refStream: "ref/main/stream/debug/?" + # attestationVariant: "azure-tdx" + # kubernetes-version: "v1.29" + # clusterCreation: "cli" # s3proxy test on latest k8s version - test: "s3proxy" refStream: "ref/main/stream/debug/?" attestationVariant: "gcp-sev-es" - kubernetes-version: "v1.32" + kubernetes-version: "v1.29" clusterCreation: "cli" # @@ -316,40 +239,34 @@ jobs: - test: "verify" refStream: "ref/release/stream/stable/?" attestationVariant: "gcp-sev-es" - kubernetes-version: "v1.31" - clusterCreation: "cli" - - test: "verify" - refStream: "ref/release/stream/stable/?" - attestationVariant: "gcp-sev-snp" - kubernetes-version: "v1.31" + kubernetes-version: "v1.28" clusterCreation: "cli" - test: "verify" refStream: "ref/release/stream/stable/?" attestationVariant: "azure-sev-snp" - kubernetes-version: "v1.31" + kubernetes-version: "v1.28" clusterCreation: "cli" - test: "verify" refStream: "ref/release/stream/stable/?" attestationVariant: "azure-tdx" - kubernetes-version: "v1.31" + kubernetes-version: "v1.28" clusterCreation: "cli" - test: "verify" refStream: "ref/release/stream/stable/?" attestationVariant: "aws-sev-snp" - kubernetes-version: "v1.31" + kubernetes-version: "v1.28" clusterCreation: "cli" - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 permissions: id-token: write checks: write contents: read packages: write - actions: write needs: [find-latest-image] steps: - name: Check out repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: fetch-depth: 0 ref: ${{ !github.event.pull_request.head.repo.fork && github.head_ref || '' }} @@ -371,7 +288,7 @@ jobs: controlNodesCount: "3" cloudProvider: ${{ steps.split-attestationVariant.outputs.cloudProvider }} attestationVariant: ${{ matrix.attestationVariant }} - osImage: ${{ matrix.refStream == 'ref/release/stream/stable/?' && needs.find-latest-image.outputs.image-release-stable || matrix.refStream == 'ref/main/stream/nightly/?' 
&& needs.find-latest-image.outputs.image-main-nightly || needs.find-latest-image.outputs.image-main-debug }} + osImage: ${{ matrix.refStream == 'ref/release/stream/stable/?' && needs.find-latest-image.outputs.image-release-stable || needs.find-latest-image.outputs.image-main-debug }} isDebugImage: ${{ matrix.refStream == 'ref/main/stream/debug/?' }} cliVersion: ${{ matrix.refStream == 'ref/release/stream/stable/?' && needs.find-latest-image.outputs.image-release-stable || '' }} kubernetesVersion: ${{ matrix.kubernetes-version }} @@ -383,7 +300,7 @@ jobs: gcpClusterCreateServiceAccount: "infrastructure-e2e@constellation-e2e.iam.gserviceaccount.com" gcpIAMCreateServiceAccount: "iam-e2e@constellation-e2e.iam.gserviceaccount.com" test: ${{ matrix.test }} - azureSubscriptionID: ${{ secrets.AZURE_SUBSCRIPTION_ID }} + buildBuddyApiKey: ${{ secrets.BUILDBUDDY_ORG_API_KEY }} azureClusterCreateCredentials: ${{ secrets.AZURE_E2E_CLUSTER_CREDENTIALS }} azureIAMCreateCredentials: ${{ secrets.AZURE_E2E_IAM_CREDENTIALS }} registry: ghcr.io @@ -415,16 +332,6 @@ jobs: azureCredentials: ${{ secrets.AZURE_E2E_IAM_CREDENTIALS }} gcpServiceAccount: "iam-e2e@constellation-e2e.iam.gserviceaccount.com" - - name: Update tfstate - if: always() - env: - GH_TOKEN: ${{ github.token }} - uses: ./.github/actions/update_tfstate - with: - name: terraform-state-${{ steps.e2e_test.outputs.namePrefix }} - runID: ${{ github.run_id }} - encryptionSecret: ${{ secrets.ARTIFACT_ENCRYPT_PASSWD }} - - name: Notify about failure if: | failure() && @@ -446,8 +353,8 @@ jobs: fail-fast: false max-parallel: 1 matrix: - fromVersion: ["v2.23.1"] - attestationVariant: ["gcp-sev-snp", "azure-sev-snp", "azure-tdx", "aws-sev-snp"] + fromVersion: ["v2.15.1"] + attestationVariant: ["gcp-sev-es", "azure-sev-snp", "azure-tdx", "aws-sev-snp"] name: Run upgrade tests secrets: inherit permissions: @@ -455,7 +362,6 @@ jobs: checks: write contents: read packages: write - actions: write uses: ./.github/workflows/e2e-upgrade.yml with: fromVersion: ${{ matrix.fromVersion }} @@ -465,7 +371,7 @@ jobs: e2e-mini: name: Run miniconstellation E2E test - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 environment: e2e permissions: id-token: write @@ -474,12 +380,12 @@ jobs: steps: - name: Checkout id: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ !github.event.pull_request.head.repo.fork && github.head_ref || '' }} - name: Azure login OIDC - uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2.3.0 + uses: azure/login@cb79c773a3cfa27f31f25eb3f677781210c9ce3d # v1.6.1 with: client-id: ${{ secrets.AZURE_E2E_MINI_CLIENT_ID }} tenant-id: ${{ secrets.AZURE_TENANT_ID }} @@ -491,7 +397,7 @@ jobs: azureClientID: ${{ secrets.AZURE_E2E_MINI_CLIENT_ID }} azureSubscriptionID: ${{ secrets.AZURE_SUBSCRIPTION_ID }} azureTenantID: ${{ secrets.AZURE_TENANT_ID }} - azureIAMCredentials: ${{ secrets.AZURE_E2E_IAM_CREDENTIALS }} + buildBuddyApiKey: ${{ secrets.BUILDBUDDY_ORG_API_KEY }} registry: ghcr.io githubToken: ${{ secrets.GITHUB_TOKEN }} @@ -514,7 +420,6 @@ jobs: id-token: write contents: read packages: write - checks: write secrets: inherit uses: ./.github/workflows/e2e-windows.yml with: @@ -525,7 +430,7 @@ jobs: strategy: fail-fast: false matrix: - attestationVariant: ["gcp-sev-snp", "azure-sev-snp", "azure-tdx", "aws-sev-snp"] + attestationVariant: ["gcp-sev-es", "azure-sev-snp", "azure-tdx", "aws-sev-snp"] permissions: id-token: write 
contents: read diff --git a/.github/workflows/e2e-test.yml b/.github/workflows/e2e-test.yml index 46efa290d..1d5fcfdca 100644 --- a/.github/workflows/e2e-test.yml +++ b/.github/workflows/e2e-test.yml @@ -12,27 +12,24 @@ on: type: choice options: - "gcp-sev-es" - - "gcp-sev-snp" - "azure-sev-snp" - "azure-tdx" - "aws-sev-snp" - - "stackit-qemu-vtpm" default: "azure-sev-snp" required: true runner: description: "Architecture of the runner that executes the CLI" type: choice options: - - "ubuntu-24.04" - - "macos-latest" - default: "ubuntu-24.04" + - "ubuntu-22.04" + - "macos-12" + default: "ubuntu-22.04" test: - description: "The test to run. The conformance test is only supported for clusterCreation=cli." + description: "The test to run." type: choice options: - "sonobuoy quick" - "sonobuoy full" - - "sonobuoy conformance" - "autoscaling" - "lb" - "perf-bench" @@ -40,12 +37,11 @@ on: - "recover" - "malicious join" - "s3proxy" - - "emergency ssh" - "nop" required: true kubernetesVersion: description: "Kubernetes version to create the cluster from." - default: "1.30" + default: "1.28" required: true cliVersion: description: "Version of a released CLI to download. Leave empty to build the CLI from the checked out ref." @@ -85,7 +81,7 @@ on: type: string required: true test: - description: "The test to run. The conformance test is only supported for clusterCreation=cli." + description: "The test to run." type: string required: true kubernetesVersion: @@ -131,7 +127,7 @@ on: jobs: generate-input-parameters: name: Generate input parameters - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 permissions: id-token: write contents: read @@ -139,7 +135,6 @@ jobs: workerNodes: ${{ steps.split-nodeCount.outputs.workerNodes }} controlPlaneNodes: ${{ steps.split-nodeCount.outputs.controlPlaneNodes }} cloudProvider: ${{ steps.split-attestationVariant.outputs.cloudProvider }} - attestationVariant: ${{ steps.split-attestationVariant.outputs.attestationVariant }} steps: - name: Split nodeCount id: split-nodeCount @@ -164,17 +159,11 @@ jobs: attestationVariant="${{ inputs.attestationVariant }}" cloudProvider="${attestationVariant%%-*}" - # special case for STACKIT, as there's no special attestation variant for it - if [[ "${cloudProvider}" == "stackit" ]]; then - attestationVariant="qemu-vtpm" - fi - - echo "attestationVariant=${attestationVariant}" | tee -a "$GITHUB_OUTPUT" echo "cloudProvider=${cloudProvider}" | tee -a "$GITHUB_OUTPUT" find-latest-image: name: Select image - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 permissions: id-token: write contents: read @@ -184,13 +173,13 @@ jobs: steps: - name: Checkout head if: inputs.git-ref == 'head' - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ !github.event.pull_request.head.repo.fork && github.head_ref || '' }} - name: Checkout ref if: inputs.git-ref != 'head' - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ inputs.git-ref }} @@ -210,7 +199,6 @@ jobs: checks: write contents: read packages: write - actions: write needs: [find-latest-image, generate-input-parameters] if: always() && !cancelled() steps: @@ -221,19 +209,19 @@ jobs: - name: Checkout head if: inputs.git-ref == 'head' - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: 
ref: ${{ !github.event.pull_request.head.repo.fork && github.head_ref || '' }} - name: Checkout ref if: inputs.git-ref != 'head' - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ inputs.git-ref }} - name: Set up gcloud CLI (macOS) if: needs.generate-input-parameters.outputs.cloudProvider == 'gcp' && runner.os == 'macOS' - uses: google-github-actions/setup-gcloud@77e7a554d41e2ee56fc945c52dfd3f33d12def9a # v2.1.4 + uses: google-github-actions/setup-gcloud@98ddc00a17442e89a24bbf282954a3b65ce6d200 # v2.1.0 - name: Run manual E2E test id: e2e_test @@ -242,7 +230,7 @@ jobs: workerNodesCount: ${{ needs.generate-input-parameters.outputs.workerNodes }} controlNodesCount: ${{ needs.generate-input-parameters.outputs.controlPlaneNodes }} cloudProvider: ${{ needs.generate-input-parameters.outputs.cloudProvider }} - attestationVariant: ${{ needs.generate-input-parameters.outputs.attestationVariant }} + attestationVariant: ${{ inputs.attestationVariant }} machineType: ${{ inputs.machineType }} regionZone: ${{ inputs.regionZone }} gcpProject: constellation-e2e @@ -256,7 +244,7 @@ jobs: osImage: ${{ needs.find-latest-image.outputs.image }} cliVersion: ${{ inputs.cliVersion }} isDebugImage: ${{ needs.find-latest-image.outputs.isDebugImage }} - azureSubscriptionID: ${{ secrets.AZURE_SUBSCRIPTION_ID }} + buildBuddyApiKey: ${{ secrets.BUILDBUDDY_ORG_API_KEY }} azureClusterCreateCredentials: ${{ secrets.AZURE_E2E_CLUSTER_CREDENTIALS }} azureIAMCreateCredentials: ${{ secrets.AZURE_E2E_IAM_CREDENTIALS }} registry: ghcr.io @@ -271,9 +259,6 @@ jobs: marketplaceImageVersion: ${{ inputs.marketplaceImageVersion }} force: ${{ inputs.force }} encryptionSecret: ${{ secrets.ARTIFACT_ENCRYPT_PASSWD }} - openStackCloudsYaml: ${{ secrets.STACKIT_CI_CLOUDS_YAML }} - stackitUat: ${{ secrets.STACKIT_CI_UAT }} - stackitProjectID: ${{ secrets.STACKIT_CI_PROJECT_ID }} - name: Always terminate cluster if: always() @@ -292,13 +277,3 @@ jobs: cloudProvider: ${{ needs.generate-input-parameters.outputs.cloudProvider }} azureCredentials: ${{ secrets.AZURE_E2E_IAM_CREDENTIALS }} gcpServiceAccount: "iam-e2e@constellation-e2e.iam.gserviceaccount.com" - - - name: Update tfstate - if: always() - env: - GH_TOKEN: ${{ github.token }} - uses: ./.github/actions/update_tfstate - with: - name: terraform-state-${{ steps.e2e_test.outputs.namePrefix }} - runID: ${{ github.run_id }} - encryptionSecret: ${{ secrets.ARTIFACT_ENCRYPT_PASSWD }} diff --git a/.github/workflows/e2e-upgrade.yml b/.github/workflows/e2e-upgrade.yml index cd7c6bc1b..1741e2e9f 100644 --- a/.github/workflows/e2e-upgrade.yml +++ b/.github/workflows/e2e-upgrade.yml @@ -7,11 +7,10 @@ on: description: "Which attestation variant to use." type: choice options: - - "aws-sev-snp" + - "gcp-sev-es" - "azure-sev-snp" - "azure-tdx" - - "gcp-sev-es" - - "gcp-sev-snp" + - "aws-sev-snp" default: "azure-sev-snp" required: true nodeCount: @@ -22,10 +21,6 @@ on: description: CLI version to create a new cluster with. This has to be a released version, e.g., 'v2.1.3'. type: string required: true - fromKubernetes: - description: Kubernetes version for the origin cluster, empty for origin target's default version. - type: string - required: false gitRef: description: Ref to build upgrading CLI on, empty for HEAD. 
type: string @@ -36,11 +31,11 @@ on: type: string required: false toKubernetes: - description: Kubernetes version to target for the upgrade, empty for upgrade target's default version. + description: Kubernetes version to target for the upgrade, empty for target's default version. type: string required: false toMicroservices: - description: Microservice version to target for the upgrade, empty for upgrade target's default version. + description: Microservice version to target for the upgrade, empty for target's default version. type: string required: false simulatedTargetVersion: @@ -64,10 +59,6 @@ on: description: CLI version to create a new cluster with. This has to be a released version, e.g., 'v2.1.3'. type: string required: true - fromKubernetes: - description: Kubernetes version for the origin cluster, empty for origin target's default version. - type: string - required: false gitRef: description: Ref to build upgrading CLI on. type: string @@ -98,7 +89,7 @@ on: jobs: generate-input-parameters: name: Generate input parameters - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 permissions: id-token: write contents: read @@ -132,9 +123,63 @@ jobs: echo "cloudProvider=${cloudProvider}" | tee -a "$GITHUB_OUTPUT" + build-target-cli: + name: Build upgrade target version CLI + runs-on: ubuntu-22.04 + permissions: + id-token: write + checks: write + contents: read + packages: write + steps: + - name: Checkout + if: inputs.gitRef == 'head' + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + fetch-depth: 0 + ref: ${{ !github.event.pull_request.head.repo.fork && github.head_ref || '' }} + + - name: Checkout ref + if: inputs.gitRef != 'head' + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + fetch-depth: 0 + ref: ${{ inputs.gitRef }} + + - name: Setup Bazel & Nix + uses: ./.github/actions/setup_bazel_nix + with: + useCache: "true" + buildBuddyApiKey: ${{ secrets.BUILDBUDDY_ORG_API_KEY }} + + - name: Log in to the Container registry + uses: ./.github/actions/container_registry_login + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Simulate patch upgrade + if: inputs.simulatedTargetVersion != '' + run: | + echo ${{ inputs.simulatedTargetVersion }} > version.txt + + - name: Build CLI + uses: ./.github/actions/build_cli + with: + enterpriseCLI: true + outputPath: "build/constellation" + push: true + + - name: Upload CLI binary + uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 + with: + name: constellation-upgrade-${{ inputs.attestationVariant }} + path: build/constellation + create-cluster: name: Create upgrade origin version cluster - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 permissions: id-token: write checks: write @@ -143,23 +188,25 @@ jobs: needs: [generate-input-parameters] outputs: kubeconfig: ${{ steps.e2e_test.outputs.kubeconfig }} - e2e-name-prefix: ${{ steps.e2e_test.outputs.namePrefix }} steps: - name: Checkout if: inputs.gitRef == 'head' - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: fetch-depth: 0 ref: ${{ !github.event.pull_request.head.repo.fork && github.head_ref || '' }} - name: Checkout ref if: inputs.gitRef != 'head' - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: fetch-depth: 0 ref: ${{ inputs.gitRef }} - 
uses: ./.github/actions/setup_bazel_nix + with: + useCache: "true" + buildBuddyApiKey: ${{ secrets.BUILDBUDDY_ORG_API_KEY }} - name: Create cluster with 'fromVersion' CLI. id: e2e_test @@ -172,13 +219,12 @@ jobs: osImage: ${{ inputs.fromVersion }} isDebugImage: "false" cliVersion: ${{ inputs.fromVersion }} - kubernetesVersion: ${{ inputs.fromKubernetes }} regionZone: ${{ inputs.regionZone }} gcpProject: constellation-e2e gcpClusterCreateServiceAccount: "infrastructure-e2e@constellation-e2e.iam.gserviceaccount.com" gcpIAMCreateServiceAccount: "iam-e2e@constellation-e2e.iam.gserviceaccount.com" test: "upgrade" - azureSubscriptionID: ${{ secrets.AZURE_SUBSCRIPTION_ID }} + buildBuddyApiKey: ${{ secrets.BUILDBUDDY_ORG_API_KEY }} azureClusterCreateCredentials: ${{ secrets.AZURE_E2E_CLUSTER_CREDENTIALS }} azureIAMCreateCredentials: ${{ secrets.AZURE_E2E_IAM_CREDENTIALS }} registry: ghcr.io @@ -220,7 +266,7 @@ jobs: e2e-upgrade: name: Run upgrade test - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 permissions: id-token: write checks: write @@ -228,53 +274,31 @@ jobs: packages: write needs: - generate-input-parameters + - build-target-cli - create-cluster steps: - name: Checkout if: inputs.gitRef == 'head' - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: fetch-depth: 0 ref: ${{ !github.event.pull_request.head.repo.fork && github.head_ref || '' }} - name: Checkout ref if: inputs.gitRef != 'head' - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: fetch-depth: 0 ref: ${{ inputs.gitRef }} - name: Setup Bazel & Nix uses: ./.github/actions/setup_bazel_nix - - - name: Log in to the Container registry - uses: ./.github/actions/container_registry_login with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - # applying the version manipulation here so that the upgrade test tool is also on the simulated target version - - name: Simulate patch upgrade - if: inputs.simulatedTargetVersion != '' - run: | - echo ${{ inputs.simulatedTargetVersion }} > version.txt - - - name: Build CLI - uses: ./.github/actions/build_cli - with: - enterpriseCLI: true - outputPath: "build/constellation" - push: true - - - name: Upload CLI binary # is needed for the cleanup step - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 - with: - name: constellation-upgrade-${{ inputs.attestationVariant }} - path: build/constellation + useCache: "true" + buildBuddyApiKey: ${{ secrets.BUILDBUDDY_ORG_API_KEY }} - name: Login to AWS - uses: aws-actions/configure-aws-credentials@b47578312673ae6fa5b5096b330d9fbac3d116df # v4.2.1 + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 with: role-to-assume: arn:aws:iam::795746500882:role/GithubConstellationVersionsAPIRead aws-region: eu-central-1 @@ -296,7 +320,7 @@ jobs: - name: Login to AWS (IAM role) if: needs.generate-input-parameters.outputs.cloudProvider == 'aws' - uses: aws-actions/configure-aws-credentials@b47578312673ae6fa5b5096b330d9fbac3d116df # v4.2.1 + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 with: role-to-assume: arn:aws:iam::795746500882:role/GithubActionsE2EIAM aws-region: eu-central-1 @@ -309,6 +333,11 @@ jobs: with: azure_credentials: ${{ secrets.AZURE_E2E_IAM_CREDENTIALS }} + - name: Download CLI + uses: 
actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 + with: + name: constellation-upgrade-${{ inputs.attestationVariant }} + path: build - name: Download Working Directory (Pre-test) uses: ./.github/actions/artifact_download @@ -347,7 +376,7 @@ jobs: - name: Login to AWS (Cluster role) if: always() && needs.generate-input-parameters.outputs.cloudProvider == 'aws' - uses: aws-actions/configure-aws-credentials@b47578312673ae6fa5b5096b330d9fbac3d116df # v4.2.1 + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 with: role-to-assume: arn:aws:iam::795746500882:role/GithubActionsE2ECluster aws-region: eu-central-1 @@ -366,16 +395,21 @@ jobs: IMAGE: ${{ inputs.toImage && inputs.toImage || steps.find-image.outputs.output }} KUBERNETES: ${{ inputs.toKubernetes }} MICROSERVICES: ${{ inputs.toMicroservices }} - WORKERNODES: ${{ needs.generate-input-parameters.outputs.workerNodes }} + WORKERNODES: ${{ needs.generate-input-parameters.outputs.workerNodes }} CONTROLNODES: ${{ needs.generate-input-parameters.outputs.controlPlaneNodes }} run: | echo "Image target: $IMAGE" echo "K8s target: $KUBERNETES" echo "Microservice target: $MICROSERVICES" - sudo sh -c 'echo "127.0.0.1 license.confidential.cloud" >> /etc/hosts' - CLI=$(realpath ./build/constellation) - bazel run --test_timeout=14400 //e2e/internal/upgrade:upgrade_test -- --want-worker "$WORKERNODES" --want-control "$CONTROLNODES" --target-image "$IMAGE" --target-kubernetes "$KUBERNETES" --target-microservices "$MICROSERVICES" --cli "$CLI" + if [[ -n ${MICROSERVICES} ]]; then + MICROSERVICES_FLAG="--target-microservices=$MICROSERVICES" + fi + if [[ -n ${KUBERNETES} ]]; then + KUBERNETES_FLAG="--target-kubernetes=$KUBERNETES" + fi + + bazel run //e2e/internal/upgrade:upgrade_test -- --want-worker "$WORKERNODES" --want-control "$CONTROLNODES" --target-image "$IMAGE" "$KUBERNETES_FLAG" "$MICROSERVICES_FLAG" - name: Remove Terraform plugin cache if: always() @@ -399,32 +433,31 @@ jobs: clean-up: name: Clean up resources - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 permissions: id-token: write checks: write contents: read packages: write - actions: write if: always() needs: [generate-input-parameters, create-cluster, e2e-upgrade] steps: - name: Checkout if: inputs.gitRef == 'head' - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: fetch-depth: 0 ref: ${{ !github.event.pull_request.head.repo.fork && github.head_ref || '' }} - name: Checkout ref if: inputs.gitRef != 'head' - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: fetch-depth: 0 ref: ${{ inputs.gitRef }} - name: Download CLI - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 + uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 with: name: constellation-upgrade-${{ inputs.attestationVariant }} path: build @@ -470,17 +503,6 @@ jobs: constellation-version.yaml encryptionSecret: ${{ secrets.ARTIFACT_ENCRYPT_PASSWD }} - - name: Prepare terraform state artifact upload - if: always() - shell: bash - run: | - mkdir -p to-zip - cp -r constellation-terraform to-zip - rm -f to-zip/constellation-terraform/plan.zip - rm -rf to-zip/constellation-terraform/.terraform - cp -r constellation-iam-terraform to-zip - rm -rf to-zip/constellation-iam-terraform/.terraform - - name: 
Always terminate cluster if: always() uses: ./.github/actions/constellation_destroy @@ -499,16 +521,6 @@ jobs: azureCredentials: ${{ secrets.AZURE_E2E_IAM_CREDENTIALS }} gcpServiceAccount: "iam-e2e@constellation-e2e.iam.gserviceaccount.com" - - name: Update tfstate - if: always() - env: - GH_TOKEN: ${{ github.token }} - uses: ./.github/actions/update_tfstate - with: - name: terraform-state-${{ needs.create-cluster.outputs.e2e-name-prefix }} - runID: ${{ github.run_id }} - encryptionSecret: ${{ secrets.ARTIFACT_ENCRYPT_PASSWD }} - - name: Notify about failure if: | always() && diff --git a/.github/workflows/e2e-windows.yml b/.github/workflows/e2e-windows.yml index ad4acb877..786b87978 100644 --- a/.github/workflows/e2e-windows.yml +++ b/.github/workflows/e2e-windows.yml @@ -13,27 +13,18 @@ on: jobs: build-cli: name: Build Windows CLI - runs-on: ubuntu-24.04 - permissions: - id-token: write - checks: write - contents: read - packages: write + runs-on: ubuntu-22.04 steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ !github.event.pull_request.head.repo.fork && github.head_ref || '' }} - name: Setup bazel uses: ./.github/actions/setup_bazel_nix - - - name: Log in to the Container registry - uses: ./.github/actions/container_registry_login with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} + useCache: "true" + buildBuddyApiKey: ${{ secrets.BUILDBUDDY_ORG_API_KEY }} - name: Build CLI uses: ./.github/actions/build_cli @@ -41,35 +32,33 @@ jobs: targetOS: "windows" targetArch: "amd64" enterpriseCLI: true - outputPath: "build/constellation" - push: true - name: Upload CLI artifact - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: - path: build/constellation.exe + path: "bazel-bin/cli/cli_enterprise_windows_amd64" name: "constell-exe" e2e-test: name: E2E Test Windows - runs-on: windows-2025 + runs-on: windows-2022 needs: build-cli steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ !github.event.pull_request.head.repo.fork && github.head_ref || '' }} - name: Download CLI artifact - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 + uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 with: name: "constell-exe" - name: Check CLI version shell: pwsh run: | + Move-Item -Path .\cli_enterprise_windows_amd64 -Destination .\constellation.exe .\constellation.exe version - Add-Content -Path $env:windir\System32\drivers\etc\hosts -Value "`n127.0.0.1`tlicense.confidential.cloud" -Force - name: Login to Azure (IAM service principal) uses: ./.github/actions/login_azure @@ -77,14 +66,10 @@ jobs: azure_credentials: ${{ secrets.AZURE_E2E_IAM_CREDENTIALS }} - name: Create IAM configuration - id: iam-create shell: pwsh run: | - $uid = Get-Random -Minimum 1000 -Maximum 9999 - $rgName = "e2e-win-${{ github.run_id }}-${{ github.run_attempt }}-$uid" - "rgName=$($rgName)" | Out-File -FilePath $env:GITHUB_OUTPUT -Append - .\constellation.exe config generate azure -t "workflow=${{ github.run_id }}" - .\constellation.exe iam create azure --subscriptionID=${{ secrets.AZURE_SUBSCRIPTION_ID }} --region=westus --resourceGroup=$rgName-rg 
--servicePrincipal=$rgName-sp --update-config --debug -y + .\constellation.exe config generate azure + .\constellation.exe iam create azure --region=westus --resourceGroup=e2eWindoewsRG --servicePrincipal=e2eWindoewsSP --update-config --debug -y - name: Login to Azure (Cluster service principal) uses: ./.github/actions/login_azure @@ -110,31 +95,24 @@ jobs: Write-Host "Retry ${retryCount}: Checking node status..." $nodesOutput = & kubectl get nodes --kubeconfig "$PWD\constellation-admin.conf" - $status = $? - $nodesOutput + $lines = $nodesOutput -split "`r?`n" | Select-Object -Skip 1 - if ($status) { - $lines = $nodesOutput -split "`r?`n" | Select-Object -Skip 1 + $allNodesReady = $true - if ($lines.count -eq 4) { - $allNodesReady = $true + foreach ($line in $lines) { + $columns = $line -split '\s+' | Where-Object { $_ -ne '' } - foreach ($line in $lines) { - $columns = $line -split '\s+' | Where-Object { $_ -ne '' } + $nodeName = $columns[0] + $status = $columns[1] - $nodeName = $columns[0] - $status = $columns[1] - - if ($status -ne "Ready") { - Write-Host "Node $nodeName is not ready!" - $allNodesReady = $false - } - } + if ($status -ne "Ready") { + Write-Host "Node $nodeName is not ready!" + $allNodesReady = $false } } - if (-not $allNodesReady -and $retryCount -lt $maxRetries) { + if (-not $allNodesReady) { Write-Host "Retrying in $retryIntervalSeconds seconds..." Start-Sleep -Seconds $retryIntervalSeconds } @@ -149,7 +127,6 @@ jobs: } - name: Terminate cluster - id: terminate-cluster if: always() shell: pwsh run: | @@ -162,23 +139,14 @@ jobs: azure_credentials: ${{ secrets.AZURE_E2E_IAM_CREDENTIALS }} - name: Delete IAM configuration - id: delete-iam if: always() shell: pwsh run: | .\constellation.exe iam destroy --debug -y - - name: Clean up after failure - # run on a cleanup failure or if cancelled - if: (failure() && (steps.terminate-cluster.conclusion == 'failure' || steps.delete-iam.conclusion == 'failure')) || cancelled() - shell: pwsh - run: | - az group delete --name ${{ steps.iam-create.outputs.rgName }}-rg --yes - az group delete --name ${{ steps.iam-create.outputs.rgName }}-rg-identity --yes - notify-failure: name: Notify about failure - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 needs: e2e-test if: | failure() && @@ -186,12 +154,15 @@ jobs: inputs.scheduled steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ !github.event.pull_request.head.repo.fork && github.head_ref || '' }} - name: Setup bazel uses: ./.github/actions/setup_bazel_nix + with: + useCache: "true" + buildBuddyApiKey: ${{ secrets.BUILDBUDDY_ORG_API_KEY }} - name: Notify about failure continue-on-error: true diff --git a/.github/workflows/on-release.yml b/.github/workflows/on-release.yml index 73fe2c3b1..f2853a7a8 100644 --- a/.github/workflows/on-release.yml +++ b/.github/workflows/on-release.yml @@ -15,7 +15,7 @@ on: jobs: complete-release-branch-transaction: - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 permissions: id-token: write contents: write @@ -26,7 +26,7 @@ jobs: WORKING_BRANCH: ${{ env.WORKING_BRANCH }} steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: fetch-depth: 0 # fetch all history @@ -44,12 +44,12 @@ jobs: git push origin "${WORKING_BRANCH}":"${RELEASE_BRANCH}" update: - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 outputs: 
latest: ${{ steps.input-passthrough.outputs.latest }}${{ steps.check-last-release.outputs.latest }} steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Override latest if: github.event.inputs.latest == 'true' @@ -117,13 +117,13 @@ jobs: add-image-version-to-versionsapi, add-cli-version-to-versionsapi, ] - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 permissions: id-token: write contents: write steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Remove temporary branch run: git push origin --delete "${{needs.complete-release-branch-transaction.outputs.WORKING_BRANCH}}" @@ -131,18 +131,20 @@ jobs: mirror-gcp-mpi: name: "Mirror GCP Marketplace Image" needs: [add-image-version-to-versionsapi] - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 permissions: id-token: write contents: read steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - uses: ./.github/actions/setup_bazel_nix + with: + useCache: "false" - name: Login to AWS - uses: aws-actions/configure-aws-credentials@b47578312673ae6fa5b5096b330d9fbac3d116df # v4.2.1 + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 with: role-to-assume: arn:aws:iam::795746500882:role/GitHubConstellationImagePipeline aws-region: eu-central-1 @@ -160,7 +162,7 @@ jobs: shell: bash run: | aws s3 cp s3://cdn-constellation-backend/constellation/v2/ref/-/stream/stable/${{ steps.fetch-version.outputs.output }}/image/info.json . 
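For context on the mirror-gcp-mpi step above: the fetched info.json lists, per attestation variant, a reference to the corresponding OS image. A rough way to inspect the same data locally, assuming the public CDN at cdn.confidential.cloud serves the same path as the s3://cdn-constellation-backend bucket used by the workflow (the URL and the version value below are illustrative assumptions, not taken from this change):

  # Illustrative version; the workflow derives it from the release inputs.
  VERSION="v2.16.0"
  # Assumed public mirror of the bucket path fetched via "aws s3 cp" above.
  curl -fsSL "https://cdn.confidential.cloud/constellation/v2/ref/-/stream/stable/${VERSION}/image/info.json" -o info.json
  # List every attestation variant and its image reference (yq v4 syntax, as in the workflow).
  yq e '.list.[] | .attestationVariant + ": " + .reference' info.json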
- FULL_REF=$(yq e -r -oy '.list.[] | select(.attestationVariant == "gcp-sev-snp") | .reference' info.json) + FULL_REF=$(yq e -r -oy '.list.[] | select(.attestationVariant == "gcp-sev-es") | .reference' info.json) IMAGE_NAME=$(echo "${FULL_REF}" | cut -d / -f 5) echo "reference=$IMAGE_NAME" | tee -a "$GITHUB_OUTPUT" diff --git a/.github/workflows/purge-main.yml b/.github/workflows/purge-main.yml index 5a64705c4..1ec055c46 100644 --- a/.github/workflows/purge-main.yml +++ b/.github/workflows/purge-main.yml @@ -10,7 +10,7 @@ on: jobs: find-version: name: Delete version from main ref - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 outputs: version: ${{ steps.find.outputs.version }} permissions: @@ -18,12 +18,12 @@ jobs: contents: read steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ github.head_ref }} - name: Login to AWS - uses: aws-actions/configure-aws-credentials@b47578312673ae6fa5b5096b330d9fbac3d116df # v4.2.1 + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 with: role-to-assume: arn:aws:iam::795746500882:role/GithubConstellationVersionsAPIRead aws-region: eu-central-1 @@ -47,8 +47,6 @@ jobs: ;; esac - - uses: ./.github/actions/setup_bazel_nix - - name: List versions id: list uses: ./.github/actions/versionsapi diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml deleted file mode 100644 index 2699b0895..000000000 --- a/.github/workflows/release-publish.yml +++ /dev/null @@ -1,79 +0,0 @@ -name: 'Release: on-publish' - -on: - release: - types: - - published - workflow_dispatch: - inputs: - tag: - description: 'Semantic version tag of the release (vX.Y.Z).' - required: true - -jobs: - post-release-actions: - runs-on: ubuntu-24.04 - permissions: - issues: write - env: - FULL_VERSION: ${{ github.event.release.tag_name }}${{ github.event.inputs.tag }} - GH_TOKEN: ${{ github.token }} - steps: - - name: Mark milestone as complete - run: | - milestones=$(gh api \ - -H "Accept: application/vnd.github+json" \ - -H "X-GitHub-Api-Version: 2022-11-28" \ - /repos/edgelesssys/constellation/milestones) - - current_milestone=$(echo "${milestones}" | jq -r ".[] | select(.title == \"${FULL_VERSION}\")") - echo "current milestone: ${current_milestone}" - if [[ -z "${current_milestone}" ]]; then - echo "milestone ${FULL_VERSION} does not exist, nothing to do..." - exit 0 - fi - - current_milestone_state=$(echo "${current_milestone}" | jq -r '.state') - echo "current milestone state: ${current_milestone_state}" - if [[ "${current_milestone_state}" != "open" ]]; then - echo "milestone ${FULL_VERSION} is already closed, nothing to do..." 
- exit 0 - fi - - milestone_number=$(echo "${current_milestone}" | jq -r '.number') - echo "milestone number: ${milestone_number}" - if [[ -z "${milestone_number}" ]]; then - echo "failed parsing milestone number" - exit 1 - fi - - gh api \ - --method PATCH \ - -H "Accept: application/vnd.github+json" \ - -H "X-GitHub-Api-Version: 2022-11-28" \ - "/repos/edgelesssys/constellation/milestones/${milestone_number}" \ - -f state=closed - - - name: Create next milestone - run: | - WITHOUT_V=${FULL_VERSION#v} - PART_MAJOR=${WITHOUT_V%%.*} - PART_MINOR=${WITHOUT_V#*.} - PART_MINOR=${PART_MINOR%%.*} - NEXT_MINOR=v${PART_MAJOR}.$((PART_MINOR + 1)).0 - - gh api \ - -H "Accept: application/vnd.github+json" \ - -H "X-GitHub-Api-Version: 2022-11-28" \ - /repos/edgelesssys/constellation/milestones | - jq -r '.[].title' | \ - grep -xqF "${NEXT_MINOR}" && exit 0 - - gh api \ - --method POST \ - -H "Accept: application/vnd.github+json" \ - -H "X-GitHub-Api-Version: 2022-11-28" \ - /repos/edgelesssys/constellation/milestones \ - -f title="${NEXT_MINOR}" \ - -f state='open' \ - -f "due_on=$(date -d '2 months' +'%Y-%m-%dT00:00:00Z')" diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 08e09da18..a09cbff11 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -19,7 +19,7 @@ concurrency: jobs: verify-inputs: name: Verify inputs - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 env: FULL_VERSION: ${{ inputs.version }} outputs: @@ -33,7 +33,7 @@ jobs: RELEASE_BRANCH: ${{ steps.version-info.outputs.RELEASE_BRANCH }} WORKING_BRANCH: ${{ steps.version-info.outputs.WORKING_BRANCH }} steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Working branch run: echo "WORKING_BRANCH=$(git branch --show-current)" | tee -a "$GITHUB_ENV" @@ -72,9 +72,10 @@ jobs: echo "WORKING_BRANCH=${WORKING_BRANCH}" } | tee -a "$GITHUB_OUTPUT" - update-main-branch: - name: Update main branch with release changes - runs-on: ubuntu-24.04 + docs: + name: Create docs release (from main) + runs-on: ubuntu-22.04 + if: inputs.kind == 'minor' needs: verify-inputs permissions: contents: write @@ -84,61 +85,36 @@ jobs: MAJOR_MINOR: ${{ needs.verify-inputs.outputs.MAJOR_MINOR }} BRANCH: docs/${{ needs.verify-inputs.outputs.MAJOR_MINOR }} steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: main - - name: Configure git - run: | - git config --global user.name "edgelessci" - git config --global user.email "edgelessci@users.noreply.github.com" - - name: Create docs release - if: inputs.kind == 'minor' working-directory: docs run: | - npm ci + npm install npm run docusaurus docs:version "${MAJOR_MINOR}" - git add . 
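As a worked example of the next-milestone arithmetic in the removed release-publish.yml script above, the shell parameter expansions reduce to the following (the input tag is illustrative):

  FULL_VERSION="v2.16.3"            # example release tag
  WITHOUT_V=${FULL_VERSION#v}       # 2.16.3
  PART_MAJOR=${WITHOUT_V%%.*}       # 2
  PART_MINOR=${WITHOUT_V#*.}        # 16.3
  PART_MINOR=${PART_MINOR%%.*}      # 16
  NEXT_MINOR="v${PART_MAJOR}.$((PART_MINOR + 1)).0"
  echo "${NEXT_MINOR}"              # prints v2.17.0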
- git commit -am "docs: release ${MAJOR_MINOR}" - # Clean up auxiliary files, so next steps run on a clean tree - git clean -fdx :/ - - - name: Update version.txt - if: inputs.kind == 'minor' - run: | - pre_release_version="v${{ needs.verify-inputs.outputs.PART_MAJOR }}.$((${{ needs.verify-inputs.outputs.PART_MINOR }} + 1)).0-pre" - echo "${pre_release_version}" > version.txt - git add version.txt - git commit -m "chore: update version.txt to ${pre_release_version}" - - - name: Update CI for new version - run: | - sed -i 's/fromVersion: \["[^"]*"\]/fromVersion: ["${{ inputs.version }}"]/g' .github/workflows/e2e-test-release.yml - sed -i 's/fromVersion: \["[^"]*"\]/fromVersion: ["${{ inputs.version }}"]/g' .github/workflows/e2e-test-weekly.yml - name: Create docs pull request - uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8 + uses: peter-evans/create-pull-request@b1ddad2c994a25fbc81a28b3ec0e368bb2021c50 # v6.0.0 with: branch: ${{ env.BRANCH }} base: main - title: "Post ${{ env.VERSION }} release updates to main" + title: "docs: add release ${{ env.VERSION }}" body: | :robot: *This is an automated PR.* :robot: The PR is triggered as part of the automated release process of version ${{ env.VERSION }}. - commit-message: "chore: update CI for ${{ env.VERSION }}" + It releases a new version of the documentation. + commit-message: "docs: add release ${{ env.VERSION }}" committer: edgelessci - author: edgelessci labels: no changelog - assignees: ${{ github.actor }} - reviewers: ${{ github.actor }} # We need to push changes using a token, otherwise triggers like on:push and on:pull_request won't work. token: ${{ !github.event.pull_request.head.repo.fork && secrets.CI_COMMIT_PUSH_PR || '' }} check-working-branch: name: Check temporary working branch - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 needs: verify-inputs permissions: contents: write @@ -147,7 +123,7 @@ jobs: WORKING_BRANCH: ${{ needs.verify-inputs.outputs.WORKING_BRANCH }} steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ needs.verify-inputs.outputs.WORKING_BRANCH }} @@ -176,7 +152,7 @@ jobs: update-versions: name: Update container image versions needs: [verify-inputs, check-working-branch] - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 permissions: contents: write packages: read @@ -185,7 +161,7 @@ jobs: WITHOUT_V: ${{ needs.verify-inputs.outputs.WITHOUT_V }} steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ needs.verify-inputs.outputs.WORKING_BRANCH }} @@ -205,7 +181,7 @@ jobs: yq eval -i ".version = \"$WITHOUT_V\"" s3proxy/deploy/s3proxy/Chart.yaml yq eval -i ".image = \"ghcr.io/edgelesssys/constellation/s3proxy:$VERSION\"" s3proxy/deploy/s3proxy/values.yaml - git add s3proxy/deploy/s3proxy/Chart.yaml s3proxy/deploy/s3proxy/values.yaml + git add s3proxy/deploy/s3proxy/Chart.yaml - name: Commit run: | @@ -239,41 +215,25 @@ jobs: stream: "stable" ref: ${{ needs.verify-inputs.outputs.WORKING_BRANCH }} - check-measurements-reproducibility: - name: Check measurements reproducibility - needs: [verify-inputs, os-image] - runs-on: ubuntu-24.04 - steps: - - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - with: - ref: ${{ needs.verify-inputs.outputs.WORKING_BRANCH }} - - - name: Check 
reproducibility - uses: ./.github/actions/check_measurements_reproducibility - with: - version: ${{ inputs.version }} - ref: ${{ needs.verify-inputs.outputs.WORKING_BRANCH }} - update-hardcoded-measurements: name: Update hardcoded measurements (in the CLI) needs: [verify-inputs, os-image] permissions: contents: write - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 env: VERSION: ${{ inputs.version }} WITHOUT_V: ${{ needs.verify-inputs.outputs.WITHOUT_V }} steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ needs.verify-inputs.outputs.WORKING_BRANCH }} - name: Setup Go environment - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: - go-version: "1.24.3" + go-version: "1.22.1" cache: true - name: Build generateMeasurements tool @@ -290,12 +250,8 @@ jobs: run: | git config --global user.name "edgelessci" git config --global user.email "edgelessci@users.noreply.github.com" - if git diff-index --quiet HEAD --; then - echo "No changes to commit" - else - git commit -m "attestation: hardcode measurements for ${VERSION}" - git push - fi + git commit -m "attestation: hardcode measurements for ${VERSION}" + git push draft-release: name: Draft release (CLI) @@ -322,7 +278,6 @@ jobs: packages: write id-token: write contents: read - actions: write secrets: inherit with: ref: ${{ needs.verify-inputs.outputs.WORKING_BRANCH }} diff --git a/.github/workflows/reproducible-builds.yml b/.github/workflows/reproducible-builds.yml index 41aca0ac3..777d80ea2 100644 --- a/.github/workflows/reproducible-builds.yml +++ b/.github/workflows/reproducible-builds.yml @@ -1,9 +1,8 @@ # Verify that Constellation builds are reproducible. # -# The build-* jobs' matrix has three dimensions: a list of targets to build, a -# list of runners to build on and a method of installing dependencies. The -# produced binaries and OS images are expected to be bit-for-bit identical, -# without any dependencies on the runtime setup details. +# The build-* jobs' matrix has two dimensions: a list of targets to build and +# a list of runners to build on. The produced binaries and OS images are +# expected to be bit-for-bit identical, regardless of the chosen build runner. # # The compare-* jobs only have the target dimension. They obtain the built # targets from all runners and check that there are no diffs between them. 
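A minimal sketch of the comparison that header comment describes, assuming two hash files produced by different runners for the same target (the file names follow the <target>-<runner>.sha256 scheme used in these jobs; this is an illustration, not the workflow's actual compare step):

  # Hashes recorded by two runners for the same Bazel target.
  a=$(cut -d ' ' -f 1 cli_enterprise_linux_amd64-ubuntu-22.04.sha256)
  b=$(cut -d ' ' -f 1 cli_enterprise_linux_amd64-ubuntu-20.04.sha256)
  if [[ "${a}" == "${b}" ]]; then
    echo "bit-for-bit identical across runners"
  else
    echo "mismatch: ${a} != ${b}" >&2
    exit 1
  fi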
@@ -13,9 +12,6 @@ on: workflow_dispatch: schedule: - cron: "45 06 * * 1" # Every Monday at 6:45am - pull_request: - paths: - - .github/workflows/reproducible-builds.yml jobs: build-binaries: @@ -28,39 +24,22 @@ jobs: - "cli_enterprise_linux_amd64" - "cli_enterprise_linux_arm64" - "cli_enterprise_windows_amd64" - runner: - - "ubuntu-24.04" - - "ubuntu-22.04" - deps: - - conventional - - eccentric + runner: ["ubuntu-22.04", "ubuntu-20.04"] env: bazel_target: "//cli:${{ matrix.target }}" - binary: "${{ matrix.target }}-${{ matrix.runner }}-${{ matrix.deps }}" + binary: "${{ matrix.target }}-${{ matrix.runner }}" runs-on: ${{ matrix.runner }} steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ !github.event.pull_request.head.repo.fork && github.head_ref || '' }} - - name: Setup dependencies + - name: Setup bazel uses: ./.github/actions/setup_bazel_nix - if: matrix.deps == 'conventional' - - - name: Setup dependencies (eccentric) - if: matrix.deps == 'eccentric' - run: | - bazelVersion=$(cat .bazelversion) - mkdir -p "$HOME/.local/bin" - curl -fsSL -o "$HOME/.local/bin/bazel" "https://github.com/bazelbuild/bazel/releases/download/$bazelVersion/bazel-$bazelVersion-linux-x86_64" - chmod a+x "$HOME/.local/bin/bazel" - echo "$HOME/.local/bin" >> "$GITHUB_PATH" - - curl -fsSL -o "$HOME/.local/bin/nix-installer" https://github.com/DeterminateSystems/nix-installer/releases/download/v3.4.2/nix-installer-x86_64-linux # renovate:github-release - nixVersion=$(cat .nixversion) - chmod a+x "$HOME/.local/bin/nix-installer" - "$HOME/.local/bin/nix-installer" install --no-confirm --nix-package-url "https://releases.nixos.org/nix/nix-$nixVersion/nix-$nixVersion-x86_64-linux.tar.xz" + with: + useCache: "logs" + buildBuddyApiKey: ${{ secrets.BUILDBUDDY_ORG_API_KEY }} - name: Build shell: bash @@ -81,15 +60,15 @@ jobs: run: shasum -a 256 "${binary}" | tee "${binary}.sha256" - name: Upload binary artifact - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: - name: "binaries-${{ matrix.target }}-${{ matrix.runner }}-${{ matrix.deps }}" + name: "binaries-${{ matrix.target }}-${{ matrix.runner }}" path: "${{ env.binary }}" - name: Upload hash artifact - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: - name: "sha256sums-${{ matrix.target }}-${{ matrix.runner }}-${{ matrix.deps }}" + name: "sha256sums-${{ matrix.target }}-${{ matrix.runner }}" path: "${{ env.binary }}.sha256" build-osimages: @@ -101,31 +80,22 @@ jobs: - "aws_aws-nitro-tpm_console" - "qemu_qemu-vtpm_debug" - "gcp_gcp-sev-snp_nightly" - runner: ["ubuntu-24.04", "ubuntu-22.04"] + runner: ["ubuntu-22.04", "ubuntu-20.04"] env: bazel_target: "//image/system:${{ matrix.target }}" binary: "osimage-${{ matrix.target }}-${{ matrix.runner }}" runs-on: ${{ matrix.runner }} steps: - - name: Remove security hardening features - if: matrix.runner == 'ubuntu-24.04' - shell: bash - run: | - # Taken from https://github.com/systemd/mkosi/blob/fcacc94b9f72d9b6b1f03779b0c6e07209ceb54b/action.yaml#L42-L57. 
- sudo sysctl --ignore --write kernel.apparmor_restrict_unprivileged_unconfined=0 - sudo sysctl --ignore --write kernel.apparmor_restrict_unprivileged_userns=0 - # This command fails with a non-zero error code even though it unloads the apparmor profiles. - # https://gitlab.com/apparmor/apparmor/-/issues/403 - sudo aa-teardown || true - sudo apt-get remove -y apparmor - - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ !github.event.pull_request.head.repo.fork && github.head_ref || '' }} - name: Setup bazel uses: ./.github/actions/setup_bazel_nix + with: + useCache: "logs" + buildBuddyApiKey: ${{ secrets.BUILDBUDDY_ORG_API_KEY }} - name: Build shell: bash @@ -146,13 +116,13 @@ jobs: run: shasum -a 256 "${binary}" | tee "${binary}.sha256" - name: Upload binary artifact - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: name: "osimages-${{ matrix.target }}-${{ matrix.runner }}" path: "${{ env.binary }}" - name: Upload hash artifact - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: name: "sha256sums-${{ matrix.target }}-${{ matrix.runner }}" path: "${{ env.binary }}.sha256" @@ -168,14 +138,14 @@ jobs: - "cli_enterprise_linux_amd64" - "cli_enterprise_linux_arm64" - "cli_enterprise_windows_amd64" - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 with: ref: ${{ !github.event.pull_request.head.repo.fork && github.head_ref || '' }} - name: Download binaries - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 + uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 with: pattern: "binaries-${{ matrix.target }}-*" merge-multiple: true @@ -202,14 +172,14 @@ jobs: - "aws_aws-nitro-tpm_console" - "qemu_qemu-vtpm_debug" - "gcp_gcp-sev-snp_nightly" - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 with: ref: ${{ !github.event.pull_request.head.repo.fork && github.head_ref || '' }} - name: Download os images - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 + uses: actions/download-artifact@eaceaf801fd36c7dee90939fad912460b18a1ffe # v4.1.2 with: pattern: "osimages-${{ matrix.target }}-*" merge-multiple: true diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index 08a7faeac..938fb7495 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -9,7 +9,7 @@ on: jobs: analysis: name: Scorecard analysis - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 permissions: # Needed to upload the results to code-scanning dashboard. 
security-events: write @@ -18,25 +18,25 @@ jobs: steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: persist-credentials: false - name: Run analysis - uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # v2.4.2 + uses: ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 # v2.3.1 with: results_file: results.sarif results_format: sarif publish_results: true - name: Upload artifact - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: name: SARIF file path: results.sarif retention-days: 5 - name: Upload to code-scanning - uses: github/codeql-action/upload-sarif@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3.28.18 + uses: github/codeql-action/upload-sarif@cf7e9f23492505046de9a37830c3711dd0f25bb3 # v2.16.2 with: sarif_file: results.sarif diff --git a/.github/workflows/sync-terraform-docs.yml b/.github/workflows/sync-terraform-docs.yml index 9bc2aac07..6eab70467 100644 --- a/.github/workflows/sync-terraform-docs.yml +++ b/.github/workflows/sync-terraform-docs.yml @@ -18,14 +18,14 @@ jobs: pull-requests: write steps: - name: Checkout constellation repo - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ !github.event.pull_request.head.repo.fork && github.head_ref || '' }} fetch-depth: 0 path: constellation - name: Checkout terraform-provider-constellation repo - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: repository: edgelesssys/terraform-provider-constellation ref: main @@ -40,7 +40,7 @@ jobs: - name: Create pull request id: create-pull-request - uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8 + uses: peter-evans/create-pull-request@b1ddad2c994a25fbc81a28b3ec0e368bb2021c50 # v6.0.0 with: path: terraform-provider-constellation branch: "feat/docs/update" diff --git a/.github/workflows/test-integration.yml b/.github/workflows/test-integration.yml index c6908ff3d..9b7a20027 100644 --- a/.github/workflows/test-integration.yml +++ b/.github/workflows/test-integration.yml @@ -20,17 +20,20 @@ on: jobs: integration-test: - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 env: CTEST_OUTPUT_ON_FAILURE: True steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ !github.event.pull_request.head.repo.fork && github.head_ref || '' }} - name: Setup bazel uses: ./.github/actions/setup_bazel_nix + with: + useCache: "true" + buildBuddyApiKey: ${{ secrets.BUILDBUDDY_ORG_API_KEY }} - name: Integration Tests env: diff --git a/.github/workflows/test-operator-codegen.yml b/.github/workflows/test-operator-codegen.yml index b2f92ba7f..028ef981c 100644 --- a/.github/workflows/test-operator-codegen.yml +++ b/.github/workflows/test-operator-codegen.yml @@ -18,17 +18,17 @@ on: jobs: govulncheck: name: check-codegen - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ 
!github.event.pull_request.head.repo.fork && github.head_ref || '' }} - name: Setup Go environment - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: - go-version: "1.24.3" + go-version: "1.22.1" cache: true - name: Run code generation diff --git a/.github/workflows/test-tfsec.yml b/.github/workflows/test-tfsec.yml index 5517ac887..59f0eee1f 100644 --- a/.github/workflows/test-tfsec.yml +++ b/.github/workflows/test-tfsec.yml @@ -17,13 +17,13 @@ on: jobs: tfsec: name: tfsec - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 permissions: contents: read pull-requests: write steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ !github.event.pull_request.head.repo.fork && github.head_ref || '' }} diff --git a/.github/workflows/test-tidy.yml b/.github/workflows/test-tidy.yml index 25f06e174..d378d36a1 100644 --- a/.github/workflows/test-tidy.yml +++ b/.github/workflows/test-tidy.yml @@ -17,7 +17,7 @@ jobs: contents: read steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ !github.event.pull_request.head.repo.fork && github.head_ref || '' }} # No token available for forks, so we can't push changes @@ -34,10 +34,11 @@ jobs: with: useCache: "rbe" rbePlatform: "ubuntu-22.04" + buildBuddyApiKey: ${{ secrets.BUILDBUDDY_ORG_API_KEY }} - name: Assume AWS role to upload Bazel dependencies to S3 if: startsWith(github.head_ref, 'renovate/') - uses: aws-actions/configure-aws-credentials@b47578312673ae6fa5b5096b330d9fbac3d116df # v4.2.1 + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 with: role-to-assume: arn:aws:iam::795746500882:role/GithubConstellationMirrorWrite aws-region: eu-central-1 @@ -51,9 +52,7 @@ jobs: - name: Run Bazel tidy shell: bash - run: | - bazel run //:tidy - bazel mod deps --lockfile_mode=update + run: bazel run //:tidy - name: Check if untidy id: untidy @@ -98,11 +97,10 @@ jobs: exit 0 fi - # Use quadruple backticks to escape triple backticks in diff'ed files. 
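The comment being dropped here records a real pitfall: if the diff itself contains a three-backtick fence (for example, a changed Markdown file), an outer three-backtick fence in the step summary closes early and the rest renders as plain prose, which is the rationale behind the four-backtick lines being removed in this hunk. A small standalone illustration, assuming a diff value with an embedded fence:

  # A diff that itself contains a fenced block (e.g. a changed Markdown file).
  diff=$'+```go\n+fmt.Println("hi")\n+```'
  # With three backticks, the embedded fence terminates the outer one early:
  printf '```diff\n%s\n```\n' "$diff"
  # With four backticks, the whole diff renders as one block:
  printf '````diff\n%s\n````\n' "$diff"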
cat << EOF >> "${GITHUB_STEP_SUMMARY}" - \`\`\`\`diff + \`\`\`diff ${diff} - \`\`\`\` + \`\`\` EOF if [[ "${{ steps.untidy.outputs.untidy }}" == "true" ]] && diff --git a/.github/workflows/test-unittest.yml b/.github/workflows/test-unittest.yml index 7b64254dd..bb2745a27 100644 --- a/.github/workflows/test-unittest.yml +++ b/.github/workflows/test-unittest.yml @@ -30,7 +30,7 @@ jobs: pull-requests: write steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ !github.event.pull_request.head.repo.fork && github.head_ref || '' }} fetch-depth: 0 @@ -40,6 +40,7 @@ jobs: with: useCache: "rbe" rbePlatform: "ubuntu-22.04" + buildBuddyApiKey: ${{ secrets.BUILDBUDDY_ORG_API_KEY }} - name: Install AWS cli run: | @@ -49,7 +50,7 @@ jobs: rm -rf awscliv2.zip aws - name: Login to AWS (IAM role) - uses: aws-actions/configure-aws-credentials@b47578312673ae6fa5b5096b330d9fbac3d116df # v4.2.1 + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 with: role-to-assume: arn:aws:iam::795746500882:role/GithubActionGocoverage aws-region: eu-central-1 @@ -69,7 +70,7 @@ jobs: - name: Comment coverage if: steps.coverage.outputs.uploadable == 'true' && github.event_name == 'pull_request' - uses: marocchino/sticky-pull-request-comment@67d0dec7b07ed060a405f9b2a64b8ab319fdd7db # v2.9.2 + uses: marocchino/sticky-pull-request-comment@331f8f5b4215f0445d3c07b4967662a32a2d3e31 # v2.9.0 with: header: coverage path: coverage_diff.md diff --git a/.github/workflows/update-rpms.yml b/.github/workflows/update-rpms.yml index c7146607e..6359eefed 100644 --- a/.github/workflows/update-rpms.yml +++ b/.github/workflows/update-rpms.yml @@ -7,24 +7,25 @@ on: jobs: update-rpms: - runs-on: "ubuntu-24.04" + runs-on: "ubuntu-22.04" permissions: id-token: write contents: read steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - with: - token: ${{ secrets.CI_COMMIT_PUSH_PR }} + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Assume AWS role to upload Bazel dependencies to S3 - uses: aws-actions/configure-aws-credentials@b47578312673ae6fa5b5096b330d9fbac3d116df # v4.2.1 + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 with: role-to-assume: arn:aws:iam::795746500882:role/GithubConstellationMirrorWrite aws-region: eu-central-1 - name: Setup bazel uses: ./.github/actions/setup_bazel_nix + with: + useCache: "true" + buildBuddyApiKey: ${{ secrets.BUILDBUDDY_ORG_API_KEY }} - name: Update rpms run: bazel run //image/mirror:update_packages @@ -39,7 +40,7 @@ jobs: fi - name: Create pull request - uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8 + uses: peter-evans/create-pull-request@b1ddad2c994a25fbc81a28b3ec0e368bb2021c50 # v6.0.0 with: branch: "image/automated/update-rpms-${{ github.run_number }}" base: main @@ -51,7 +52,6 @@ jobs: It updates the locked rpm packages that form the Constellation OS images. commit-message: "image: update locked rpms" committer: edgelessci - author: edgelessci labels: dependencies # We need to push changes using a token, otherwise triggers like on:push and on:pull_request won't work. 
token: ${{ !github.event.pull_request.head.repo.fork && secrets.CI_COMMIT_PUSH_PR || '' }} diff --git a/.github/workflows/versionsapi.yml b/.github/workflows/versionsapi.yml index 27acd9287..3a7adcf53 100644 --- a/.github/workflows/versionsapi.yml +++ b/.github/workflows/versionsapi.yml @@ -106,7 +106,7 @@ concurrency: jobs: versionsapi: - runs-on: ubuntu-24.04 + runs-on: ubuntu-22.04 permissions: id-token: write contents: read @@ -115,7 +115,7 @@ jobs: steps: - name: Check out repository id: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ !github.event.pull_request.head.repo.fork && github.head_ref || '' }} @@ -149,21 +149,21 @@ jobs: - name: Login to AWS without write access if: steps.check-rights.outputs.write == 'false' - uses: aws-actions/configure-aws-credentials@b47578312673ae6fa5b5096b330d9fbac3d116df # v4.2.1 + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 with: role-to-assume: arn:aws:iam::795746500882:role/GithubConstellationVersionsAPIRead aws-region: eu-central-1 - name: Login to AWS with write access if: steps.check-rights.outputs.write == 'true' && steps.check-rights.outputs.auth == 'false' - uses: aws-actions/configure-aws-credentials@b47578312673ae6fa5b5096b330d9fbac3d116df # v4.2.1 + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 with: role-to-assume: arn:aws:iam::795746500882:role/GithubConstellationVersionsAPIWrite aws-region: eu-central-1 - name: Login to AWS with write and image remove access if: steps.check-rights.outputs.write == 'true' && steps.check-rights.outputs.auth == 'true' - uses: aws-actions/configure-aws-credentials@b47578312673ae6fa5b5096b330d9fbac3d116df # v4.2.1 + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 with: role-to-assume: arn:aws:iam::795746500882:role/GithubConstellationVersionsAPIRemove aws-region: eu-central-1 @@ -180,8 +180,6 @@ jobs: with: service_account: "image-deleter@constellation-images.iam.gserviceaccount.com" - - uses: ./.github/actions/setup_bazel_nix - - name: Execute versionsapi CLI id: run uses: ./.github/actions/versionsapi diff --git a/.golangci.yml b/.golangci.yml index 2e5cd2d5c..2998b1416 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,65 +1,51 @@ -version: "2" run: + timeout: 10m build-tags: - integration - e2e modules-download-mode: readonly + skip-dirs: + - 3rdparty/node-maintenance-operator + output: - formats: - tab: - path: stdout - colors: false + format: tab + sort-results: true + linters: enable: + # Default linters + - errcheck + - gosimple + - govet + - ineffassign + - staticcheck + - typecheck + - unused + # Additional linters - bodyclose - - copyloopvar - errname + - exportloopref - godot + - gofmt + - gofumpt - misspell - noctx - revive + - tenv - unconvert - unparam - - usetesting - settings: - errcheck: - exclude-functions: - - (*go.uber.org/zap.Logger).Sync - - (*google.golang.org/grpc.Server).Serve - exclusions: - generated: lax - presets: - - common-false-positives - - legacy - - std-error-handling - paths: - - 3rdparty/node-maintenance-operator - rules: - # TODO(burgerdev): these exclusions have been added to ease migration to v2 and should eventually be addressed. 
- - linters: ["staticcheck"] - text: "QF1008: could remove embedded field" - - linters: ["staticcheck"] - text: "QF1001: could apply De Morgan's law" - - linters: ["staticcheck"] - text: "ST1005: error strings should not be capitalized" - - linters: ["revive"] - text: "package-comments: package comment should be of the form" - - linters: ["revive"] - text: "package-comments: should have a package comment" - - linters: ["staticcheck"] - text: "QF1012: Use fmt.Fprintf" - - linters: ["staticcheck"] - text: "ST1019" - issues: max-issues-per-linter: 0 max-same-issues: 20 -formatters: - enable: - - gofmt - - gofumpt - exclusions: - generated: lax - paths: - - 3rdparty/node-maintenance-operator + include: + - EXC0012 + - EXC0014 + +linters-settings: + errcheck: + # List of functions to exclude from checking, where each entry is a single function to exclude. + # See https://github.com/kisielk/errcheck#excluding-functions for details. + exclude-functions: + - (*go.uber.org/zap.Logger).Sync + - (*google.golang.org/grpc.Server).Serve diff --git a/.lychee.toml b/.lychee.toml index 70b1a2652..9106ef457 100644 --- a/.lychee.toml +++ b/.lychee.toml @@ -33,10 +33,6 @@ exclude = [ '^https://portal\.azure\.com/', # The Wireguard website sproadically returns 500. '^https://www\.wireguard\.com/', - # venturebeat detects our link checker - '^https://venturebeat\.com/', - # dev-docs reference the internal wiki - '^https://github\.com/edgelesssys/wiki', ] # Exclude these filesystem paths from getting checked. diff --git a/.nixversion b/.nixversion deleted file mode 100644 index 40a8d7f12..000000000 --- a/.nixversion +++ /dev/null @@ -1 +0,0 @@ -2.25.2 diff --git a/3rdparty/bazel/com_github_google_go_tpm_tools/com_github_google_go_tpm_tools.need b/3rdparty/bazel/com_github_google_go_tpm_tools/com_github_google_go_tpm_tools.need index 8542c18a4..d3cf0338e 100644 --- a/3rdparty/bazel/com_github_google_go_tpm_tools/com_github_google_go_tpm_tools.need +++ b/3rdparty/bazel/com_github_google_go_tpm_tools/com_github_google_go_tpm_tools.need @@ -8,7 +8,10 @@ go_library( "internal_cross.go", ], cgo = True, - cdeps = ["//simulator/ms-tpm-20-ref:ms_tpm_20_ref"], + cdeps = select({ + "@//bazel/settings:tpm_simulator_enabled": ["//simulator/ms-tpm-20-ref:ms_tpm_20_ref"], + "//conditions:default": ["@//3rdparty/bazel/com_github_google_go_tpm_tools/placeholder:ms_tpm_20_ref_disabled"], + }), copts = [ "-fno-sanitize=all", # relax sanitizer checks for this test-only dependency ], diff --git a/3rdparty/bazel/com_github_google_go_tpm_tools/com_github_google_go_tpm_tools.orig b/3rdparty/bazel/com_github_google_go_tpm_tools/com_github_google_go_tpm_tools.orig index bb03b408a..3fc9a7d32 100644 --- a/3rdparty/bazel/com_github_google_go_tpm_tools/com_github_google_go_tpm_tools.orig +++ b/3rdparty/bazel/com_github_google_go_tpm_tools/com_github_google_go_tpm_tools.orig @@ -73,15 +73,9 @@ go_library( "@io_bazel_rules_go//go/platform:openbsd": [ "-fstack-protector-all", ], - "@io_bazel_rules_go//go/platform:osx": [ - "-fstack-protector-all", - ], "@io_bazel_rules_go//go/platform:plan9": [ "-fstack-protector-all", ], - "@io_bazel_rules_go//go/platform:qnx": [ - "-fstack-protector-all", - ], "@io_bazel_rules_go//go/platform:solaris": [ "-fstack-protector-all", ], diff --git a/3rdparty/bazel/com_github_google_go_tpm_tools/com_github_google_go_tpm_tools.patch b/3rdparty/bazel/com_github_google_go_tpm_tools/com_github_google_go_tpm_tools.patch index ad04c2292..238e50f9e 100644 --- 
a/3rdparty/bazel/com_github_google_go_tpm_tools/com_github_google_go_tpm_tools.patch +++ b/3rdparty/bazel/com_github_google_go_tpm_tools/com_github_google_go_tpm_tools.patch @@ -1,6 +1,6 @@ --- simulator/internal/BUILD.bazel +++ simulator/internal/BUILD.bazel -@@ -4,89 +4,14 @@ go_library( +@@ -4,83 +4,17 @@ name = "internal", srcs = [ "doc.go", @@ -17,8 +17,10 @@ - "-L/usr/local/opt/openssl/lib", - ], - "//conditions:default": [], -- }), -+ cdeps = ["//simulator/ms-tpm-20-ref:ms_tpm_20_ref"], ++ cdeps = select({ ++ "@//bazel/settings:tpm_simulator_enabled": ["//simulator/ms-tpm-20-ref:ms_tpm_20_ref"], ++ "//conditions:default": ["@//3rdparty/bazel/com_github_google_go_tpm_tools/placeholder:ms_tpm_20_ref_disabled"], + }), copts = [ - "-DALG_SHA512=ALG_YES", - "-DCERTIFYX509_DEBUG=NO", @@ -74,15 +76,9 @@ - "@io_bazel_rules_go//go/platform:openbsd": [ - "-fstack-protector-all", - ], -- "@io_bazel_rules_go//go/platform:osx": [ -- "-fstack-protector-all", -- ], - "@io_bazel_rules_go//go/platform:plan9": [ - "-fstack-protector-all", - ], -- "@io_bazel_rules_go//go/platform:qnx": [ -- "-fstack-protector-all", -- ], - "@io_bazel_rules_go//go/platform:solaris": [ - "-fstack-protector-all", - ], diff --git a/3rdparty/bazel/com_github_google_go_tpm_tools/ms_tpm_20_ref.need b/3rdparty/bazel/com_github_google_go_tpm_tools/ms_tpm_20_ref.need index 7758874e6..83b388537 100644 --- a/3rdparty/bazel/com_github_google_go_tpm_tools/ms_tpm_20_ref.need +++ b/3rdparty/bazel/com_github_google_go_tpm_tools/ms_tpm_20_ref.need @@ -229,10 +229,8 @@ cc_library( ], deps = [ ":ms_tpm_20_ref_google_samples", - ] + select({ - "@io_bazel_rules_go//go/platform:linux_arm64": ["@@org_openssl_aarch64-linux//:org_openssl"], - "@io_bazel_rules_go//go/platform:linux_amd64": ["@@org_openssl_x86_64-linux//:org_openssl"], - }), + "@//nix/cc:org_openssl", + ], target_compatible_with = [ "@platforms//os:linux", ] diff --git a/3rdparty/bazel/com_github_google_go_tpm_tools/ms_tpm_20_ref.patch b/3rdparty/bazel/com_github_google_go_tpm_tools/ms_tpm_20_ref.patch index 8340c947f..e98141b8a 100644 --- a/3rdparty/bazel/com_github_google_go_tpm_tools/ms_tpm_20_ref.patch +++ b/3rdparty/bazel/com_github_google_go_tpm_tools/ms_tpm_20_ref.patch @@ -1,6 +1,6 @@ --- simulator/ms-tpm-20-ref/BUILD.bazel +++ simulator/ms-tpm-20-ref/BUILD.bazel -@@ -0,0 +1,516 @@ +@@ -0,0 +1,512 @@ +cc_library( + name = "ms_tpm_20_ref", + visibility = ["//visibility:public"], @@ -229,14 +229,10 @@ + ], + deps = [ + ":ms_tpm_20_ref_google_samples", -+ ] + select({ -+ "@io_bazel_rules_go//go/platform:darwin_arm64": ["@@org_openssl_aarch64-darwin//:org_openssl"], -+ "@io_bazel_rules_go//go/platform:darwin_amd64": ["@@org_openssl_x86_64-darwin//:org_openssl"], -+ "@io_bazel_rules_go//go/platform:linux_arm64": ["@@org_openssl_aarch64-linux//:org_openssl"], -+ "@io_bazel_rules_go//go/platform:linux_amd64": ["@@org_openssl_x86_64-linux//:org_openssl"], -+ }), ++ "@//nix/cc:org_openssl", ++ ], + target_compatible_with = [ -+ "@@platforms//os:linux", ++ "@platforms//os:linux", + ], +) + diff --git a/3rdparty/bazel/com_github_google_go_tpm_tools/placeholder/BUILD.bazel b/3rdparty/bazel/com_github_google_go_tpm_tools/placeholder/BUILD.bazel new file mode 100644 index 000000000..07d713e46 --- /dev/null +++ b/3rdparty/bazel/com_github_google_go_tpm_tools/placeholder/BUILD.bazel @@ -0,0 +1,12 @@ +load("@rules_cc//cc:defs.bzl", "cc_library") + +cc_library( + name = "ms_tpm_20_ref_disabled", + srcs = ["ms_tpm_20_disabled.c"], + hdrs = [ + "Platform.h", + "Tpm.h", + ], + includes = 
["."], + visibility = ["//visibility:public"], +) diff --git a/3rdparty/bazel/com_github_google_go_tpm_tools/placeholder/Platform.h b/3rdparty/bazel/com_github_google_go_tpm_tools/placeholder/Platform.h new file mode 100644 index 000000000..8aaa55d10 --- /dev/null +++ b/3rdparty/bazel/com_github_google_go_tpm_tools/placeholder/Platform.h @@ -0,0 +1,22 @@ +#include +#include +#include + +extern int g_inFailureMode; + +typedef union { + uint16_t size; + uint8_t *buffer; +} TPM2B, TPM2B_SEED; +typedef struct +{ + TPM2B_SEED EPSeed; + TPM2B_SEED SPSeed; + TPM2B_SEED PPSeed; +} PERSISTENT_DATA; + +extern PERSISTENT_DATA gp; + +void _plat__Reset(bool forceManufacture); +void _plat__RunCommand(uint32_t requestSize, unsigned char *request, + uint32_t *responseSize, unsigned char **response); diff --git a/3rdparty/bazel/com_github_google_go_tpm_tools/placeholder/Tpm.h b/3rdparty/bazel/com_github_google_go_tpm_tools/placeholder/Tpm.h new file mode 100644 index 000000000..2e94e1749 --- /dev/null +++ b/3rdparty/bazel/com_github_google_go_tpm_tools/placeholder/Tpm.h @@ -0,0 +1,27 @@ +#undef TRUE +#define TRUE 1 +#undef FALSE +#define FALSE 0 +#undef YES +#define YES 1 +#undef NO +#define NO 0 +#undef SET +#define SET 1 +#undef CLEAR +#define CLEAR 0 +#ifndef MAX_RESPONSE_SIZE +#define MAX_RESPONSE_SIZE 4096 +#endif + +#ifndef EPSeed +#define EPSeed 1 +#endif +#ifndef SPSeed +#define SPSeed 1 +#endif +#ifndef PPSeed +#define PPSeed 1 +#endif + +#define NV_SYNC_PERSISTENT(x) diff --git a/3rdparty/bazel/com_github_google_go_tpm_tools/placeholder/ms_tpm_20_disabled.c b/3rdparty/bazel/com_github_google_go_tpm_tools/placeholder/ms_tpm_20_disabled.c new file mode 100644 index 000000000..a56d7f43c --- /dev/null +++ b/3rdparty/bazel/com_github_google_go_tpm_tools/placeholder/ms_tpm_20_disabled.c @@ -0,0 +1,9 @@ +#include "Platform.h" +#include "Tpm.h" + +int g_inFailureMode = 0; + +void _plat__Reset(bool forceManufacture) {} + +void _plat__RunCommand(uint32_t requestSize, unsigned char *request, + uint32_t *responseSize, unsigned char **response) {} diff --git a/3rdparty/bazel/com_github_kubernetes_sigs_aws_load_balancer_controller/BUILD.bazel b/3rdparty/bazel/com_github_kubernetes_sigs_aws_load_balancer_controller/BUILD.bazel deleted file mode 100644 index 9aa7e3f64..000000000 --- a/3rdparty/bazel/com_github_kubernetes_sigs_aws_load_balancer_controller/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("//bazel/sh:def.bzl", "sh_template") - -sh_template( - name = "pull_files", - data = [ - "@com_github_kubernetes_sigs_aws_load_balancer_controller//:lb_policy", - ], - substitutions = { - "@@POLICY_SRC@@": "$(rootpath @com_github_kubernetes_sigs_aws_load_balancer_controller//:lb_policy)", - }, - template = "pull_files.sh", - visibility = ["//visibility:public"], -) diff --git a/3rdparty/bazel/com_github_kubernetes_sigs_aws_load_balancer_controller/pull_files.sh b/3rdparty/bazel/com_github_kubernetes_sigs_aws_load_balancer_controller/pull_files.sh deleted file mode 100644 index 10a224890..000000000 --- a/3rdparty/bazel/com_github_kubernetes_sigs_aws_load_balancer_controller/pull_files.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env bash - -###### script header ###### - -lib=$(realpath @@BASE_LIB@@) || exit 1 -stat "${lib}" >> /dev/null || exit 1 - -# shellcheck source=../../../bazel/sh/lib.bash -if ! 
source "${lib}"; then - echo "Error: could not find import" - exit 1 -fi - -controller_policy_source="@@POLICY_SRC@@" - -###### script body ###### - -controller_policy_real_source=$(realpath "${controller_policy_source}") - -cd "${BUILD_WORKSPACE_DIRECTORY}" # needs to be done after realpath - -targetDir="terraform/infrastructure/iam/aws/alb_policy.json" - -cp "${controller_policy_real_source}" "${targetDir}" diff --git a/3rdparty/bazel/com_github_kubernetes_sigs_aws_load_balancer_controller/source.bzl b/3rdparty/bazel/com_github_kubernetes_sigs_aws_load_balancer_controller/source.bzl deleted file mode 100644 index e2c1e8034..000000000 --- a/3rdparty/bazel/com_github_kubernetes_sigs_aws_load_balancer_controller/source.bzl +++ /dev/null @@ -1,22 +0,0 @@ -"""A module defining the source of the AWS load balancer controller.""" - -load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") - -def aws_load_balancer_controller_deps(): - http_archive( - name = "com_github_kubernetes_sigs_aws_load_balancer_controller", - urls = [ - "https://cdn.confidential.cloud/constellation/cas/sha256/422af7c03ebc73e1be6aea563475ec9ea6396071fa03158b9a3984aa621b8cb1", - "https://github.com/kubernetes-sigs/aws-load-balancer-controller/archive/refs/tags/v2.12.0.tar.gz", - ], - strip_prefix = "aws-load-balancer-controller-2.12.0", - build_file_content = """ -filegroup( - srcs = ["docs/install/iam_policy.json"], - name = "lb_policy", - visibility = ["//visibility:public"], -) - """, - type = "tar.gz", - sha256 = "422af7c03ebc73e1be6aea563475ec9ea6396071fa03158b9a3984aa621b8cb1", - ) diff --git a/3rdparty/bazel/com_github_martinjungblut_go_cryptsetup/com_github_martinjungblut_go_cryptsetup.patch b/3rdparty/bazel/com_github_martinjungblut_go_cryptsetup/com_github_martinjungblut_go_cryptsetup.patch index 0319cc797..f87c1a730 100644 --- a/3rdparty/bazel/com_github_martinjungblut_go_cryptsetup/com_github_martinjungblut_go_cryptsetup.patch +++ b/3rdparty/bazel/com_github_martinjungblut_go_cryptsetup/com_github_martinjungblut_go_cryptsetup.patch @@ -4,9 +4,9 @@ "plain.go", ], cgo = True, -+ cdeps = select({ -+ "@io_bazel_rules_go//go/platform:linux_amd64": ["@@cryptsetup_x86_64-linux//:cryptsetup"], -+ }), ++ cdeps = [ ++ "@//nix/cc:cryptsetup", ++ ], importpath = "github.com/martinjungblut/go-cryptsetup", visibility = ["//visibility:public"], ) diff --git a/3rdparty/bazel/com_github_medik8s_node_maintainance_operator/source.bzl b/3rdparty/bazel/com_github_medik8s_node_maintainance_operator/source.bzl index aa81fc947..a9d20a08c 100644 --- a/3rdparty/bazel/com_github_medik8s_node_maintainance_operator/source.bzl +++ b/3rdparty/bazel/com_github_medik8s_node_maintainance_operator/source.bzl @@ -6,10 +6,10 @@ def node_maintainance_operator_deps(): http_archive( name = "com_github_medik8s_node_maintainance_operator", urls = [ - "https://cdn.confidential.cloud/constellation/cas/sha256/6ccc7f152e5c595ab24eaadcda77870101eccc482694dc6f0d93be2528406ae2", - "https://github.com/medik8s/node-maintenance-operator/archive/refs/tags/v0.17.0.tar.gz", + "https://cdn.confidential.cloud/constellation/cas/sha256/df5ea2f9d982dd78770f2549333fd40aaf40e50a28deec9d7892f83cf9d1bdb2", + "https://github.com/medik8s/node-maintenance-operator/archive/refs/tags/v0.15.0.tar.gz", ], - strip_prefix = "node-maintenance-operator-0.17.0", + strip_prefix = "node-maintenance-operator-0.15.0", build_file_content = """ api_v1beta1 = glob(["api/v1beta1/*.go"]) filegroup( @@ -19,5 +19,5 @@ filegroup( ) """, type = "tar.gz", - sha256 = 
"6ccc7f152e5c595ab24eaadcda77870101eccc482694dc6f0d93be2528406ae2", + sha256 = "df5ea2f9d982dd78770f2549333fd40aaf40e50a28deec9d7892f83cf9d1bdb2", ) diff --git a/3rdparty/bazel/org_golang/go_tls_max_handshake_size.patch b/3rdparty/bazel/org_golang/go_tls_max_handshake_size.patch index 5370f99e7..ac2da752f 100644 --- a/3rdparty/bazel/org_golang/go_tls_max_handshake_size.patch +++ b/3rdparty/bazel/org_golang/go_tls_max_handshake_size.patch @@ -1,11 +1,11 @@ --- src/crypto/tls/common.go +++ src/crypto/tls/common.go -@@ -64,7 +64,7 @@ const ( - maxCiphertext = 16384 + 2048 // maximum ciphertext payload length - maxCiphertextTLS13 = 16384 + 256 // maximum ciphertext length in TLS 1.3 - recordHeaderLen = 5 // record header length -- maxHandshake = 65536 // maximum handshake we support (protocol max is 16 MB) -+ maxHandshake = 262144 // maximum handshake we support (protocol max is 16 MB) - maxHandshakeCertificateMsg = 262144 // maximum certificate message size (256 KiB) - maxUselessRecords = 16 // maximum number of consecutive non-advancing records +@@ -62,7 +62,7 @@ + maxCiphertext = 16384 + 2048 // maximum ciphertext payload length + maxCiphertextTLS13 = 16384 + 256 // maximum ciphertext length in TLS 1.3 + recordHeaderLen = 5 // record header length +- maxHandshake = 65536 // maximum handshake we support (protocol max is 16 MB) ++ maxHandshake = 262144 // maximum handshake we support (protocol max is 16 MB) + maxUselessRecords = 16 // maximum number of consecutive non-advancing records ) + diff --git a/3rdparty/bazel/org_libvirt_go_libvirt/go_libvirt.patch b/3rdparty/bazel/org_libvirt_go_libvirt/go_libvirt.patch index 78150041c..d2c799cae 100644 --- a/3rdparty/bazel/org_libvirt_go_libvirt/go_libvirt.patch +++ b/3rdparty/bazel/org_libvirt_go_libvirt/go_libvirt.patch @@ -5,7 +5,7 @@ "typedparams.go", ], + cdeps = [ -+ "@@libvirt_x86_64-linux//:libvirt", ++ "@//nix/cc:libvirt", + ], cgo = True, importpath = "libvirt.org/go/libvirt", diff --git a/3rdparty/gcp-guest-agent/Dockerfile b/3rdparty/gcp-guest-agent/Dockerfile index f8fa7cfb5..e435bfbc3 100644 --- a/3rdparty/gcp-guest-agent/Dockerfile +++ b/3rdparty/gcp-guest-agent/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:24.04@sha256:a08e551cb33850e4740772b38217fc1796a66da2506d312abe51acda354ff061 as build +FROM ubuntu:22.04@sha256:2b7412e6465c3c7fc5bb21d3e6f1917c167358449fecac8176c6e496e5c1f05f as build # Install packages RUN apt-get update && apt-get install -y \ @@ -6,7 +6,7 @@ RUN apt-get update && apt-get install -y \ git # Install Go -ARG GO_VER=1.24.4 +ARG GO_VER=1.22.1 RUN wget -q https://go.dev/dl/go${GO_VER}.linux-amd64.tar.gz && \ tar -C /usr/local -xzf go${GO_VER}.linux-amd64.tar.gz && \ rm go${GO_VER}.linux-amd64.tar.gz diff --git a/BUILD.bazel b/BUILD.bazel index 6e09ef814..b2443e4b5 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -1,5 +1,5 @@ exports_files([ - "WORKSPACE.bzlmod", + "WORKSPACE.bazel", ]) alias( diff --git a/CODEOWNERS b/CODEOWNERS index 0172574b9..69c1ac3e1 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,64 +1,67 @@ # keep-sorted start -.golangci.yml @daniel-weisse -/3rdparty/gcp-guest-agent @msanft -/bazel @burgerdev -/bootstrapper @msanft +.golangci.yml @katexochen +/3rdparty/gcp-guest-agent @malt3 +/bazel @malt3 +/bazel/ci @katexochen +/bazel/sh @katexochen +/bootstrapper @3u13r /cli/internal/cloudcmd @daniel-weisse -/cli/internal/cmd/upgrade* @daniel-weisse +/cli/internal/cmd/upgrade* @derpsteb /cli/internal/libvirt @daniel-weisse -/cli/internal/terraform @daniel-weisse +/cli/internal/terraform @elchead /csi @daniel-weisse 
-/debugd @daniel-weisse +/debugd @malt3 /disk-mapper @daniel-weisse /docs @thomasten -/e2e @msanft -/hack/azure-snp-report-verify @msanft -/hack/bazel-deps-mirror @burgerdev -/hack/cli-k8s-compatibility @burgerdev +/e2e @3u13r +/hack/azure-snp-report-verify @derpsteb +/hack/bazel-deps-mirror @malt3 +/hack/cli-k8s-compatibility @derpsteb /hack/clidocgen @thomasten -/hack/fetch-broken-e2e @msanft -/hack/gocoverage @msanft -/hack/oci-pin @burgerdev +/hack/fetch-broken-e2e @katexochen +/hack/gocoverage @katexochen +/hack/oci-pin @malt3 /hack/qemu-metadata-api @daniel-weisse -/hack/remove-tf-providers @msanft -/hack/terraform @msanft -/hack/tools @msanft +/hack/remove-tf-providers @katexochen +/hack/terraform @3u13r +/hack/tools @katexochen /hack/versioninfogen @daniel-weisse -/image @msanft -/internal/api @daniel-weisse +/image @malt3 +/internal/api @derpsteb /internal/atls @thomasten /internal/attestation @daniel-weisse -/internal/cloud @msanft -/internal/compatibility @daniel-weisse -/internal/config @msanft -/internal/constellation/featureset @thomasten -/internal/constellation/helm @burgerdev +/internal/cloud @3u13r +/internal/compatibility @derpsteb +/internal/config @derpsteb +/internal/constellation/featureset @malt3 +/internal/constellation/helm @derpsteb /internal/constellation/kubecmd @daniel-weisse -/internal/constellation/state @msanft -/internal/containerimage @burgerdev +/internal/constellation/state @elchead +/internal/containerimage @malt3 /internal/crypto @thomasten /internal/cryptsetup @daniel-weisse /internal/file @daniel-weisse /internal/grpc @thomasten -/internal/imagefetcher @msanft -/internal/installer @msanft +/internal/imagefetcher @malt3 +/internal/installer @3u13r /internal/kms @daniel-weisse -/internal/kubernetes @msanft +/internal/kubernetes @malt3 /internal/license @thomasten /internal/logger @daniel-weisse /internal/nodestate @daniel-weisse -/internal/osimage @msanft -/internal/retry @msanft -/internal/semver @daniel-weisse -/internal/sigstore @burgerdev -/internal/staticupload @msanft -/internal/versions @msanft +/internal/osimage @malt3 +/internal/retry @katexochen +/internal/semver @derpsteb +/internal/sigstore @elchead +/internal/staticupload @malt3 +/internal/versions @3u13r /joinservice @daniel-weisse /keyservice @daniel-weisse /measurement-reader @daniel-weisse -/operators @msanft -/terraform-provider-constellation @msanft -/tools @burgerdev -/upgrade-agent @msanft +/operators @malt3 +/rpm @malt3 +/terraform-provider-constellation @msanft @elchead +/tools @malt3 +/upgrade-agent @3u13r /verify @daniel-weisse # keep-sorted end diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index a123a5d58..9ee09c7ea 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -12,7 +12,7 @@ If you want to support our development: * Share our projects on social media * Join the [Confidential Computing Discord](https://discord.gg/rH8QTH56JN) -Constellation is licensed under the [BUSL](LICENSE). When contributing, you also need to agree to our [Contributor License Agreement](https://cla-assistant.io/edgelesssys/constellation). +Constellation is licensed under the [AGPLv3](LICENSE). When contributing, you also need to agree to our [Contributor License Agreement](https://cla-assistant.io/edgelesssys/constellation). 
## Reporting issues and bugs, asking questions diff --git a/LICENSE b/LICENSE index 42435e491..be3f7b28e 100644 --- a/LICENSE +++ b/LICENSE @@ -1,91 +1,661 @@ -Business Source License 1.1 + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 -Parameters + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. -Licensor: Edgeless Systems GmbH -Licensed Work: Constellation - The Licensed Work is (c) Edgeless Systems GmbH -Additional Use Grant: None + Preamble -Change Date: Four years from the date a MINOR version (SemVer) is published. + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. -Change License: GNU Affero General Public License Version 3 (AGPL-3.0-only) + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. -For information about alternative licensing arrangements for the Software, -please visit: https://www.edgeless.systems/enterprise-support + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. -Notice + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. -License text copyright (c) 2023 MariaDB plc, All Rights Reserved. -“Business Source License” is a trademark of MariaDB plc. + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. ------------------------------------------------------------------------------ + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. -Business Source License 1.1 + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. 
This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. -Terms + The precise terms and conditions for copying, distribution and +modification follow. -The Licensor hereby grants you the right to copy, modify, create derivative -works, redistribute, and make non-production use of the Licensed Work. The -Licensor may make an Additional Use Grant, above, permitting limited -production use. + TERMS AND CONDITIONS -Effective on the Change Date, or the fourth anniversary of the first publicly -available distribution of a specific version of the Licensed Work under this -License, whichever comes first, the Licensor hereby grants you rights under -the terms of the Change License, and the rights granted in the paragraph -above terminate. + 0. Definitions. -If your use of the Licensed Work does not comply with the requirements -currently in effect as described in this License, you must purchase a -commercial license from the Licensor, its affiliated entities, or authorized -resellers, or you must refrain from using the Licensed Work. + "This License" refers to version 3 of the GNU Affero General Public License. -All copies of the original and modified Licensed Work, and derivative works -of the Licensed Work, are subject to this License. This License applies -separately for each version of the Licensed Work and the Change Date may vary -for each version of the Licensed Work released by Licensor. + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. -You must conspicuously display this License on each original or modified copy -of the Licensed Work. If you receive the Licensed Work in original or -modified form from a third party, the terms and conditions set forth in this -License apply to your use of that work. + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. -Any use of the Licensed Work in violation of this License will automatically -terminate your rights under this License for the current and all other -versions of the Licensed Work. + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. -This License does not grant you any right in any trademark or logo of -Licensor or its affiliates (provided that you may use a trademark or logo of -Licensor as expressly required by this License). + A "covered work" means either the unmodified Program or a work based +on the Program. -TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON -AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, -EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND -TITLE. + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. 
-MariaDB hereby grants you permission to use this License’s text to license -your works, and to refer to it using the trademark “Business Source License”, -as long as you comply with the Covenants of Licensor below. + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. -Covenants of Licensor + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. -In consideration of the right to use this License’s text and the “Business -Source License” name and trademark, Licensor covenants to MariaDB, and to all -other recipients of the licensed work to be provided by Licensor: + 1. Source Code. -1. To specify as the Change License the GPL Version 2.0 or any later version, - or a license that is compatible with GPL Version 2.0 or a later version, - where “compatible” means that software provided under the Change License can - be included in a program with software provided under GPL Version 2.0 or a - later version. Licensor may specify additional Change Licenses without - limitation. + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. -2. To either: (a) specify an additional grant of rights to use that does not - impose any additional restriction on the right granted in this License, as - the Additional Use Grant; or (b) insert the text “None”. + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. -3. To specify a Change Date. + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. -4. Not to modify this License in any other way. + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. 
You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. 
+ + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. 
+ + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. + + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. 
If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>.
+ +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +<https://www.gnu.org/licenses/>. diff --git a/MODULE.bazel b/MODULE.bazel deleted file mode 100644 index 01097d490..000000000 --- a/MODULE.bazel +++ /dev/null @@ -1,209 +0,0 @@ -module(name = "constellation") - -bazel_dep(name = "aspect_bazel_lib", version = "2.19.4") - -bazel_lib = use_extension("@aspect_bazel_lib//lib:extensions.bzl", "toolchains") -bazel_lib.yq() -use_repo(bazel_lib, "jq_toolchains") -use_repo(bazel_lib, "yq_toolchains") - -bazel_dep(name = "bazel_skylib", version = "1.7.1") -bazel_dep(name = "gazelle", version = "0.43.0") -bazel_dep(name = "hermetic_cc_toolchain", version = "3.2.0") -bazel_dep(name = "rules_cc", version = "0.1.2") -bazel_dep(name = "rules_go", version = "0.55.1", repo_name = "io_bazel_rules_go") -bazel_dep(name = "rules_pkg", version = "1.1.0") -bazel_dep(name = "rules_proto", version = "7.1.0") -bazel_dep(name = "rules_python", version = "1.4.1") -bazel_dep(name = "rules_shell", version = "0.5.0") - -bazel_dep(name = "buildifier_prebuilt", version = "8.2.0.2", dev_dependency = True) - -go_sdk = use_extension("@io_bazel_rules_go//go:extensions.bzl", "go_sdk") -go_sdk.download( - name = "go_sdk", - patches = ["//3rdparty/bazel/org_golang:go_tls_max_handshake_size.patch"], - version = "1.24.4", -) - -python = use_extension("@rules_python//python/extensions:python.bzl", "python") -python.toolchain( - ignore_root_user_error = True, - python_version = "3.11", -) - -# the use_repo rule needs to list all top-level go dependencies -# update automatically using `bazel mod tidy`.
-go_deps = use_extension("@gazelle//:extensions.bzl", "go_deps") -go_deps.from_file(go_work = "//:go.work") -use_repo( - go_deps, - "cat_dario_mergo", - "com_github_aws_aws_sdk_go", - "com_github_aws_aws_sdk_go_v2", - "com_github_aws_aws_sdk_go_v2_config", - "com_github_aws_aws_sdk_go_v2_credentials", - "com_github_aws_aws_sdk_go_v2_feature_ec2_imds", - "com_github_aws_aws_sdk_go_v2_feature_s3_manager", - "com_github_aws_aws_sdk_go_v2_service_autoscaling", - "com_github_aws_aws_sdk_go_v2_service_cloudfront", - "com_github_aws_aws_sdk_go_v2_service_ec2", - "com_github_aws_aws_sdk_go_v2_service_elasticloadbalancingv2", - "com_github_aws_aws_sdk_go_v2_service_resourcegroupstaggingapi", - "com_github_aws_aws_sdk_go_v2_service_s3", - "com_github_aws_aws_sdk_go_v2_service_secretsmanager", - "com_github_aws_smithy_go", - "com_github_azure_azure_sdk_for_go", - "com_github_azure_azure_sdk_for_go_sdk_azcore", - "com_github_azure_azure_sdk_for_go_sdk_azidentity", - "com_github_azure_azure_sdk_for_go_sdk_resourcemanager_compute_armcompute_v6", - "com_github_azure_azure_sdk_for_go_sdk_resourcemanager_network_armnetwork_v6", - "com_github_azure_azure_sdk_for_go_sdk_security_keyvault_azsecrets", - "com_github_azure_azure_sdk_for_go_sdk_storage_azblob", - "com_github_bazelbuild_buildtools", - "com_github_burntsushi_toml", - "com_github_coreos_go_systemd_v22", - "com_github_docker_docker", - "com_github_edgelesssys_go_azguestattestation", - "com_github_edgelesssys_go_tdx_qpl", - "com_github_foxboron_go_uefi", - "com_github_fsnotify_fsnotify", - "com_github_go_playground_locales", - "com_github_go_playground_universal_translator", - "com_github_go_playground_validator_v10", - "com_github_golang_jwt_jwt_v5", - "com_github_google_go_licenses", - "com_github_google_go_sev_guest", - "com_github_google_go_tdx_guest", - "com_github_google_go_tpm", - "com_github_google_go_tpm_tools", - "com_github_google_keep_sorted", - "com_github_google_uuid", - "com_github_googleapis_gax_go_v2", - "com_github_gophercloud_gophercloud_v2", - "com_github_gophercloud_utils_v2", - "com_github_grpc_ecosystem_go_grpc_middleware_v2", - "com_github_hashicorp_go_kms_wrapping_v2", - "com_github_hashicorp_go_kms_wrapping_wrappers_awskms_v2", - "com_github_hashicorp_go_kms_wrapping_wrappers_azurekeyvault_v2", - "com_github_hashicorp_go_kms_wrapping_wrappers_gcpckms_v2", - "com_github_hashicorp_go_version", - "com_github_hashicorp_hc_install", - "com_github_hashicorp_hcl_v2", - "com_github_hashicorp_terraform_exec", - "com_github_hashicorp_terraform_json", - "com_github_hashicorp_terraform_plugin_framework", - "com_github_hashicorp_terraform_plugin_framework_validators", - "com_github_hashicorp_terraform_plugin_go", - "com_github_hashicorp_terraform_plugin_log", - "com_github_hashicorp_terraform_plugin_testing", - "com_github_hexops_gotextdiff", - "com_github_katexochen_sh_v3", - "com_github_martinjungblut_go_cryptsetup", - "com_github_mattn_go_isatty", - "com_github_mitchellh_go_homedir", - "com_github_onsi_ginkgo_v2", - "com_github_onsi_gomega", - "com_github_pkg_errors", - "com_github_regclient_regclient", - "com_github_rogpeppe_go_internal", - "com_github_samber_slog_multi", - "com_github_schollz_progressbar_v3", - "com_github_secure_systems_lab_go_securesystemslib", - "com_github_siderolabs_talos_pkg_machinery", - "com_github_sigstore_rekor", - "com_github_sigstore_sigstore", - "com_github_spf13_afero", - "com_github_spf13_cobra", - "com_github_spf13_pflag", - "com_github_stretchr_testify", - "com_github_tink_crypto_tink_go_v2", - 
"com_github_vincent_petithory_dataurl", - "com_google_cloud_go_compute", - "com_google_cloud_go_compute_metadata", - "com_google_cloud_go_kms", - "com_google_cloud_go_secretmanager", - "com_google_cloud_go_storage", - "in_gopkg_yaml_v3", - "io_etcd_go_etcd_api_v3", - "io_etcd_go_etcd_client_pkg_v3", - "io_etcd_go_etcd_client_v3", - "io_k8s_api", - "io_k8s_apiextensions_apiserver", - "io_k8s_apimachinery", - "io_k8s_apiserver", - "io_k8s_client_go", - "io_k8s_cluster_bootstrap", - "io_k8s_kubelet", - "io_k8s_kubernetes", - "io_k8s_mount_utils", - "io_k8s_sigs_controller_runtime", - "io_k8s_sigs_yaml", - "io_k8s_utils", - "org_golang_google_api", - "org_golang_google_grpc", - "org_golang_google_protobuf", - "org_golang_x_crypto", - "org_golang_x_exp", - "org_golang_x_mod", - "org_golang_x_sys", - "org_golang_x_text", - "org_golang_x_tools", - "org_golang_x_vuln", - "org_libvirt_go_libvirt", - "org_uber_go_goleak", - "sh_helm_helm_v3", -) - -go_deps_with_disabled_proto_generation = [ - "go.etcd.io/etcd/api/v3", - "k8s.io/apiserver", - "github.com/hashicorp/go-plugin", -] - -[ - go_deps.gazelle_override( - directives = [ - "gazelle:go_generate_proto false", - ], - path = path, - ) - for path in go_deps_with_disabled_proto_generation -] - -go_deps.module_override( - patches = [ - "//3rdparty/bazel/com_github_martinjungblut_go_cryptsetup:com_github_martinjungblut_go_cryptsetup.patch", - ], - path = "github.com/martinjungblut/go-cryptsetup", -) -go_deps.module_override( - patches = [ - "//3rdparty/bazel/org_libvirt_go_libvirt:go_libvirt.patch", - ], - path = "libvirt.org/go/libvirt", -) -go_deps.module_override( - patches = [ - "//3rdparty/bazel/com_github_cloudflare_circl:math_fp448_BUILD_bazel.patch", - "//3rdparty/bazel/com_github_cloudflare_circl:math_fp25519_BUILD_bazel.patch", - "//3rdparty/bazel/com_github_cloudflare_circl:dh_x448_BUILD_bazel.patch", - "//3rdparty/bazel/com_github_cloudflare_circl:dh_x25519_BUILD_bazel.patch", - ], - path = "github.com/cloudflare/circl", -) -go_deps.module_override( - patches = [ - "//3rdparty/bazel/com_github_google_go_tpm_tools:com_github_google_go_tpm_tools.patch", - "//3rdparty/bazel/com_github_google_go_tpm_tools:ms_tpm_20_ref.patch", - "//3rdparty/bazel/com_github_google_go_tpm_tools:include.patch", - ], - path = "github.com/google/go-tpm-tools", -) - -# TODO(msanft): -# Remove once https://github.com/cncf/xds/issues/104 is resolved -go_deps.gazelle_override( - build_file_generation = "on", - path = "github.com/cncf/xds/go", -) diff --git a/MODULE.bazel.lock b/MODULE.bazel.lock deleted file mode 100644 index 4130b9d5d..000000000 --- a/MODULE.bazel.lock +++ /dev/null @@ -1,468 +0,0 @@ -{ - "lockFileVersion": 13, - "registryFileHashes": { - "https://bcr.bazel.build/bazel_registry.json": "8a28e4aff06ee60aed2a8c281907fb8bcbf3b753c91fb5a5c57da3215d5b3497", - "https://bcr.bazel.build/modules/abseil-cpp/20210324.2/MODULE.bazel": "7cd0312e064fde87c8d1cd79ba06c876bd23630c83466e9500321be55c96ace2", - "https://bcr.bazel.build/modules/abseil-cpp/20211102.0/MODULE.bazel": "70390338f7a5106231d20620712f7cccb659cd0e9d073d1991c038eb9fc57589", - "https://bcr.bazel.build/modules/abseil-cpp/20230125.1/MODULE.bazel": "89047429cb0207707b2dface14ba7f8df85273d484c2572755be4bab7ce9c3a0", - "https://bcr.bazel.build/modules/abseil-cpp/20230802.0.bcr.1/MODULE.bazel": "1c8cec495288dccd14fdae6e3f95f772c1c91857047a098fad772034264cc8cb", - "https://bcr.bazel.build/modules/abseil-cpp/20230802.0/MODULE.bazel": "d253ae36a8bd9ee3c5955384096ccb6baf16a1b1e93e858370da0a3b94f77c16", - 
"https://bcr.bazel.build/modules/abseil-cpp/20230802.1/MODULE.bazel": "fa92e2eb41a04df73cdabeec37107316f7e5272650f81d6cc096418fe647b915", - "https://bcr.bazel.build/modules/abseil-cpp/20240116.1/MODULE.bazel": "37bcdb4440fbb61df6a1c296ae01b327f19e9bb521f9b8e26ec854b6f97309ed", - "https://bcr.bazel.build/modules/abseil-cpp/20240116.1/source.json": "9be551b8d4e3ef76875c0d744b5d6a504a27e3ae67bc6b28f46415fd2d2957da", - "https://bcr.bazel.build/modules/apple_support/1.5.0/MODULE.bazel": "50341a62efbc483e8a2a6aec30994a58749bd7b885e18dd96aa8c33031e558ef", - "https://bcr.bazel.build/modules/apple_support/1.5.0/source.json": "eb98a7627c0bc486b57f598ad8da50f6625d974c8f723e9ea71bd39f709c9862", - "https://bcr.bazel.build/modules/aspect_bazel_lib/2.14.0/MODULE.bazel": "2b31ffcc9bdc8295b2167e07a757dbbc9ac8906e7028e5170a3708cecaac119f", - "https://bcr.bazel.build/modules/aspect_bazel_lib/2.19.4/MODULE.bazel": "d39e4b18e594d81c526d7cfc513e7ecfa8ca9eb5b61488d1d790faa94b34f2d9", - "https://bcr.bazel.build/modules/aspect_bazel_lib/2.19.4/source.json": "506fa924e19fd8a33d617e33a17e4fce845f9ff9acb3a2aa7cf7300650698705", - "https://bcr.bazel.build/modules/aspect_bazel_lib/2.8.1/MODULE.bazel": "812d2dd42f65dca362152101fbec418029cc8fd34cbad1a2fde905383d705838", - "https://bcr.bazel.build/modules/bazel_features/1.1.0/MODULE.bazel": "cfd42ff3b815a5f39554d97182657f8c4b9719568eb7fded2b9135f084bf760b", - "https://bcr.bazel.build/modules/bazel_features/1.1.1/MODULE.bazel": "27b8c79ef57efe08efccbd9dd6ef70d61b4798320b8d3c134fd571f78963dbcd", - "https://bcr.bazel.build/modules/bazel_features/1.11.0/MODULE.bazel": "f9382337dd5a474c3b7d334c2f83e50b6eaedc284253334cf823044a26de03e8", - "https://bcr.bazel.build/modules/bazel_features/1.15.0/MODULE.bazel": "d38ff6e517149dc509406aca0db3ad1efdd890a85e049585b7234d04238e2a4d", - "https://bcr.bazel.build/modules/bazel_features/1.17.0/MODULE.bazel": "039de32d21b816b47bd42c778e0454217e9c9caac4a3cf8e15c7231ee3ddee4d", - "https://bcr.bazel.build/modules/bazel_features/1.18.0/MODULE.bazel": "1be0ae2557ab3a72a57aeb31b29be347bcdc5d2b1eb1e70f39e3851a7e97041a", - "https://bcr.bazel.build/modules/bazel_features/1.19.0/MODULE.bazel": "59adcdf28230d220f0067b1f435b8537dd033bfff8db21335ef9217919c7fb58", - "https://bcr.bazel.build/modules/bazel_features/1.20.0/MODULE.bazel": "8b85300b9c8594752e0721a37210e34879d23adc219ed9dc8f4104a4a1750920", - "https://bcr.bazel.build/modules/bazel_features/1.21.0/MODULE.bazel": "675642261665d8eea09989aa3b8afb5c37627f1be178382c320d1b46afba5e3b", - "https://bcr.bazel.build/modules/bazel_features/1.28.0/MODULE.bazel": "4b4200e6cbf8fa335b2c3f43e1d6ef3e240319c33d43d60cc0fbd4b87ece299d", - "https://bcr.bazel.build/modules/bazel_features/1.28.0/source.json": "16a3fc5b4483cb307643791f5a4b7365fa98d2e70da7c378cdbde55f0c0b32cf", - "https://bcr.bazel.build/modules/bazel_features/1.4.1/MODULE.bazel": "e45b6bb2350aff3e442ae1111c555e27eac1d915e77775f6fdc4b351b758b5d7", - "https://bcr.bazel.build/modules/bazel_features/1.9.0/MODULE.bazel": "885151d58d90d8d9c811eb75e3288c11f850e1d6b481a8c9f766adee4712358b", - "https://bcr.bazel.build/modules/bazel_features/1.9.1/MODULE.bazel": "8f679097876a9b609ad1f60249c49d68bfab783dd9be012faf9d82547b14815a", - "https://bcr.bazel.build/modules/bazel_skylib/1.0.3/MODULE.bazel": "bcb0fd896384802d1ad283b4e4eb4d718eebd8cb820b0a2c3a347fb971afd9d8", - "https://bcr.bazel.build/modules/bazel_skylib/1.1.1/MODULE.bazel": "1add3e7d93ff2e6998f9e118022c84d163917d912f5afafb3058e3d2f1545b5e", - "https://bcr.bazel.build/modules/bazel_skylib/1.2.0/MODULE.bazel": 
"44fe84260e454ed94ad326352a698422dbe372b21a1ac9f3eab76eb531223686", - "https://bcr.bazel.build/modules/bazel_skylib/1.2.1/MODULE.bazel": "f35baf9da0efe45fa3da1696ae906eea3d615ad41e2e3def4aeb4e8bc0ef9a7a", - "https://bcr.bazel.build/modules/bazel_skylib/1.3.0/MODULE.bazel": "20228b92868bf5cfc41bda7afc8a8ba2a543201851de39d990ec957b513579c5", - "https://bcr.bazel.build/modules/bazel_skylib/1.4.1/MODULE.bazel": "a0dcb779424be33100dcae821e9e27e4f2901d9dfd5333efe5ac6a8d7ab75e1d", - "https://bcr.bazel.build/modules/bazel_skylib/1.4.2/MODULE.bazel": "3bd40978e7a1fac911d5989e6b09d8f64921865a45822d8b09e815eaa726a651", - "https://bcr.bazel.build/modules/bazel_skylib/1.5.0/MODULE.bazel": "32880f5e2945ce6a03d1fbd588e9198c0a959bb42297b2cfaf1685b7bc32e138", - "https://bcr.bazel.build/modules/bazel_skylib/1.6.1/MODULE.bazel": "8fdee2dbaace6c252131c00e1de4b165dc65af02ea278476187765e1a617b917", - "https://bcr.bazel.build/modules/bazel_skylib/1.7.0/MODULE.bazel": "0db596f4563de7938de764cc8deeabec291f55e8ec15299718b93c4423e9796d", - "https://bcr.bazel.build/modules/bazel_skylib/1.7.1/MODULE.bazel": "3120d80c5861aa616222ec015332e5f8d3171e062e3e804a2a0253e1be26e59b", - "https://bcr.bazel.build/modules/bazel_skylib/1.7.1/source.json": "f121b43eeefc7c29efbd51b83d08631e2347297c95aac9764a701f2a6a2bb953", - "https://bcr.bazel.build/modules/buildifier_prebuilt/8.2.0.2/MODULE.bazel": "a9b689711d5b69f9db741649b218c119b9fdf82924ba390415037e09798edd03", - "https://bcr.bazel.build/modules/buildifier_prebuilt/8.2.0.2/source.json": "51eb0a4b38aaaeab7fa64361576d616c4d8bfd0f17a0a10184aeab7084d79f8e", - "https://bcr.bazel.build/modules/buildozer/7.1.2/MODULE.bazel": "2e8dd40ede9c454042645fd8d8d0cd1527966aa5c919de86661e62953cd73d84", - "https://bcr.bazel.build/modules/buildozer/7.1.2/source.json": "c9028a501d2db85793a6996205c8de120944f50a0d570438fcae0457a5f9d1f8", - "https://bcr.bazel.build/modules/gazelle/0.32.0/MODULE.bazel": "b499f58a5d0d3537f3cf5b76d8ada18242f64ec474d8391247438bf04f58c7b8", - "https://bcr.bazel.build/modules/gazelle/0.33.0/MODULE.bazel": "a13a0f279b462b784fb8dd52a4074526c4a2afe70e114c7d09066097a46b3350", - "https://bcr.bazel.build/modules/gazelle/0.34.0/MODULE.bazel": "abdd8ce4d70978933209db92e436deb3a8b737859e9354fb5fd11fb5c2004c8a", - "https://bcr.bazel.build/modules/gazelle/0.36.0/MODULE.bazel": "e375d5d6e9a6ca59b0cb38b0540bc9a05b6aa926d322f2de268ad267a2ee74c0", - "https://bcr.bazel.build/modules/gazelle/0.43.0/MODULE.bazel": "846e1fe396eefc0f9ddad2b33e9bd364dd993fc2f42a88e31590fe0b0eefa3f0", - "https://bcr.bazel.build/modules/gazelle/0.43.0/source.json": "021a77f6625906d9d176e2fa351175e842622a5d45989312f2ad4924aab72df6", - "https://bcr.bazel.build/modules/google_benchmark/1.8.2/MODULE.bazel": "a70cf1bba851000ba93b58ae2f6d76490a9feb74192e57ab8e8ff13c34ec50cb", - "https://bcr.bazel.build/modules/googletest/1.11.0/MODULE.bazel": "3a83f095183f66345ca86aa13c58b59f9f94a2f81999c093d4eeaa2d262d12f4", - "https://bcr.bazel.build/modules/googletest/1.14.0.bcr.1/MODULE.bazel": "22c31a561553727960057361aa33bf20fb2e98584bc4fec007906e27053f80c6", - "https://bcr.bazel.build/modules/googletest/1.14.0.bcr.1/source.json": "41e9e129f80d8c8bf103a7acc337b76e54fad1214ac0a7084bf24f4cd924b8b4", - "https://bcr.bazel.build/modules/googletest/1.14.0/MODULE.bazel": "cfbcbf3e6eac06ef9d85900f64424708cc08687d1b527f0ef65aa7517af8118f", - "https://bcr.bazel.build/modules/hermetic_cc_toolchain/3.2.0/MODULE.bazel": "8e7faec81c1f0fb65fe277ecfc75ea3636ce7bf848f88037fedd58e6eeacc28f", - 
"https://bcr.bazel.build/modules/hermetic_cc_toolchain/3.2.0/source.json": "67c2b76edff27c3ec449a935fc9468996d1a730b52a9a6f97c40c8a06d381630", - "https://bcr.bazel.build/modules/jq.bzl/0.1.0/MODULE.bazel": "2ce69b1af49952cd4121a9c3055faa679e748ce774c7f1fda9657f936cae902f", - "https://bcr.bazel.build/modules/jq.bzl/0.1.0/source.json": "746bf13cac0860f091df5e4911d0c593971cd8796b5ad4e809b2f8e133eee3d5", - "https://bcr.bazel.build/modules/jsoncpp/1.9.5/MODULE.bazel": "31271aedc59e815656f5736f282bb7509a97c7ecb43e927ac1a37966e0578075", - "https://bcr.bazel.build/modules/jsoncpp/1.9.5/source.json": "4108ee5085dd2885a341c7fab149429db457b3169b86eb081fa245eadf69169d", - "https://bcr.bazel.build/modules/libpfm/4.11.0/MODULE.bazel": "45061ff025b301940f1e30d2c16bea596c25b176c8b6b3087e92615adbd52902", - "https://bcr.bazel.build/modules/package_metadata/0.0.2/MODULE.bazel": "fb8d25550742674d63d7b250063d4580ca530499f045d70748b1b142081ebb92", - "https://bcr.bazel.build/modules/package_metadata/0.0.2/source.json": "e53a759a72488d2c0576f57491ef2da0cf4aab05ac0997314012495935531b73", - "https://bcr.bazel.build/modules/platforms/0.0.10/MODULE.bazel": "8cb8efaf200bdeb2150d93e162c40f388529a25852b332cec879373771e48ed5", - "https://bcr.bazel.build/modules/platforms/0.0.11/MODULE.bazel": "0daefc49732e227caa8bfa834d65dc52e8cc18a2faf80df25e8caea151a9413f", - "https://bcr.bazel.build/modules/platforms/0.0.11/source.json": "f7e188b79ebedebfe75e9e1d098b8845226c7992b307e28e1496f23112e8fc29", - "https://bcr.bazel.build/modules/platforms/0.0.4/MODULE.bazel": "9b328e31ee156f53f3c416a64f8491f7eb731742655a47c9eec4703a71644aee", - "https://bcr.bazel.build/modules/platforms/0.0.5/MODULE.bazel": "5733b54ea419d5eaf7997054bb55f6a1d0b5ff8aedf0176fef9eea44f3acda37", - "https://bcr.bazel.build/modules/platforms/0.0.6/MODULE.bazel": "ad6eeef431dc52aefd2d77ed20a4b353f8ebf0f4ecdd26a807d2da5aa8cd0615", - "https://bcr.bazel.build/modules/platforms/0.0.7/MODULE.bazel": "72fd4a0ede9ee5c021f6a8dd92b503e089f46c227ba2813ff183b71616034814", - "https://bcr.bazel.build/modules/platforms/0.0.8/MODULE.bazel": "9f142c03e348f6d263719f5074b21ef3adf0b139ee4c5133e2aa35664da9eb2d", - "https://bcr.bazel.build/modules/platforms/0.0.9/MODULE.bazel": "4a87a60c927b56ddd67db50c89acaa62f4ce2a1d2149ccb63ffd871d5ce29ebc", - "https://bcr.bazel.build/modules/protobuf/21.7/MODULE.bazel": "a5a29bb89544f9b97edce05642fac225a808b5b7be74038ea3640fae2f8e66a7", - "https://bcr.bazel.build/modules/protobuf/27.0/MODULE.bazel": "7873b60be88844a0a1d8f80b9d5d20cfbd8495a689b8763e76c6372998d3f64c", - "https://bcr.bazel.build/modules/protobuf/27.1/MODULE.bazel": "703a7b614728bb06647f965264967a8ef1c39e09e8f167b3ca0bb1fd80449c0d", - "https://bcr.bazel.build/modules/protobuf/29.0-rc2.bcr.1/MODULE.bazel": "52f4126f63a2f0bbf36b99c2a87648f08467a4eaf92ba726bc7d6a500bbf770c", - "https://bcr.bazel.build/modules/protobuf/29.0-rc2/MODULE.bazel": "6241d35983510143049943fc0d57937937122baf1b287862f9dc8590fc4c37df", - "https://bcr.bazel.build/modules/protobuf/29.0-rc3/MODULE.bazel": "33c2dfa286578573afc55a7acaea3cada4122b9631007c594bf0729f41c8de92", - "https://bcr.bazel.build/modules/protobuf/29.1/MODULE.bazel": "557c3457560ff49e122ed76c0bc3397a64af9574691cb8201b4e46d4ab2ecb95", - "https://bcr.bazel.build/modules/protobuf/29.1/source.json": "04cca85dce26b895ed037d98336d860367fe09919208f2ad383f0df1aff63199", - "https://bcr.bazel.build/modules/protobuf/3.19.0/MODULE.bazel": "6b5fbb433f760a99a22b18b6850ed5784ef0e9928a72668b66e4d7ccd47db9b0", - 
"https://bcr.bazel.build/modules/protobuf/3.19.2/MODULE.bazel": "532ffe5f2186b69fdde039efe6df13ba726ff338c6bc82275ad433013fa10573", - "https://bcr.bazel.build/modules/protobuf/3.19.6/MODULE.bazel": "9233edc5e1f2ee276a60de3eaa47ac4132302ef9643238f23128fea53ea12858", - "https://bcr.bazel.build/modules/pybind11_bazel/2.11.1/MODULE.bazel": "88af1c246226d87e65be78ed49ecd1e6f5e98648558c14ce99176da041dc378e", - "https://bcr.bazel.build/modules/pybind11_bazel/2.11.1/source.json": "be4789e951dd5301282729fe3d4938995dc4c1a81c2ff150afc9f1b0504c6022", - "https://bcr.bazel.build/modules/re2/2023-09-01/MODULE.bazel": "cb3d511531b16cfc78a225a9e2136007a48cf8a677e4264baeab57fe78a80206", - "https://bcr.bazel.build/modules/re2/2023-09-01/source.json": "e044ce89c2883cd957a2969a43e79f7752f9656f6b20050b62f90ede21ec6eb4", - "https://bcr.bazel.build/modules/rules_android/0.1.1/MODULE.bazel": "48809ab0091b07ad0182defb787c4c5328bd3a278938415c00a7b69b50c4d3a8", - "https://bcr.bazel.build/modules/rules_android/0.1.1/source.json": "e6986b41626ee10bdc864937ffb6d6bf275bb5b9c65120e6137d56e6331f089e", - "https://bcr.bazel.build/modules/rules_cc/0.0.1/MODULE.bazel": "cb2aa0747f84c6c3a78dad4e2049c154f08ab9d166b1273835a8174940365647", - "https://bcr.bazel.build/modules/rules_cc/0.0.10/MODULE.bazel": "ec1705118f7eaedd6e118508d3d26deba2a4e76476ada7e0e3965211be012002", - "https://bcr.bazel.build/modules/rules_cc/0.0.13/MODULE.bazel": "0e8529ed7b323dad0775ff924d2ae5af7640b23553dfcd4d34344c7e7a867191", - "https://bcr.bazel.build/modules/rules_cc/0.0.15/MODULE.bazel": "6704c35f7b4a72502ee81f61bf88706b54f06b3cbe5558ac17e2e14666cd5dcc", - "https://bcr.bazel.build/modules/rules_cc/0.0.16/MODULE.bazel": "7661303b8fc1b4d7f532e54e9d6565771fea666fbdf839e0a86affcd02defe87", - "https://bcr.bazel.build/modules/rules_cc/0.0.2/MODULE.bazel": "6915987c90970493ab97393024c156ea8fb9f3bea953b2f3ec05c34f19b5695c", - "https://bcr.bazel.build/modules/rules_cc/0.0.6/MODULE.bazel": "abf360251023dfe3efcef65ab9d56beefa8394d4176dd29529750e1c57eaa33f", - "https://bcr.bazel.build/modules/rules_cc/0.0.8/MODULE.bazel": "964c85c82cfeb6f3855e6a07054fdb159aced38e99a5eecf7bce9d53990afa3e", - "https://bcr.bazel.build/modules/rules_cc/0.0.9/MODULE.bazel": "836e76439f354b89afe6a911a7adf59a6b2518fafb174483ad78a2a2fde7b1c5", - "https://bcr.bazel.build/modules/rules_cc/0.1.2/MODULE.bazel": "557ddc3a96858ec0d465a87c0a931054d7dcfd6583af2c7ed3baf494407fd8d0", - "https://bcr.bazel.build/modules/rules_cc/0.1.2/source.json": "53fcb09b5816c83ca60d9d7493faf3bfaf410dfc2f15deb52d6ddd146b8d43f0", - "https://bcr.bazel.build/modules/rules_foreign_cc/0.9.0/MODULE.bazel": "c9e8c682bf75b0e7c704166d79b599f93b72cfca5ad7477df596947891feeef6", - "https://bcr.bazel.build/modules/rules_fuzzing/0.5.2/MODULE.bazel": "40c97d1144356f52905566c55811f13b299453a14ac7769dfba2ac38192337a8", - "https://bcr.bazel.build/modules/rules_fuzzing/0.5.2/source.json": "c8b1e2c717646f1702290959a3302a178fb639d987ab61d548105019f11e527e", - "https://bcr.bazel.build/modules/rules_go/0.41.0/MODULE.bazel": "55861d8e8bb0e62cbd2896f60ff303f62ffcb0eddb74ecb0e5c0cbe36fc292c8", - "https://bcr.bazel.build/modules/rules_go/0.42.0/MODULE.bazel": "8cfa875b9aa8c6fce2b2e5925e73c1388173ea3c32a0db4d2b4804b453c14270", - "https://bcr.bazel.build/modules/rules_go/0.46.0/MODULE.bazel": "3477df8bdcc49e698b9d25f734c4f3a9f5931ff34ee48a2c662be168f5f2d3fd", - "https://bcr.bazel.build/modules/rules_go/0.50.1/MODULE.bazel": "b91a308dc5782bb0a8021ad4330c81fea5bda77f96b9e4c117b9b9c8f6665ee0", - 
"https://bcr.bazel.build/modules/rules_go/0.55.1/MODULE.bazel": "a57a6fc59a74326c0b440d07cca209edf13c7d1a641e48cfbeab56e79f873609", - "https://bcr.bazel.build/modules/rules_go/0.55.1/source.json": "827a740c8959c9d20616889e7746cde4dcc6ee80d25146943627ccea0736328f", - "https://bcr.bazel.build/modules/rules_java/4.0.0/MODULE.bazel": "5a78a7ae82cd1a33cef56dc578c7d2a46ed0dca12643ee45edbb8417899e6f74", - "https://bcr.bazel.build/modules/rules_java/5.3.5/MODULE.bazel": "a4ec4f2db570171e3e5eb753276ee4b389bae16b96207e9d3230895c99644b86", - "https://bcr.bazel.build/modules/rules_java/6.3.0/MODULE.bazel": "a97c7678c19f236a956ad260d59c86e10a463badb7eb2eda787490f4c969b963", - "https://bcr.bazel.build/modules/rules_java/6.5.2/MODULE.bazel": "1d440d262d0e08453fa0c4d8f699ba81609ed0e9a9a0f02cd10b3e7942e61e31", - "https://bcr.bazel.build/modules/rules_java/7.10.0/MODULE.bazel": "530c3beb3067e870561739f1144329a21c851ff771cd752a49e06e3dc9c2e71a", - "https://bcr.bazel.build/modules/rules_java/7.12.2/MODULE.bazel": "579c505165ee757a4280ef83cda0150eea193eed3bef50b1004ba88b99da6de6", - "https://bcr.bazel.build/modules/rules_java/7.2.0/MODULE.bazel": "06c0334c9be61e6cef2c8c84a7800cef502063269a5af25ceb100b192453d4ab", - "https://bcr.bazel.build/modules/rules_java/7.6.1/MODULE.bazel": "2f14b7e8a1aa2f67ae92bc69d1ec0fa8d9f827c4e17ff5e5f02e91caa3b2d0fe", - "https://bcr.bazel.build/modules/rules_java/7.6.5/MODULE.bazel": "481164be5e02e4cab6e77a36927683263be56b7e36fef918b458d7a8a1ebadb1", - "https://bcr.bazel.build/modules/rules_java/8.3.2/MODULE.bazel": "7336d5511ad5af0b8615fdc7477535a2e4e723a357b6713af439fe8cf0195017", - "https://bcr.bazel.build/modules/rules_java/8.5.1/MODULE.bazel": "d8a9e38cc5228881f7055a6079f6f7821a073df3744d441978e7a43e20226939", - "https://bcr.bazel.build/modules/rules_java/8.5.1/source.json": "db1a77d81b059e0f84985db67a22f3f579a529a86b7997605be3d214a0abe38e", - "https://bcr.bazel.build/modules/rules_jvm_external/4.4.2/MODULE.bazel": "a56b85e418c83eb1839819f0b515c431010160383306d13ec21959ac412d2fe7", - "https://bcr.bazel.build/modules/rules_jvm_external/5.1/MODULE.bazel": "33f6f999e03183f7d088c9be518a63467dfd0be94a11d0055fe2d210f89aa909", - "https://bcr.bazel.build/modules/rules_jvm_external/5.2/MODULE.bazel": "d9351ba35217ad0de03816ef3ed63f89d411349353077348a45348b096615036", - "https://bcr.bazel.build/modules/rules_jvm_external/6.3/MODULE.bazel": "c998e060b85f71e00de5ec552019347c8bca255062c990ac02d051bb80a38df0", - "https://bcr.bazel.build/modules/rules_jvm_external/6.3/source.json": "6f5f5a5a4419ae4e37c35a5bb0a6ae657ed40b7abc5a5189111b47fcebe43197", - "https://bcr.bazel.build/modules/rules_kotlin/1.9.6/MODULE.bazel": "d269a01a18ee74d0335450b10f62c9ed81f2321d7958a2934e44272fe82dcef3", - "https://bcr.bazel.build/modules/rules_kotlin/1.9.6/source.json": "2faa4794364282db7c06600b7e5e34867a564ae91bda7cae7c29c64e9466b7d5", - "https://bcr.bazel.build/modules/rules_license/0.0.3/MODULE.bazel": "627e9ab0247f7d1e05736b59dbb1b6871373de5ad31c3011880b4133cafd4bd0", - "https://bcr.bazel.build/modules/rules_license/0.0.7/MODULE.bazel": "088fbeb0b6a419005b89cf93fe62d9517c0a2b8bb56af3244af65ecfe37e7d5d", - "https://bcr.bazel.build/modules/rules_license/1.0.0/MODULE.bazel": "a7fda60eefdf3d8c827262ba499957e4df06f659330bbe6cdbdb975b768bb65c", - "https://bcr.bazel.build/modules/rules_license/1.0.0/source.json": "a52c89e54cc311196e478f8382df91c15f7a2bfdf4c6cd0e2675cc2ff0b56efb", - "https://bcr.bazel.build/modules/rules_pkg/0.7.0/MODULE.bazel": "df99f03fc7934a4737122518bb87e667e62d780b610910f0447665a7e2be62dc", - 
"https://bcr.bazel.build/modules/rules_pkg/1.0.1/MODULE.bazel": "5b1df97dbc29623bccdf2b0dcd0f5cb08e2f2c9050aab1092fd39a41e82686ff", - "https://bcr.bazel.build/modules/rules_pkg/1.1.0/MODULE.bazel": "9db8031e71b6ef32d1846106e10dd0ee2deac042bd9a2de22b4761b0c3036453", - "https://bcr.bazel.build/modules/rules_pkg/1.1.0/source.json": "fef768df13a92ce6067e1cd0cdc47560dace01354f1d921cfb1d632511f7d608", - "https://bcr.bazel.build/modules/rules_proto/4.0.0/MODULE.bazel": "a7a7b6ce9bee418c1a760b3d84f83a299ad6952f9903c67f19e4edd964894e06", - "https://bcr.bazel.build/modules/rules_proto/5.3.0-21.7/MODULE.bazel": "e8dff86b0971688790ae75528fe1813f71809b5afd57facb44dad9e8eca631b7", - "https://bcr.bazel.build/modules/rules_proto/6.0.0/MODULE.bazel": "b531d7f09f58dce456cd61b4579ce8c86b38544da75184eadaf0a7cb7966453f", - "https://bcr.bazel.build/modules/rules_proto/6.0.2/MODULE.bazel": "ce916b775a62b90b61888052a416ccdda405212b6aaeb39522f7dc53431a5e73", - "https://bcr.bazel.build/modules/rules_proto/7.0.2/MODULE.bazel": "bf81793bd6d2ad89a37a40693e56c61b0ee30f7a7fdbaf3eabbf5f39de47dea2", - "https://bcr.bazel.build/modules/rules_proto/7.1.0/MODULE.bazel": "002d62d9108f75bb807cd56245d45648f38275cb3a99dcd45dfb864c5d74cb96", - "https://bcr.bazel.build/modules/rules_proto/7.1.0/source.json": "39f89066c12c24097854e8f57ab8558929f9c8d474d34b2c00ac04630ad8940e", - "https://bcr.bazel.build/modules/rules_python/0.10.2/MODULE.bazel": "cc82bc96f2997baa545ab3ce73f196d040ffb8756fd2d66125a530031cd90e5f", - "https://bcr.bazel.build/modules/rules_python/0.22.1/MODULE.bazel": "26114f0c0b5e93018c0c066d6673f1a2c3737c7e90af95eff30cfee38d0bbac7", - "https://bcr.bazel.build/modules/rules_python/0.23.1/MODULE.bazel": "49ffccf0511cb8414de28321f5fcf2a31312b47c40cc21577144b7447f2bf300", - "https://bcr.bazel.build/modules/rules_python/0.25.0/MODULE.bazel": "72f1506841c920a1afec76975b35312410eea3aa7b63267436bfb1dd91d2d382", - "https://bcr.bazel.build/modules/rules_python/0.28.0/MODULE.bazel": "cba2573d870babc976664a912539b320cbaa7114cd3e8f053c720171cde331ed", - "https://bcr.bazel.build/modules/rules_python/0.31.0/MODULE.bazel": "93a43dc47ee570e6ec9f5779b2e64c1476a6ce921c48cc9a1678a91dd5f8fd58", - "https://bcr.bazel.build/modules/rules_python/0.4.0/MODULE.bazel": "9208ee05fd48bf09ac60ed269791cf17fb343db56c8226a720fbb1cdf467166c", - "https://bcr.bazel.build/modules/rules_python/1.0.0/MODULE.bazel": "898a3d999c22caa585eb062b600f88654bf92efb204fa346fb55f6f8edffca43", - "https://bcr.bazel.build/modules/rules_python/1.4.1/MODULE.bazel": "8991ad45bdc25018301d6b7e1d3626afc3c8af8aaf4bc04f23d0b99c938b73a6", - "https://bcr.bazel.build/modules/rules_python/1.4.1/source.json": "8ec8c90c70ccacc4de8ca1b97f599e756fb59173e898ee08b733006650057c07", - "https://bcr.bazel.build/modules/rules_shell/0.2.0/MODULE.bazel": "fda8a652ab3c7d8fee214de05e7a9916d8b28082234e8d2c0094505c5268ed3c", - "https://bcr.bazel.build/modules/rules_shell/0.3.0/MODULE.bazel": "de4402cd12f4cc8fda2354fce179fdb068c0b9ca1ec2d2b17b3e21b24c1a937b", - "https://bcr.bazel.build/modules/rules_shell/0.4.1/MODULE.bazel": "00e501db01bbf4e3e1dd1595959092c2fadf2087b2852d3f553b5370f5633592", - "https://bcr.bazel.build/modules/rules_shell/0.5.0/MODULE.bazel": "8c8447370594d45539f66858b602b0bb2cb2d3401a4ebb9ad25830c59c0f366d", - "https://bcr.bazel.build/modules/rules_shell/0.5.0/source.json": "3038276f07cbbdd1c432d1f80a2767e34143ffbb03cfa043f017e66adbba324c", - "https://bcr.bazel.build/modules/stardoc/0.5.1/MODULE.bazel": "1a05d92974d0c122f5ccf09291442580317cdd859f07a8655f1db9a60374f9f8", - 
"https://bcr.bazel.build/modules/stardoc/0.5.3/MODULE.bazel": "c7f6948dae6999bf0db32c1858ae345f112cacf98f174c7a8bb707e41b974f1c", - "https://bcr.bazel.build/modules/stardoc/0.6.2/MODULE.bazel": "7060193196395f5dd668eda046ccbeacebfd98efc77fed418dbe2b82ffaa39fd", - "https://bcr.bazel.build/modules/stardoc/0.7.0/MODULE.bazel": "05e3d6d30c099b6770e97da986c53bd31844d7f13d41412480ea265ac9e8079c", - "https://bcr.bazel.build/modules/stardoc/0.7.2/MODULE.bazel": "fc152419aa2ea0f51c29583fab1e8c99ddefd5b3778421845606ee628629e0e5", - "https://bcr.bazel.build/modules/stardoc/0.7.2/source.json": "58b029e5e901d6802967754adf0a9056747e8176f017cfe3607c0851f4d42216", - "https://bcr.bazel.build/modules/tar.bzl/0.2.1/MODULE.bazel": "52d1c00a80a8cc67acbd01649e83d8dd6a9dc426a6c0b754a04fe8c219c76468", - "https://bcr.bazel.build/modules/tar.bzl/0.2.1/source.json": "600ac6ff61744667a439e7b814ae59c1f29632c3984fccf8000c64c9db8d7bb6", - "https://bcr.bazel.build/modules/upb/0.0.0-20220923-a547704/MODULE.bazel": "7298990c00040a0e2f121f6c32544bab27d4452f80d9ce51349b1a28f3005c43", - "https://bcr.bazel.build/modules/yq.bzl/0.1.1/MODULE.bazel": "9039681f9bcb8958ee2c87ffc74bdafba9f4369096a2b5634b88abc0eaefa072", - "https://bcr.bazel.build/modules/yq.bzl/0.1.1/source.json": "2d2bad780a9f2b9195a4a370314d2c17ae95eaa745cefc2e12fbc49759b15aa3", - "https://bcr.bazel.build/modules/zlib/1.2.11/MODULE.bazel": "07b389abc85fdbca459b69e2ec656ae5622873af3f845e1c9d80fe179f3effa0", - "https://bcr.bazel.build/modules/zlib/1.2.12/MODULE.bazel": "3b1a8834ada2a883674be8cbd36ede1b6ec481477ada359cd2d3ddc562340b27", - "https://bcr.bazel.build/modules/zlib/1.3.1.bcr.3/MODULE.bazel": "af322bc08976524477c79d1e45e241b6efbeb918c497e8840b8ab116802dda79", - "https://bcr.bazel.build/modules/zlib/1.3.1.bcr.3/source.json": "2be409ac3c7601245958cd4fcdff4288be79ed23bd690b4b951f500d54ee6e7d", - "https://bcr.bazel.build/modules/zlib/1.3.1/MODULE.bazel": "751c9940dcfe869f5f7274e1295422a34623555916eb98c174c1e945594bf198" - }, - "selectedYankedVersions": {}, - "moduleExtensions": { - "@@apple_support~//crosstool:setup.bzl%apple_cc_configure_extension": { - "general": { - "bzlTransitiveDigest": "PjIds3feoYE8SGbbIq2SFTZy3zmxeO2tQevJZNDo7iY=", - "usagesDigest": "+hz7IHWN6A1oVJJWNDB6yZRG+RYhF76wAYItpAeIUIg=", - "recordedFileInputs": {}, - "recordedDirentsInputs": {}, - "envVariables": {}, - "generatedRepoSpecs": { - "local_config_apple_cc_toolchains": { - "bzlFile": "@@apple_support~//crosstool:setup.bzl", - "ruleClassName": "_apple_cc_autoconf_toolchains", - "attributes": {} - }, - "local_config_apple_cc": { - "bzlFile": "@@apple_support~//crosstool:setup.bzl", - "ruleClassName": "_apple_cc_autoconf", - "attributes": {} - } - }, - "recordedRepoMappingEntries": [ - [ - "apple_support~", - "bazel_tools", - "bazel_tools" - ] - ] - } - }, - "@@pybind11_bazel~//:python_configure.bzl%extension": { - "general": { - "bzlTransitiveDigest": "whINYge95GgPtysKDbNHQ0ZlWYdtKybHs5y2tLF+x7Q=", - "usagesDigest": "gNvOHVcAlwgDsNXD0amkv2CC96mnaCThPQoE44y8K+w=", - "recordedFileInputs": { - "@@pybind11_bazel~//MODULE.bazel": "88af1c246226d87e65be78ed49ecd1e6f5e98648558c14ce99176da041dc378e" - }, - "recordedDirentsInputs": {}, - "envVariables": {}, - "generatedRepoSpecs": { - "local_config_python": { - "bzlFile": "@@pybind11_bazel~//:python_configure.bzl", - "ruleClassName": "python_configure", - "attributes": {} - }, - "pybind11": { - "bzlFile": "@@bazel_tools//tools/build_defs/repo:http.bzl", - "ruleClassName": "http_archive", - "attributes": { - "build_file": 
"@@pybind11_bazel~//:pybind11.BUILD", - "strip_prefix": "pybind11-2.11.1", - "urls": [ - "https://github.com/pybind/pybind11/archive/v2.11.1.zip" - ] - } - } - }, - "recordedRepoMappingEntries": [ - [ - "pybind11_bazel~", - "bazel_tools", - "bazel_tools" - ] - ] - } - }, - "@@rules_fuzzing~//fuzzing/private:extensions.bzl%non_module_dependencies": { - "general": { - "bzlTransitiveDigest": "hVgJRQ3Er45/UUAgNn1Yp2Khcp/Y8WyafA2kXIYmQ5M=", - "usagesDigest": "YnIrdgwnf3iCLfChsltBdZ7yOJh706lpa2vww/i2pDI=", - "recordedFileInputs": {}, - "recordedDirentsInputs": {}, - "envVariables": {}, - "generatedRepoSpecs": { - "platforms": { - "bzlFile": "@@bazel_tools//tools/build_defs/repo:http.bzl", - "ruleClassName": "http_archive", - "attributes": { - "urls": [ - "https://mirror.bazel.build/github.com/bazelbuild/platforms/releases/download/0.0.8/platforms-0.0.8.tar.gz", - "https://github.com/bazelbuild/platforms/releases/download/0.0.8/platforms-0.0.8.tar.gz" - ], - "sha256": "8150406605389ececb6da07cbcb509d5637a3ab9a24bc69b1101531367d89d74" - } - }, - "rules_python": { - "bzlFile": "@@bazel_tools//tools/build_defs/repo:http.bzl", - "ruleClassName": "http_archive", - "attributes": { - "sha256": "d70cd72a7a4880f0000a6346253414825c19cdd40a28289bdf67b8e6480edff8", - "strip_prefix": "rules_python-0.28.0", - "url": "https://github.com/bazelbuild/rules_python/releases/download/0.28.0/rules_python-0.28.0.tar.gz" - } - }, - "bazel_skylib": { - "bzlFile": "@@bazel_tools//tools/build_defs/repo:http.bzl", - "ruleClassName": "http_archive", - "attributes": { - "sha256": "cd55a062e763b9349921f0f5db8c3933288dc8ba4f76dd9416aac68acee3cb94", - "urls": [ - "https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.5.0/bazel-skylib-1.5.0.tar.gz", - "https://github.com/bazelbuild/bazel-skylib/releases/download/1.5.0/bazel-skylib-1.5.0.tar.gz" - ] - } - }, - "com_google_absl": { - "bzlFile": "@@bazel_tools//tools/build_defs/repo:http.bzl", - "ruleClassName": "http_archive", - "attributes": { - "urls": [ - "https://github.com/abseil/abseil-cpp/archive/refs/tags/20240116.1.zip" - ], - "strip_prefix": "abseil-cpp-20240116.1", - "integrity": "sha256-7capMWOvWyoYbUaHF/b+I2U6XLMaHmky8KugWvfXYuk=" - } - }, - "rules_fuzzing_oss_fuzz": { - "bzlFile": "@@rules_fuzzing~//fuzzing/private/oss_fuzz:repository.bzl", - "ruleClassName": "oss_fuzz_repository", - "attributes": {} - }, - "honggfuzz": { - "bzlFile": "@@bazel_tools//tools/build_defs/repo:http.bzl", - "ruleClassName": "http_archive", - "attributes": { - "build_file": "@@rules_fuzzing~//:honggfuzz.BUILD", - "sha256": "6b18ba13bc1f36b7b950c72d80f19ea67fbadc0ac0bb297ec89ad91f2eaa423e", - "url": "https://github.com/google/honggfuzz/archive/2.5.zip", - "strip_prefix": "honggfuzz-2.5" - } - }, - "rules_fuzzing_jazzer": { - "bzlFile": "@@bazel_tools//tools/build_defs/repo:http.bzl", - "ruleClassName": "http_jar", - "attributes": { - "sha256": "ee6feb569d88962d59cb59e8a31eb9d007c82683f3ebc64955fd5b96f277eec2", - "url": "https://repo1.maven.org/maven2/com/code-intelligence/jazzer/0.20.1/jazzer-0.20.1.jar" - } - }, - "rules_fuzzing_jazzer_api": { - "bzlFile": "@@bazel_tools//tools/build_defs/repo:http.bzl", - "ruleClassName": "http_jar", - "attributes": { - "sha256": "f5a60242bc408f7fa20fccf10d6c5c5ea1fcb3c6f44642fec5af88373ae7aa1b", - "url": "https://repo1.maven.org/maven2/com/code-intelligence/jazzer-api/0.20.1/jazzer-api-0.20.1.jar" - } - } - }, - "recordedRepoMappingEntries": [ - [ - "rules_fuzzing~", - "bazel_tools", - "bazel_tools" - ] - ] - } - }, - 
"@@rules_java~//java:rules_java_deps.bzl%compatibility_proxy": { - "general": { - "bzlTransitiveDigest": "KIX40nDfygEWbU+rq3nYpt3tVgTK/iO8PKh5VMBlN7M=", - "usagesDigest": "pwHZ+26iLgQdwvdZeA5wnAjKnNI3y6XO2VbhOTeo5h8=", - "recordedFileInputs": {}, - "recordedDirentsInputs": {}, - "envVariables": {}, - "generatedRepoSpecs": { - "compatibility_proxy": { - "bzlFile": "@@rules_java~//java:rules_java_deps.bzl", - "ruleClassName": "_compatibility_proxy_repo_rule", - "attributes": {} - } - }, - "recordedRepoMappingEntries": [ - [ - "rules_java~", - "bazel_tools", - "bazel_tools" - ] - ] - } - }, - "@@rules_kotlin~//src/main/starlark/core/repositories:bzlmod_setup.bzl%rules_kotlin_extensions": { - "general": { - "bzlTransitiveDigest": "fus14IFJ/1LGWWGKPH/U18VnJCoMjfDt1ckahqCnM0A=", - "usagesDigest": "aJF6fLy82rR95Ff5CZPAqxNoFgOMLMN5ImfBS0nhnkg=", - "recordedFileInputs": {}, - "recordedDirentsInputs": {}, - "envVariables": {}, - "generatedRepoSpecs": { - "com_github_jetbrains_kotlin_git": { - "bzlFile": "@@rules_kotlin~//src/main/starlark/core/repositories:compiler.bzl", - "ruleClassName": "kotlin_compiler_git_repository", - "attributes": { - "urls": [ - "https://github.com/JetBrains/kotlin/releases/download/v1.9.23/kotlin-compiler-1.9.23.zip" - ], - "sha256": "93137d3aab9afa9b27cb06a824c2324195c6b6f6179d8a8653f440f5bd58be88" - } - }, - "com_github_jetbrains_kotlin": { - "bzlFile": "@@rules_kotlin~//src/main/starlark/core/repositories:compiler.bzl", - "ruleClassName": "kotlin_capabilities_repository", - "attributes": { - "git_repository_name": "com_github_jetbrains_kotlin_git", - "compiler_version": "1.9.23" - } - }, - "com_github_google_ksp": { - "bzlFile": "@@rules_kotlin~//src/main/starlark/core/repositories:ksp.bzl", - "ruleClassName": "ksp_compiler_plugin_repository", - "attributes": { - "urls": [ - "https://github.com/google/ksp/releases/download/1.9.23-1.0.20/artifacts.zip" - ], - "sha256": "ee0618755913ef7fd6511288a232e8fad24838b9af6ea73972a76e81053c8c2d", - "strip_version": "1.9.23-1.0.20" - } - }, - "com_github_pinterest_ktlint": { - "bzlFile": "@@bazel_tools//tools/build_defs/repo:http.bzl", - "ruleClassName": "http_file", - "attributes": { - "sha256": "01b2e0ef893383a50dbeb13970fe7fa3be36ca3e83259e01649945b09d736985", - "urls": [ - "https://github.com/pinterest/ktlint/releases/download/1.3.0/ktlint" - ], - "executable": true - } - }, - "rules_android": { - "bzlFile": "@@bazel_tools//tools/build_defs/repo:http.bzl", - "ruleClassName": "http_archive", - "attributes": { - "sha256": "cd06d15dd8bb59926e4d65f9003bfc20f9da4b2519985c27e190cddc8b7a7806", - "strip_prefix": "rules_android-0.1.1", - "urls": [ - "https://github.com/bazelbuild/rules_android/archive/v0.1.1.zip" - ] - } - } - }, - "recordedRepoMappingEntries": [ - [ - "rules_kotlin~", - "bazel_tools", - "bazel_tools" - ] - ] - } - }, - "@@rules_python~//python/uv:uv.bzl%uv": { - "general": { - "bzlTransitiveDigest": "Xpqjnjzy6zZ90Es9Wa888ZLHhn7IsNGbph/e6qoxzw8=", - "usagesDigest": "Tx9xMlyNlKU8Dq4pnYUJ8g2T1PHLiGoAs42AGwUIqh8=", - "recordedFileInputs": {}, - "recordedDirentsInputs": {}, - "envVariables": {}, - "generatedRepoSpecs": { - "uv": { - "bzlFile": "@@rules_python~//python/uv/private:uv_toolchains_repo.bzl", - "ruleClassName": "uv_toolchains_repo", - "attributes": { - "toolchain_type": "'@@rules_python~//python/uv:uv_toolchain_type'", - "toolchain_names": [ - "none" - ], - "toolchain_implementations": { - "none": "'@@rules_python~//python:none'" - }, - "toolchain_compatible_with": { - "none": [ - "@platforms//:incompatible" - ] - 
}, - "toolchain_target_settings": {} - } - } - }, - "recordedRepoMappingEntries": [ - [ - "rules_python~", - "platforms", - "platforms" - ] - ] - } - } - } -} diff --git a/README.md b/README.md index 4f2706fe5..11e249216 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,9 @@ -![Constellation](docs/static/img/BannerConstellationanimated.svg) +![Constellation](docs/static/img/banner.svg) # Always Encrypted Kubernetes

+ Constellation License Govulncheck Go Report Discord @@ -32,7 +33,7 @@ Encrypting your K8s is good for: ### 🔒 Everything always encrypted -* Runtime encryption: All nodes run inside Confidential VMs (CVMs) based on AMD SEV or Intel TDX. +* Runtime encryption: All nodes run inside AMD SEV-based Confidential VMs (CVMs). Support for Intel TDX will be added in the future. * Transparent encryption of network: All [pod-to-pod traffic is automatically encrypted][network-encryption] * Transparent encryption of storage: All writes to persistent storage are automatically encrypted. This includes [nodes' state disks][storage-encryption], [persistent volumes via CSI][csi], and [S3 object storage][s3proxy]. @@ -50,14 +51,14 @@ Encrypting your K8s is good for: * High availability with multi-master architecture and stacked etcd topology * Dynamic cluster autoscaling with verification and secure bootstrapping of new nodes -* Competitive [performance] +* Competitive performance ([see K-Bench comparison with AKS and GKE][performance]) ### 🧩 Easy to use and integrate * Constellation is a [CNCF-certified][certified] Kubernetes. It's aligned to Kubernetes' [version support policy][k8s-version-support] and will likely work with your existing workloads and tools. -* Support for AWS, Azure, GCP, and STACKIT. +* Support for Azure, GCP, and AWS. * Support for local installations with [MiniConstellation][first-steps-local]. * Support for [Terraform][terraform-provider] @@ -73,6 +74,15 @@ If you're already familiar with Kubernetes, it's easy to get started with Conste Learn more: ["Getting started with Constellation" videos series](https://www.youtube.com/playlist?list=PLEhAl3D5WVvRYxO_yI7KzmtJ7rJUyQgNu). +## Live demos + +We're running public instances of popular software on Constellation: + +* Rocket.Chat: ([blog post](https://www.edgeless.systems/resource-library/rocket-chat)) +* GitLab: ([blog post](https://www.edgeless.systems/resource-library/confidential-gitlab/)) + +These instances run on CVMs in Azure and Constellation keeps them end-to-end confidential. + ## Documentation To learn more, see the [documentation](https://docs.edgeless.systems/constellation). @@ -86,7 +96,7 @@ You may want to start with one of the following sections. * If something doesn't work, make sure to use the [latest release](https://github.com/edgelesssys/constellation/releases/latest) and check out the [known issues](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22). * Please file an [issue][github-issues] to get help or report a bug. -* Join the [GitHub discussions](https://github.com/edgelesssys/constellation/discussions) if you have questions or would like to discuss an idea. +* Join the [Discord] to have a chat on confidential computing and Constellation. * Visit our [blog](https://www.edgeless.systems/blog/) for technical deep-dives and tutorials and follow us on [LinkedIn] for news. * Edgeless Systems also offers [Enterprise Support][enterprise-support]. @@ -102,7 +112,7 @@ Refer to [`CONTRIBUTING.md`](CONTRIBUTING.md) on how to contribute. The most imp ## License -Constellation is licensed under the [Business Source License 1.1](LICENSE). You may use it free of charge for non-production use. You can find more information in the [license] section of the docs. +The Constellation source code is licensed under the [GNU Affero General Public License v3.0](LICENSE). Edgeless Systems provides pre-built and signed binaries and images for Constellation. 
You may use these free of charge to create and run services for internal consumption, evaluation purposes, or non-commercial use. You can find more information in the [license] section of the docs. [architecture]: https://docs.edgeless.systems/constellation/architecture/overview @@ -110,6 +120,7 @@ Constellation is licensed under the [Business Source License 1.1](LICENSE). You [cla-assistant]: https://cla-assistant.io/edgelesssys/constellation [cluster-attestation]: https://docs.edgeless.systems/constellation/architecture/attestation#cluster-attestation [confidential-kubernetes]: https://docs.edgeless.systems/constellation/overview/confidential-kubernetes +[discord]: https://discord.gg/rH8QTH56JN [enterprise-support]: https://www.edgeless.systems/products/constellation/ [first-steps]: https://docs.edgeless.systems/constellation/getting-started/first-steps [first-steps-local]: https://docs.edgeless.systems/constellation/getting-started/first-steps-local diff --git a/WORKSPACE.bzlmod b/WORKSPACE.bazel similarity index 73% rename from WORKSPACE.bzlmod rename to WORKSPACE.bazel index 273d85e75..21c6e1982 100644 --- a/WORKSPACE.bzlmod +++ b/WORKSPACE.bazel @@ -1,5 +1,13 @@ workspace(name = "constellation") +load("//bazel/toolchains:skylib_deps.bzl", "skylib_deps") + +skylib_deps() + +load("//bazel/toolchains:cc_deps.bzl", "rules_cc_deps") + +rules_cc_deps() + # nixpkgs deps load("//bazel/toolchains:nixpkgs_deps.bzl", "nixpkgs_deps") @@ -132,6 +140,15 @@ register_mkosi( name = "mkosi_nix_toolchain", ) +# Python toolchain +load("//bazel/toolchains:python_deps.bzl", "python_deps") + +python_deps() + +load("@rules_python//python:repositories.bzl", "py_repositories") + +py_repositories() + nixpkgs_python_configure( fail_not_supported = False, python3_attribute_path = "python311", @@ -139,11 +156,55 @@ nixpkgs_python_configure( ) # Go toolchain +load("//bazel/toolchains:go_rules_deps.bzl", "go_deps") -# gazelle:repo gazelle +go_deps() + +load("//bazel/toolchains:go_module_deps.bzl", "go_dependencies") + +# gazelle:repository_macro bazel/toolchains/go_module_deps.bzl%go_dependencies +go_dependencies() + +load("@io_bazel_rules_go//go:deps.bzl", "go_download_sdk", "go_register_toolchains", "go_rules_dependencies") + +go_download_sdk( + name = "go_sdk", + patches = ["//3rdparty/bazel/org_golang:go_tls_max_handshake_size.patch"], + version = "1.22.1", +) + +go_rules_dependencies() + +go_register_toolchains() + +load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies") + +gazelle_dependencies(go_repository_default_config = "//:WORKSPACE.bazel") + +# gazelle:repo bazel_gazelle + +# proto toolchain +load("//bazel/toolchains:proto_deps.bzl", "proto_deps") + +proto_deps() + +load("@rules_proto//proto:repositories.bzl", "rules_proto_dependencies", "rules_proto_toolchains") + +rules_proto_dependencies() + +rules_proto_toolchains() + +# Buildifier +load("//bazel/toolchains:buildifier_deps.bzl", "buildifier_deps") + +buildifier_deps() # C / C++ toolchains +load("//bazel/toolchains:hermetic_cc_deps.bzl", "hermetic_cc_deps") + +hermetic_cc_deps() + load("@hermetic_cc_toolchain//toolchain:defs.bzl", zig_toolchains = "toolchains") # If needed, we can specify a specific version of the Zig toolchain to use. @@ -165,7 +226,6 @@ zig_toolchains() nixpkgs_cc_configure( name = "nixpkgs_cc_toolchain", - cc_std = "c++14", # TODO(malt3): Use clang once cc-wrapper path reset bug is fixed upstream. 
# attribute_path = "clang_11", repository = "@nixpkgs", @@ -173,7 +233,6 @@ nixpkgs_cc_configure( nixpkgs_cc_configure( name = "nixpkgs_cc_aarch64_darwin_x86_64_linux", - cc_std = "c++14", cross_cpu = "k8", exec_constraints = [ "@platforms//os:osx", @@ -208,6 +267,28 @@ register_toolchains( "@zig_sdk//toolchain:windows_amd64", ) +# Packaging rules (tar) +load("//bazel/toolchains:pkg_deps.bzl", "pkg_deps") + +pkg_deps() + +load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies") + +rules_pkg_dependencies() + +# Aspect Bazel Lib +load("//bazel/toolchains:aspect_bazel_lib.bzl", "aspect_bazel_lib") + +aspect_bazel_lib() + +load("@aspect_bazel_lib//lib:repositories.bzl", "aspect_bazel_lib_dependencies", "aspect_bazel_lib_register_toolchains", "register_coreutils_toolchains", "register_yq_toolchains") + +aspect_bazel_lib_dependencies() + +aspect_bazel_lib_register_toolchains() + +register_coreutils_toolchains() + # OCI rules load("//bazel/toolchains:oci_deps.bzl", "oci_deps") @@ -217,14 +298,19 @@ load("@rules_oci//oci:dependencies.bzl", "rules_oci_dependencies") rules_oci_dependencies() -load("@rules_oci//oci:repositories.bzl", "oci_register_toolchains") +load("@rules_oci//oci:repositories.bzl", "LATEST_CRANE_VERSION", "oci_register_toolchains") -oci_register_toolchains(name = "oci") +oci_register_toolchains( + name = "oci", + crane_version = LATEST_CRANE_VERSION, +) load("//bazel/toolchains:container_images.bzl", "containter_image_deps") containter_image_deps() +register_yq_toolchains() + # Multirun load("//bazel/toolchains:multirun_deps.bzl", "multirun_deps") @@ -234,10 +320,6 @@ load("//3rdparty/bazel/com_github_medik8s_node_maintainance_operator:source.bzl" node_maintainance_operator_deps() -load("//3rdparty/bazel/com_github_kubernetes_sigs_aws_load_balancer_controller:source.bzl", "aws_load_balancer_controller_deps") - -aws_load_balancer_controller_deps() - # CI deps load("//bazel/toolchains:ci_deps.bzl", "ci_deps") diff --git a/bazel/bazelrc/ci.bazelrc b/bazel/bazelrc/ci.bazelrc index dbae2df5e..4bc188e3d 100644 --- a/bazel/bazelrc/ci.bazelrc +++ b/bazel/bazelrc/ci.bazelrc @@ -62,10 +62,6 @@ build --remote_local_fallback # Docs: https://bazel.build/reference/command-line-reference#flag--grpc_keepalive_time build --grpc_keepalive_time=30s -# Use fallbacks in case proxy.golang.org is not reachable. -# Docs: https://go.dev/ref/mod#goproxy-protocol -common '--repo_env=GOPROXY=https://proxy.golang.org|https://goproxy.io|direct' - ###################################### # Edgeless specific # diff --git a/bazel/ci/BUILD.bazel b/bazel/ci/BUILD.bazel index 3fdcdcc8b..44e1c89c2 100644 --- a/bazel/ci/BUILD.bazel +++ b/bazel/ci/BUILD.bazel @@ -1,7 +1,6 @@ -load("@buildifier_prebuilt//:rules.bzl", "buildifier", "buildifier_test") -load("@com_github_ash2k_bazel_tools//multirun:def.bzl", "multirun") -load("@gazelle//:def.bzl", "gazelle") -load("//bazel/ci:go_bin_for_host.bzl", "go_bin_for_host") +load("@bazel_gazelle//:def.bzl", "gazelle") +load("@com_github_ash2k_bazel_tools//multirun:def.bzl", "command", "multirun") +load("@com_github_bazelbuild_buildtools//buildifier:def.bzl", "buildifier", "buildifier_test") load("//bazel/ci:proto_targets.bzl", "proto_targets") load("//bazel/sh:def.bzl", "noop_warn", "repo_command", "sh_template") @@ -10,6 +9,18 @@ required_tags = [ "integration", ] +# TODO(malt3): Remove this once we have a better solution for +# gazelle not respecting the default go env. 
+command( + name = "cmd_gazelle_update_repos", + command = ":gazelle_update_repos", + environment = { + "GOPROXY": "https://proxy.golang.org,direct", + "GOSUMDB": "sum.golang.org", + "GOTOOLCHAIN": "local", + }, +) + gazelle( name = "gazelle_generate", build_tags = required_tags, @@ -22,6 +33,18 @@ gazelle( mode = "diff", ) +gazelle( + name = "gazelle_update_repos", + args = [ + "-from_file=go.work", + "-to_macro=bazel/toolchains/go_module_deps.bzl%go_dependencies", + "-build_file_proto_mode=disable_global", + "-build_file_generation=on", + "-prune", + ], + command = "update-repos", +) + buildifier_test( name = "buildifier_check", timeout = "short", @@ -31,7 +54,7 @@ buildifier_test( no_sandbox = True, tags = ["no-remote-exec"], verbose = True, - workspace = "//:WORKSPACE.bzlmod", + workspace = "//:WORKSPACE.bazel", ) buildifier( @@ -57,10 +80,10 @@ sh_template( sh_template( name = "go_mod_tidy", data = [ - ":go_bin_for_host", + "@go_sdk//:bin/go", ], substitutions = { - "@@GO@@": "$(rootpath :go_bin_for_host)", + "@@GO@@": "$(rootpath @go_sdk//:bin/go)", }, template = "go_tidy.sh.in", ) @@ -234,10 +257,10 @@ sh_template( name = "golangci_lint", data = [ ":com_github_golangci_golangci_lint", - ":go_bin_for_host", + "@go_sdk//:bin/go", ], substitutions = { - "@@GO@@": "$(rootpath :go_bin_for_host)", + "@@GO@@": "$(rootpath @go_sdk//:bin/go)", "@@GOLANGCI-LINT@@": "$(rootpath :com_github_golangci_golangci_lint)", }, template = "golangci_lint.sh.in", @@ -267,11 +290,11 @@ sh_template( sh_template( name = "golicenses_check", data = [ - ":go_bin_for_host", "@com_github_google_go_licenses//:go-licenses", + "@go_sdk//:bin/go", ], substitutions = { - "@@GO@@": "$(rootpath :go_bin_for_host)", + "@@GO@@": "$(rootpath @go_sdk//:bin/go)", "@@GO_LICENSES@@": "$(rootpath @com_github_google_go_licenses//:go-licenses)", }, template = "golicenses.sh.in", @@ -287,14 +310,12 @@ sh_template( sh_template( name = "govulncheck", data = [ - ":go_bin_for_host", - "@jq_toolchains//:resolved_toolchain", + "@go_sdk//:bin/go", "@org_golang_x_vuln//cmd/govulncheck", ], substitutions = { - "@@GO@@": "$(rootpath :go_bin_for_host)", + "@@GO@@": "$(rootpath @go_sdk//:bin/go)", "@@GOVULNCHECK@@": "$(rootpath @org_golang_x_vuln//cmd/govulncheck:govulncheck)", - "@@JQ@@": "$(rootpath @jq_toolchains//:resolved_toolchain)", }, template = "govulncheck.sh.in", ) @@ -324,15 +345,15 @@ sh_template( data = [ ":com_github_helm_helm", ":com_github_siderolabs_talos_hack_docgen", - ":go_bin_for_host", "//internal/attestation/measurements/measurement-generator", "//internal/versions/hash-generator", + "@go_sdk//:bin/go", "@org_golang_x_tools//cmd/stringer", "@yq_toolchains//:resolved_toolchain", ], substitutions = { "@@DOCGEN@@": "$(rootpath :com_github_siderolabs_talos_hack_docgen)", - "@@GO@@": "$(rootpath :go_bin_for_host)", + "@@GO@@": "$(rootpath @go_sdk//:bin/go)", "@@HASH_GENERATOR@@": "$(rootpath //internal/versions/hash-generator:hash-generator)", "@@HELM@@": "$(rootpath :com_github_helm_helm)", "@@MEASUREMENT_GENERATOR@@": "$(rootpath //internal/attestation/measurements/measurement-generator:measurement-generator)", @@ -478,19 +499,14 @@ sh_template( template = "unused_gh_actions.sh.in", ) -go_bin_for_host( - name = "go_bin_for_host", - visibility = ["//visibility:private"], -) - sh_template( name = "gocoverage_diff", data = [ - ":go_bin_for_host", "//hack/gocoverage", + "@go_sdk//:bin/go", ], substitutions = { - "@@GO@@": "$(rootpath :go_bin_for_host)", + "@@GO@@": "$(rootpath @go_sdk//:bin/go)", "@@GOCOVERAGE@@": "$(rootpath 
//hack/gocoverage:gocoverage)", }, template = "gocoverage_diff.sh.in", @@ -502,6 +518,7 @@ multirun( ":shfmt", ":gofumpt", ":go_mod_tidy", + ":cmd_gazelle_update_repos", ":gazelle_generate", ":buildifier_fix", ":terraform_fmt", @@ -514,14 +531,16 @@ multirun( ) multirun( - name = "parallel_checks", + name = "check", testonly = True, commands = [ ":gazelle_check", ":buildifier_check", + ":golangci_lint", ":terraform_check", ":golicenses_check", ":license_header_check", + ":govulncheck", ":deps_mirror_check", ":proto_targets_check", ":unused_gh_actions", @@ -541,48 +560,16 @@ multirun( ) multirun( - name = "check", - testonly = True, - commands = [ - ":parallel_checks", - ":golangci_lint", - ":govulncheck", - ], - jobs = 1, # execute sequentially to avoid running into memory issues on our CI runners - stop_on_error = False, - visibility = ["//visibility:public"], -) - -multirun( - name = "generate_files", + name = "generate", commands = [ ":terraform_gen", "//3rdparty/bazel/com_github_medik8s_node_maintainance_operator:pull_files", - "//3rdparty/bazel/com_github_kubernetes_sigs_aws_load_balancer_controller:pull_files", ":go_generate", ":proto_generate", - ], - jobs = 0, # execute concurrently - visibility = ["//visibility:public"], -) - -multirun( - name = "generate_docs", - commands = [ ":cli_docgen", ":terraform_docgen", + ":version_info_gen", ], jobs = 0, # execute concurrently visibility = ["//visibility:public"], ) - -multirun( - name = "generate", - commands = [ - ":generate_files", - ":generate_docs", - ":version_info_gen", - ], - jobs = 1, # execute sequentially - visibility = ["//visibility:public"], -) diff --git a/bazel/ci/go_bin_for_host.bzl b/bazel/ci/go_bin_for_host.bzl deleted file mode 100644 index 29721b2e2..000000000 --- a/bazel/ci/go_bin_for_host.bzl +++ /dev/null @@ -1,29 +0,0 @@ -""" -Go toolchain for the host platformS -Inspired by https://github.com/bazel-contrib/rules_go/blob/6e4fdcfeb1a333b54ab39ae3413d4ded46d8958d/go/private/rules/go_bin_for_host.bzl -""" - -load("@local_config_platform//:constraints.bzl", "HOST_CONSTRAINTS") - -GO_TOOLCHAIN = "@io_bazel_rules_go//go:toolchain" - -def _ensure_target_cfg(ctx): - if "-exec" in ctx.bin_dir.path or "/host/" in ctx.bin_dir.path: - fail("exec not found") - -def _go_bin_for_host_impl(ctx): - _ensure_target_cfg(ctx) - sdk = ctx.toolchains[GO_TOOLCHAIN].sdk - sdk_files = ctx.runfiles([sdk.go] + sdk.headers.to_list() + sdk.libs.to_list() + sdk.srcs.to_list() + sdk.tools.to_list()) - return [ - DefaultInfo( - files = depset([sdk.go]), - runfiles = sdk_files, - ), - ] - -go_bin_for_host = rule( - implementation = _go_bin_for_host_impl, - toolchains = [GO_TOOLCHAIN], - exec_compatible_with = HOST_CONSTRAINTS, -) diff --git a/bazel/ci/golicenses.sh.in b/bazel/ci/golicenses.sh.in index 4f3eb78e9..2189b511d 100644 --- a/bazel/ci/golicenses.sh.in +++ b/bazel/ci/golicenses.sh.in @@ -57,7 +57,11 @@ license_report() { AGPL-3.0) case ${pkg} in - github.com/edgelesssys/go-tdx-qpl) ;; + github.com/edgelesssys/constellation/v2) ;; + + github.com/edgelesssys/constellation/v2/operators/constellation-node-operator/api/v1alpha1) ;; + + github.com/edgelesssys/constellation/v2/operators/constellation-node-operator/api) ;; *) not_allowed @@ -67,7 +71,7 @@ license_report() { Unknown) case ${pkg} in - github.com/edgelesssys/constellation/v2/*) ;; + github.com/edgelesssys/go-tdx-qpl/*) ;; *) not_allowed diff --git a/bazel/ci/govulncheck.sh.in b/bazel/ci/govulncheck.sh.in index 28da4441f..0ea07b10c 100644 --- a/bazel/ci/govulncheck.sh.in +++ 
b/bazel/ci/govulncheck.sh.in @@ -15,8 +15,6 @@ go=$(realpath @@GO@@) stat "${go}" >> /dev/null govulncheck=$(realpath @@GOVULNCHECK@@) stat "${govulncheck}" >> /dev/null -jq=$(realpath @@JQ@@) -stat "${jq}" >> /dev/null cd "${BUILD_WORKSPACE_DIRECTORY}" @@ -26,32 +24,19 @@ submodules=$(${go} list -f '{{.Dir}}' -m) PATH=$(dirname "${go}"):${PATH} -check_module() { - excluded_osvs=( - "GO-2025-3521" # Kubernetes GitRepo Volume Inadvertent Local Repository Access - "GO-2025-3547" # Kubernetes kube-apiserver Vulnerable to Race Condition - "GO-2025-3770" # Host Header Injection which Leads to Open Redirect in RedirectSlashes in github.com/go-chi/chi - ) - - # shellcheck disable=SC2016 # The $ sign in the single quoted string is correct. - CGO_ENABLED=0 ${govulncheck} -C "$1" -format json "./..." | - "${jq}" --argjson excluded "$(printf '"%s"\n' "${excluded_osvs[@]}" | jq -s)" -sr ' - (map(select(.osv) | {"key": .osv.id, "value": .osv.summary}) | from_entries) as $osvs | - map(select( .finding and all($excluded[] != .finding.osv; .) ) | .finding | select( .trace[-1].module | startswith("github.com/edgelesssys/") )) | - group_by(.osv) | - map( {"osv": .[0].osv, "summary": $osvs[.[0].osv], "traces": [.[] | [.trace[] | .module]]} ) | - if length > 0 then halt_error(1) else .[] end' - -} - check() { err=0 - echo "Scanning Go vulnerability DB for known vulnerabilities in modules:" + echo "Scanning Go vulnerability DB for knwon vulnerabilities in modules:" for mod in ${submodules}; do echo " ${mod}" echo -n " " - check_module "${mod}" + CGO_ENABLED=0 ${govulncheck} -C "${mod}" "./..." | + tail -n 2 | # Providing some nice output... + tr '\n' ' ' | + sed s/" your code and"// && + printf "\n" || + err=$? done exit "${err}" diff --git a/bazel/ci/license_header.sh.in b/bazel/ci/license_header.sh.in index 8278769cd..4e5ce470c 100644 --- a/bazel/ci/license_header.sh.in +++ b/bazel/ci/license_header.sh.in @@ -25,7 +25,7 @@ noHeader=$( --include='*.go' \ --exclude-dir 3rdparty \ --exclude-dir build \ - -e'SPDX-License-Identifier: BUSL-1.1' \ + -e'SPDX-License-Identifier: AGPL-3.0-only' \ -e'DO NOT EDIT' | { grep -v internal/cloud/openstack/clouds || true; } ) diff --git a/bazel/ci/terraform.sh.in b/bazel/ci/terraform.sh.in index 777049106..121b2313d 100644 --- a/bazel/ci/terraform.sh.in +++ b/bazel/ci/terraform.sh.in @@ -46,6 +46,7 @@ excludeDirs=( excludeLockDirs=( "build" "terraform-provider-constellation" + "terraform/legacy-module" ) excludeCheckDirs=( @@ -142,7 +143,6 @@ check() { done echo "This may take 5-10 min..." 
for module in "${terraformLockModules[@]}"; do - echo "Generating lock file for ${module}" ${terraform} -chdir="${module}" init > /dev/null ${terraform} -chdir="${module}" providers lock -platform=linux_arm64 > /dev/null ${terraform} -chdir="${module}" providers lock -platform=linux_amd64 > /dev/null diff --git a/bazel/mkosi/mkosi_image.bzl b/bazel/mkosi/mkosi_image.bzl index fb11a81c2..dbb3c9a8b 100644 --- a/bazel/mkosi/mkosi_image.bzl +++ b/bazel/mkosi/mkosi_image.bzl @@ -2,9 +2,6 @@ load("@bazel_skylib//lib:paths.bzl", "paths") -def _resource_set(_os, _num_inputs): - return {"cpu": 4, "memory": 4096} - def _mkosi_image_impl(ctx): args = ctx.actions.args() inputs = [] @@ -70,6 +67,8 @@ def _mkosi_image_impl(ctx): args.add("--kernel-command-line", ctx.attr.kernel_command_line) for key, value in ctx.attr.kernel_command_line_dict.items(): args.add("--kernel-command-line", "{}={}".format(key, value)) + if ctx.attr.autologin: + args.add("--autologin", "yes") info = ctx.toolchains["@constellation//bazel/mkosi:toolchain_type"].mkosi if not info.valid: @@ -100,7 +99,6 @@ def _mkosi_image_impl(ctx): execution_requirements = {"no-remote": "1", "no-sandbox": "1"}, progress_message = "Building mkosi image " + ctx.label.name, env = env, - resource_set = _resource_set, ) return DefaultInfo(files = depset(outputs), runfiles = ctx.runfiles(outputs)) @@ -108,6 +106,7 @@ mkosi_image = rule( implementation = _mkosi_image_impl, attrs = { "architecture": attr.string(), + "autologin": attr.bool(), "base_trees": attr.label_list(allow_files = True), "distribution": attr.string(), "env": attr.string_dict(), diff --git a/bazel/proto/rules.bzl b/bazel/proto/rules.bzl index 3807796b2..da73d5f72 100644 --- a/bazel/proto/rules.bzl +++ b/bazel/proto/rules.bzl @@ -5,14 +5,17 @@ based on https://github.com/bazelbuild/rules_go/issues/2111#issuecomment-1355927 """ load("@aspect_bazel_lib//lib:write_source_files.bzl", "write_source_files") -load("@io_bazel_rules_go//go:def.bzl", "GoInfo") +load("@io_bazel_rules_go//go:def.bzl", "GoLibrary", "go_context") load("@io_bazel_rules_go//proto:compiler.bzl", "GoProtoCompiler") def _output_go_library_srcs_impl(ctx): + go = go_context(ctx) + srcs_of_library = [] importpath = "" for src in ctx.attr.deps: - lib = src[GoInfo] + lib = src[GoLibrary] + go_src = go.library_to_source(go, ctx.attr, lib, False) if importpath and lib.importpath != importpath: fail( "importpath of all deps must match, got {} and {}", @@ -20,7 +23,7 @@ def _output_go_library_srcs_impl(ctx): lib.importpath, ) importpath = lib.importpath - srcs_of_library.extend(lib.srcs) + srcs_of_library.extend(go_src.srcs) if len(srcs_of_library) != 1: fail("expected exactly one src for library, got {}", len(srcs_of_library)) @@ -51,7 +54,7 @@ output_go_library_srcs = rule( default = "@io_bazel_rules_go//proto:go_proto", ), "deps": attr.label_list( - providers = [GoInfo], + providers = [GoLibrary], aspects = [], ), "out": attr.output( diff --git a/bazel/release/artifacts/BUILD.bazel b/bazel/release/artifacts/BUILD.bazel index 8861d6dfe..bba7fb0c8 100644 --- a/bazel/release/artifacts/BUILD.bazel +++ b/bazel/release/artifacts/BUILD.bazel @@ -70,5 +70,5 @@ go_test( env = platform_container_sums_paths | platform_clis_paths, # keep x_defs = {"runsUnder": "bazel"}, - deps = ["@io_bazel_rules_go//go/runfiles"], + deps = ["@io_bazel_rules_go//go/runfiles:go_default_library"], ) diff --git a/bazel/release/artifacts/artifacts_test.go b/bazel/release/artifacts/artifacts_test.go index 0c23a3f41..3a093d21a 100644 --- 
a/bazel/release/artifacts/artifacts_test.go +++ b/bazel/release/artifacts/artifacts_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package artifacts diff --git a/bazel/sh/BUILD.bazel b/bazel/sh/BUILD.bazel index 7a347a852..cac8fda5a 100644 --- a/bazel/sh/BUILD.bazel +++ b/bazel/sh/BUILD.bazel @@ -1,5 +1,3 @@ -load("@rules_shell//shell:sh_library.bzl", "sh_library") - exports_files([ "repo_command.sh.in", "noop_warn.sh.in", diff --git a/bazel/sh/def.bzl b/bazel/sh/def.bzl index 4ddad1fe0..5f2129017 100644 --- a/bazel/sh/def.bzl +++ b/bazel/sh/def.bzl @@ -1,8 +1,6 @@ """Bazel rules for CI and dev tooling""" load("@bazel_skylib//lib:shell.bzl", "shell") -load("@rules_shell//shell:sh_binary.bzl", "sh_binary") -load("@rules_shell//shell:sh_test.bzl", "sh_test") def _sh_template_impl(ctx): out_file = ctx.actions.declare_file(ctx.label.name + ".bash") @@ -68,7 +66,8 @@ def sh_template(name, **kwargs): template = template, toolchains = toolchains, ) - sh_binary( + + native.sh_binary( name = name, srcs = [script_name], data = [script_name] + data, @@ -96,7 +95,8 @@ def sh_test_template(name, **kwargs): substitutions = substitutions, template = template, ) - sh_test( + + native.sh_test( name = name, srcs = [script_name], data = [script_name] + data, diff --git a/bazel/toolchains/0001-disable-Windows-support.patch b/bazel/toolchains/0001-disable-Windows-support.patch deleted file mode 100644 index 1cb9c8340..000000000 --- a/bazel/toolchains/0001-disable-Windows-support.patch +++ /dev/null @@ -1,75 +0,0 @@ -From d10473f4ac89c23dcd8ea02488b28a649f4a9735 Mon Sep 17 00:00:00 2001 -From: Markus Rudy -Date: Tue, 6 Aug 2024 11:33:29 +0200 -Subject: [PATCH] disable Windows support - -It's broken and we don't need it, see -https://github.com/bazel-contrib/rules_oci/issues/420. ---- - oci/private/image.bzl | 9 --------- - oci/private/util.bzl | 29 +---------------------------- - 2 files changed, 1 insertion(+), 37 deletions(-) - -diff --git a/oci/private/image.bzl b/oci/private/image.bzl -index e8a6ca5..434947c 100644 ---- a/oci/private/image.bzl -+++ b/oci/private/image.bzl -@@ -226,15 +226,6 @@ def _oci_image_impl(ctx): - - action_env = {} - -- # Windows: Don't convert arguments like --entrypoint=/some/bin to --entrypoint=C:/msys64/some/bin -- if ctx.target_platform_has_constraint(ctx.attr._windows_constraint[platform_common.ConstraintValueInfo]): -- # See https://www.msys2.org/wiki/Porting/: -- # > Setting MSYS2_ARG_CONV_EXCL=* prevents any path transformation. 
-- action_env["MSYS2_ARG_CONV_EXCL"] = "*" -- -- # This one is for Windows Git MSys -- action_env["MSYS_NO_PATHCONV"] = "1" -- - ctx.actions.run( - inputs = depset(inputs, transitive = transitive_inputs), - arguments = [args], -diff --git a/oci/private/util.bzl b/oci/private/util.bzl -index 7c2a2c2..479ca7d 100644 ---- a/oci/private/util.bzl -+++ b/oci/private/util.bzl -@@ -141,34 +141,7 @@ def _maybe_wrap_launcher_for_windows(ctx, bash_launcher): - - make sure the bash_launcher is in the inputs to the action - - @bazel_tools//tools/sh:toolchain_type should appear in the rules toolchains - """ -- if not ctx.target_platform_has_constraint(ctx.attr._windows_constraint[platform_common.ConstraintValueInfo]): -- return bash_launcher -- -- win_launcher = ctx.actions.declare_file("wrap_%s.bat" % ctx.label.name) -- ctx.actions.write( -- output = win_launcher, -- content = r"""@echo off --SETLOCAL ENABLEEXTENSIONS --SETLOCAL ENABLEDELAYEDEXPANSION --for %%a in ("{bash_bin}") do set "bash_bin_dir=%%~dpa" --set PATH=%bash_bin_dir%;%PATH% --set "parent_dir=%~dp0" --set "parent_dir=!parent_dir:\=/!" --set args=%* --rem Escape \ and * in args before passing it with double quote --if defined args ( -- set args=!args:\=\\\\! -- set args=!args:"=\"! --) --"{bash_bin}" -c "%parent_dir%{launcher} !args!" --""".format( -- bash_bin = ctx.toolchains["@bazel_tools//tools/sh:toolchain_type"].path, -- launcher = paths.relativize(bash_launcher.path, win_launcher.dirname), -- ), -- is_executable = True, -- ) -- -- return win_launcher -+ return bash_launcher - - def _file_exists(rctx, path): - result = rctx.execute(["stat", path]) --- -2.46.0 - diff --git a/bazel/toolchains/BUILD.bazel b/bazel/toolchains/BUILD.bazel index 55a19db8a..5849b1b8b 100644 --- a/bazel/toolchains/BUILD.bazel +++ b/bazel/toolchains/BUILD.bazel @@ -1 +1 @@ -"""This folder contains toolchain dependencies for the project. They are loaded by `WORKSPACE.bzlmod`.""" +"""This folder contains toolchain dependencies for the project. 
They are loaded by `WORKSPACE.bazel`.""" diff --git a/bazel/toolchains/aspect_bazel_lib.bzl b/bazel/toolchains/aspect_bazel_lib.bzl new file mode 100644 index 000000000..4ef0b14ed --- /dev/null +++ b/bazel/toolchains/aspect_bazel_lib.bzl @@ -0,0 +1,15 @@ +"""aspect bazel library""" + +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") + +def aspect_bazel_lib(): + http_archive( + name = "aspect_bazel_lib", + sha256 = "979667bb7276ee8fcf2c114c9be9932b9a3052a64a647e0dcaacfb9c0016f0a3", + strip_prefix = "bazel-lib-2.4.1", + urls = [ + "https://cdn.confidential.cloud/constellation/cas/sha256/979667bb7276ee8fcf2c114c9be9932b9a3052a64a647e0dcaacfb9c0016f0a3", + "https://github.com/aspect-build/bazel-lib/releases/download/v2.4.1/bazel-lib-v2.4.1.tar.gz", + ], + type = "tar.gz", + ) diff --git a/bazel/toolchains/buildifier_deps.bzl b/bazel/toolchains/buildifier_deps.bzl new file mode 100644 index 000000000..c745e17ce --- /dev/null +++ b/bazel/toolchains/buildifier_deps.bzl @@ -0,0 +1,15 @@ +"""buildifier repository rules""" + +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") + +def buildifier_deps(): + http_archive( + name = "com_github_bazelbuild_buildtools", + strip_prefix = "buildtools-6.4.0", + urls = [ + "https://cdn.confidential.cloud/constellation/cas/sha256/05c3c3602d25aeda1e9dbc91d3b66e624c1f9fdadf273e5480b489e744ca7269", + "https://github.com/bazelbuild/buildtools/archive/refs/tags/v6.4.0.tar.gz", + ], + type = "tar.gz", + sha256 = "05c3c3602d25aeda1e9dbc91d3b66e624c1f9fdadf273e5480b489e744ca7269", + ) diff --git a/bazel/toolchains/cc_deps.bzl b/bazel/toolchains/cc_deps.bzl new file mode 100644 index 000000000..cdda9821c --- /dev/null +++ b/bazel/toolchains/cc_deps.bzl @@ -0,0 +1,15 @@ +"""bazel rules_cc""" + +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") + +def rules_cc_deps(): + http_archive( + name = "rules_cc", + urls = [ + "https://cdn.confidential.cloud/constellation/cas/sha256/2037875b9a4456dce4a79d112a8ae885bbc4aad968e6587dca6e64f3a0900cdf", + "https://github.com/bazelbuild/rules_cc/releases/download/0.0.9/rules_cc-0.0.9.tar.gz", + ], + sha256 = "2037875b9a4456dce4a79d112a8ae885bbc4aad968e6587dca6e64f3a0900cdf", + strip_prefix = "rules_cc-0.0.9", + type = "tar.gz", + ) diff --git a/bazel/toolchains/ci_deps.bzl b/bazel/toolchains/ci_deps.bzl index f6354e6ca..e26be8713 100644 --- a/bazel/toolchains/ci_deps.bzl +++ b/bazel/toolchains/ci_deps.bzl @@ -19,33 +19,33 @@ def _shellcheck_deps(): http_archive( name = "com_github_koalaman_shellcheck_linux_amd64", urls = [ - "https://cdn.confidential.cloud/constellation/cas/sha256/6c881ab0698e4e6ea235245f22832860544f17ba386442fe7e9d629f8cbedf87", - "https://github.com/koalaman/shellcheck/releases/download/v0.10.0/shellcheck-v0.10.0.linux.x86_64.tar.xz", + "https://cdn.confidential.cloud/constellation/cas/sha256/700324c6dd0ebea0117591c6cc9d7350d9c7c5c287acbad7630fa17b1d4d9e2f", + "https://github.com/koalaman/shellcheck/releases/download/v0.9.0/shellcheck-v0.9.0.linux.x86_64.tar.xz", ], - sha256 = "6c881ab0698e4e6ea235245f22832860544f17ba386442fe7e9d629f8cbedf87", - strip_prefix = "shellcheck-v0.10.0", + sha256 = "700324c6dd0ebea0117591c6cc9d7350d9c7c5c287acbad7630fa17b1d4d9e2f", + strip_prefix = "shellcheck-v0.9.0", build_file_content = """exports_files(["shellcheck"], visibility = ["//visibility:public"])""", type = "tar.xz", ) http_archive( name = "com_github_koalaman_shellcheck_linux_arm64", urls = [ - 
"https://cdn.confidential.cloud/constellation/cas/sha256/324a7e89de8fa2aed0d0c28f3dab59cf84c6d74264022c00c22af665ed1a09bb", - "https://github.com/koalaman/shellcheck/releases/download/v0.10.0/shellcheck-v0.10.0.linux.aarch64.tar.xz", + "https://cdn.confidential.cloud/constellation/cas/sha256/179c579ef3481317d130adebede74a34dbbc2df961a70916dd4039ebf0735fae", + "https://github.com/koalaman/shellcheck/releases/download/v0.9.0/shellcheck-v0.9.0.linux.aarch64.tar.xz", ], - sha256 = "324a7e89de8fa2aed0d0c28f3dab59cf84c6d74264022c00c22af665ed1a09bb", - strip_prefix = "shellcheck-v0.10.0", + sha256 = "179c579ef3481317d130adebede74a34dbbc2df961a70916dd4039ebf0735fae", + strip_prefix = "shellcheck-v0.9.0", build_file_content = """exports_files(["shellcheck"], visibility = ["//visibility:public"])""", type = "tar.xz", ) http_archive( name = "com_github_koalaman_shellcheck_darwin_amd64", urls = [ - "https://cdn.confidential.cloud/constellation/cas/sha256/ef27684f23279d112d8ad84e0823642e43f838993bbb8c0963db9b58a90464c2", - "https://github.com/koalaman/shellcheck/releases/download/v0.10.0/shellcheck-v0.10.0.darwin.x86_64.tar.xz", + "https://cdn.confidential.cloud/constellation/cas/sha256/7d3730694707605d6e60cec4efcb79a0632d61babc035aa16cda1b897536acf5", + "https://github.com/koalaman/shellcheck/releases/download/v0.9.0/shellcheck-v0.9.0.darwin.x86_64.tar.xz", ], - sha256 = "ef27684f23279d112d8ad84e0823642e43f838993bbb8c0963db9b58a90464c2", - strip_prefix = "shellcheck-v0.10.0", + sha256 = "7d3730694707605d6e60cec4efcb79a0632d61babc035aa16cda1b897536acf5", + strip_prefix = "shellcheck-v0.9.0", build_file_content = """exports_files(["shellcheck"], visibility = ["//visibility:public"])""", type = "tar.xz", ) @@ -97,83 +97,83 @@ def _actionlint_deps(): name = "com_github_rhysd_actionlint_linux_amd64", build_file_content = """exports_files(["actionlint"], visibility = ["//visibility:public"])""", urls = [ - "https://cdn.confidential.cloud/constellation/cas/sha256/023070a287cd8cccd71515fedc843f1985bf96c436b7effaecce67290e7e0757", - "https://github.com/rhysd/actionlint/releases/download/v1.7.7/actionlint_1.7.7_linux_amd64.tar.gz", + "https://cdn.confidential.cloud/constellation/cas/sha256/f0294c342af98fad4ff917bc32032f28e1b55f76aedf291886ec10bbed7c12e1", + "https://github.com/rhysd/actionlint/releases/download/v1.6.26/actionlint_1.6.26_linux_amd64.tar.gz", ], type = "tar.gz", - sha256 = "023070a287cd8cccd71515fedc843f1985bf96c436b7effaecce67290e7e0757", + sha256 = "f0294c342af98fad4ff917bc32032f28e1b55f76aedf291886ec10bbed7c12e1", ) http_archive( name = "com_github_rhysd_actionlint_linux_arm64", build_file_content = """exports_files(["actionlint"], visibility = ["//visibility:public"])""", urls = [ - "https://cdn.confidential.cloud/constellation/cas/sha256/401942f9c24ed71e4fe71b76c7d638f66d8633575c4016efd2977ce7c28317d0", - "https://github.com/rhysd/actionlint/releases/download/v1.7.7/actionlint_1.7.7_linux_arm64.tar.gz", + "https://cdn.confidential.cloud/constellation/cas/sha256/a1056d85d614af4f6e5517ed2911dab2621b8e97c368c8b265328f9c22801648", + "https://github.com/rhysd/actionlint/releases/download/v1.6.26/actionlint_1.6.26_linux_arm64.tar.gz", ], type = "tar.gz", - sha256 = "401942f9c24ed71e4fe71b76c7d638f66d8633575c4016efd2977ce7c28317d0", + sha256 = "a1056d85d614af4f6e5517ed2911dab2621b8e97c368c8b265328f9c22801648", ) http_archive( name = "com_github_rhysd_actionlint_darwin_amd64", build_file_content = """exports_files(["actionlint"], visibility = ["//visibility:public"])""", urls = [ - 
"https://cdn.confidential.cloud/constellation/cas/sha256/28e5de5a05fc558474f638323d736d822fff183d2d492f0aecb2b73cc44584f5", - "https://github.com/rhysd/actionlint/releases/download/v1.7.7/actionlint_1.7.7_darwin_amd64.tar.gz", + "https://cdn.confidential.cloud/constellation/cas/sha256/bfa890e77a8508603c785af09a30bbab3a3255d291d8d27efc3f20ac8e303a8e", + "https://github.com/rhysd/actionlint/releases/download/v1.6.26/actionlint_1.6.26_darwin_amd64.tar.gz", ], type = "tar.gz", - sha256 = "28e5de5a05fc558474f638323d736d822fff183d2d492f0aecb2b73cc44584f5", + sha256 = "bfa890e77a8508603c785af09a30bbab3a3255d291d8d27efc3f20ac8e303a8e", ) http_archive( name = "com_github_rhysd_actionlint_darwin_arm64", build_file_content = """exports_files(["actionlint"], visibility = ["//visibility:public"])""", urls = [ - "https://cdn.confidential.cloud/constellation/cas/sha256/2693315b9093aeacb4ebd91a993fea54fc215057bf0da2659056b4bc033873db", - "https://github.com/rhysd/actionlint/releases/download/v1.7.7/actionlint_1.7.7_darwin_arm64.tar.gz", + "https://cdn.confidential.cloud/constellation/cas/sha256/5e131ab7de7ad051e1923b80d167aaa414734e97c720698c48778250e1dd2590", + "https://github.com/rhysd/actionlint/releases/download/v1.6.26/actionlint_1.6.26_darwin_arm64.tar.gz", ], type = "tar.gz", - sha256 = "2693315b9093aeacb4ebd91a993fea54fc215057bf0da2659056b4bc033873db", + sha256 = "5e131ab7de7ad051e1923b80d167aaa414734e97c720698c48778250e1dd2590", ) def _gofumpt_deps(): http_file( name = "com_github_mvdan_gofumpt_linux_amd64", urls = [ - "https://cdn.confidential.cloud/constellation/cas/sha256/11604bbaf7321abcc2fca2c6a37b7e9198bb1e76e5a86f297c07201e8ab1fda9", - "https://github.com/mvdan/gofumpt/releases/download/v0.8.0/gofumpt_v0.8.0_linux_amd64", + "https://cdn.confidential.cloud/constellation/cas/sha256/bdb57c353e2bbc43d2b097bb7289a6e65ef2526787f89316b4b452a9e5086be4", + "https://github.com/mvdan/gofumpt/releases/download/v0.6.0/gofumpt_v0.6.0_linux_amd64", ], executable = True, downloaded_file_path = "gofumpt", - sha256 = "11604bbaf7321abcc2fca2c6a37b7e9198bb1e76e5a86f297c07201e8ab1fda9", + sha256 = "bdb57c353e2bbc43d2b097bb7289a6e65ef2526787f89316b4b452a9e5086be4", ) http_file( name = "com_github_mvdan_gofumpt_linux_arm64", urls = [ - "https://cdn.confidential.cloud/constellation/cas/sha256/787c1d3d4d20e6fe2b0bf06a5a913ac0f50343dbf9a71540724a2b8092a0e6ca", - "https://github.com/mvdan/gofumpt/releases/download/v0.8.0/gofumpt_v0.8.0_linux_arm64", + "https://cdn.confidential.cloud/constellation/cas/sha256/10ff2643b7b4b9425bda7f0ca2d4e54d85b09024fbfd9c21dbfd55017b907965", + "https://github.com/mvdan/gofumpt/releases/download/v0.6.0/gofumpt_v0.6.0_linux_arm64", ], executable = True, downloaded_file_path = "gofumpt", - sha256 = "787c1d3d4d20e6fe2b0bf06a5a913ac0f50343dbf9a71540724a2b8092a0e6ca", + sha256 = "10ff2643b7b4b9425bda7f0ca2d4e54d85b09024fbfd9c21dbfd55017b907965", ) http_file( name = "com_github_mvdan_gofumpt_darwin_amd64", urls = [ - "https://cdn.confidential.cloud/constellation/cas/sha256/0dda6600cf263b703a5ad93e792b06180c36afdee9638617a91dd552f2c6fb3e", - "https://github.com/mvdan/gofumpt/releases/download/v0.8.0/gofumpt_v0.8.0_darwin_amd64", + "https://cdn.confidential.cloud/constellation/cas/sha256/59e6047b3fa2fb65b60cf7f8be9b77cf6b31b428a9a24042ce29e85140868036", + "https://github.com/mvdan/gofumpt/releases/download/v0.6.0/gofumpt_v0.6.0_darwin_amd64", ], executable = True, downloaded_file_path = "gofumpt", - sha256 = "0dda6600cf263b703a5ad93e792b06180c36afdee9638617a91dd552f2c6fb3e", + sha256 = 
"59e6047b3fa2fb65b60cf7f8be9b77cf6b31b428a9a24042ce29e85140868036", ) http_file( name = "com_github_mvdan_gofumpt_darwin_arm64", urls = [ - "https://cdn.confidential.cloud/constellation/cas/sha256/7e66e92b7a67d1d12839ab030fb7ae38e5e2273474af3762e67bc7fe9471fcd9", - "https://github.com/mvdan/gofumpt/releases/download/v0.8.0/gofumpt_v0.8.0_darwin_arm64", + "https://cdn.confidential.cloud/constellation/cas/sha256/725f7a922bf3f88bed6818a4553e80554cc5cdb67a72236a01707325aa2dbb7b", + "https://github.com/mvdan/gofumpt/releases/download/v0.6.0/gofumpt_v0.6.0_darwin_arm64", ], executable = True, downloaded_file_path = "gofumpt", - sha256 = "7e66e92b7a67d1d12839ab030fb7ae38e5e2273474af3762e67bc7fe9471fcd9", + sha256 = "725f7a922bf3f88bed6818a4553e80554cc5cdb67a72236a01707325aa2dbb7b", ) def _tfsec_deps(): @@ -181,41 +181,41 @@ def _tfsec_deps(): name = "com_github_aquasecurity_tfsec_linux_amd64", build_file_content = """exports_files(["tfsec"], visibility = ["//visibility:public"])""", urls = [ - "https://cdn.confidential.cloud/constellation/cas/sha256/329ae7f67f2f1813ebe08de498719ea7003c75d3ca24bb0b038369062508008e", - "https://github.com/aquasecurity/tfsec/releases/download/v1.28.14/tfsec_1.28.14_linux_amd64.tar.gz", + "https://cdn.confidential.cloud/constellation/cas/sha256/21201f1615de0b4c143eba2da0f988bab3f68184646090b30ece1fdb501396ca", + "https://github.com/aquasecurity/tfsec/releases/download/v1.28.5/tfsec_1.28.5_linux_amd64.tar.gz", ], type = "tar.gz", - sha256 = "329ae7f67f2f1813ebe08de498719ea7003c75d3ca24bb0b038369062508008e", + sha256 = "21201f1615de0b4c143eba2da0f988bab3f68184646090b30ece1fdb501396ca", ) http_archive( name = "com_github_aquasecurity_tfsec_linux_arm64", build_file_content = """exports_files(["tfsec"], visibility = ["//visibility:public"])""", urls = [ - "https://cdn.confidential.cloud/constellation/cas/sha256/13dcbd3602027be49ce6cab7e1c24b0a8e833f0143fe327b0a13b87686541ce0", - "https://github.com/aquasecurity/tfsec/releases/download/v1.28.14/tfsec_1.28.14_linux_arm64.tar.gz", + "https://cdn.confidential.cloud/constellation/cas/sha256/a587a9cd879240074551067114b8f63d2249aab70cabf8f8d6884e2b67cfddad", + "https://github.com/aquasecurity/tfsec/releases/download/v1.28.5/tfsec_1.28.5_linux_arm64.tar.gz", ], type = "tar.gz", - sha256 = "13dcbd3602027be49ce6cab7e1c24b0a8e833f0143fe327b0a13b87686541ce0", + sha256 = "a587a9cd879240074551067114b8f63d2249aab70cabf8f8d6884e2b67cfddad", ) http_archive( name = "com_github_aquasecurity_tfsec_darwin_amd64", build_file_content = """exports_files(["tfsec"], visibility = ["//visibility:public"])""", urls = [ - "https://cdn.confidential.cloud/constellation/cas/sha256/0aeef31f83d6f44ba9ba5b6cbb954304c772dee73ac704e38896940f94af887a", - "https://github.com/aquasecurity/tfsec/releases/download/v1.28.14/tfsec_1.28.14_darwin_amd64.tar.gz", + "https://cdn.confidential.cloud/constellation/cas/sha256/4ad9a313d84aa11893672c7779a99f85a6acaab26c5a05ccc432db08bc4c0a37", + "https://github.com/aquasecurity/tfsec/releases/download/v1.28.5/tfsec_1.28.5_darwin_amd64.tar.gz", ], type = "tar.gz", - sha256 = "0aeef31f83d6f44ba9ba5b6cbb954304c772dee73ac704e38896940f94af887a", + sha256 = "4ad9a313d84aa11893672c7779a99f85a6acaab26c5a05ccc432db08bc4c0a37", ) http_archive( name = "com_github_aquasecurity_tfsec_darwin_arm64", build_file_content = """exports_files(["tfsec"], visibility = ["//visibility:public"])""", urls = [ - "https://cdn.confidential.cloud/constellation/cas/sha256/f39d59a3f9be4eeb3d965657653ad62243103a3d921ce52ca8f907cff45896f5", - 
"https://github.com/aquasecurity/tfsec/releases/download/v1.28.14/tfsec_1.28.14_darwin_arm64.tar.gz", + "https://cdn.confidential.cloud/constellation/cas/sha256/50117ac409bb3c1101453d74f48a639c7ab7ac2f40b023eb7004d84048913888", + "https://github.com/aquasecurity/tfsec/releases/download/v1.28.5/tfsec_1.28.5_darwin_arm64.tar.gz", ], type = "tar.gz", - sha256 = "f39d59a3f9be4eeb3d965657653ad62243103a3d921ce52ca8f907cff45896f5", + sha256 = "50117ac409bb3c1101453d74f48a639c7ab7ac2f40b023eb7004d84048913888", ) def _golangci_lint_deps(): @@ -223,45 +223,45 @@ def _golangci_lint_deps(): name = "com_github_golangci_golangci_lint_linux_amd64", build_file = "//bazel/toolchains:BUILD.golangci.bazel", urls = [ - "https://cdn.confidential.cloud/constellation/cas/sha256/e55e0eb515936c0fbd178bce504798a9bd2f0b191e5e357768b18fd5415ee541", - "https://github.com/golangci/golangci-lint/releases/download/v2.1.6/golangci-lint-2.1.6-linux-amd64.tar.gz", + "https://cdn.confidential.cloud/constellation/cas/sha256/e1c313fb5fc85a33890fdee5dbb1777d1f5829c84d655a47a55688f3aad5e501", + "https://github.com/golangci/golangci-lint/releases/download/v1.56.2/golangci-lint-1.56.2-linux-amd64.tar.gz", ], - strip_prefix = "golangci-lint-2.1.6-linux-amd64", + strip_prefix = "golangci-lint-1.56.2-linux-amd64", type = "tar.gz", - sha256 = "e55e0eb515936c0fbd178bce504798a9bd2f0b191e5e357768b18fd5415ee541", + sha256 = "e1c313fb5fc85a33890fdee5dbb1777d1f5829c84d655a47a55688f3aad5e501", ) http_archive( name = "com_github_golangci_golangci_lint_linux_arm64", build_file = "//bazel/toolchains:BUILD.golangci.bazel", urls = [ - "https://cdn.confidential.cloud/constellation/cas/sha256/582eb73880f4408d7fb89f12b502d577bd7b0b63d8c681da92bb6b9d934d4363", - "https://github.com/golangci/golangci-lint/releases/download/v2.1.6/golangci-lint-2.1.6-linux-arm64.tar.gz", + "https://cdn.confidential.cloud/constellation/cas/sha256/0041594fde41ce43b75e65476a050fe9057881d8b5bccd472f18357e2ead3e04", + "https://github.com/golangci/golangci-lint/releases/download/v1.56.2/golangci-lint-1.56.2-linux-arm64.tar.gz", ], - strip_prefix = "golangci-lint-2.1.6-linux-arm64", + strip_prefix = "golangci-lint-1.56.2-linux-arm64", type = "tar.gz", - sha256 = "582eb73880f4408d7fb89f12b502d577bd7b0b63d8c681da92bb6b9d934d4363", + sha256 = "0041594fde41ce43b75e65476a050fe9057881d8b5bccd472f18357e2ead3e04", ) http_archive( name = "com_github_golangci_golangci_lint_darwin_amd64", build_file = "//bazel/toolchains:BUILD.golangci.bazel", urls = [ - "https://cdn.confidential.cloud/constellation/cas/sha256/e091107c4ca7e283902343ba3a09d14fb56b86e071effd461ce9d67193ef580e", - "https://github.com/golangci/golangci-lint/releases/download/v2.1.6/golangci-lint-2.1.6-darwin-amd64.tar.gz", + "https://cdn.confidential.cloud/constellation/cas/sha256/15c4d19a2c85a04f67779047dbb9467ba176c71fff762a0d514a21bb75e4b42c", + "https://github.com/golangci/golangci-lint/releases/download/v1.56.2/golangci-lint-1.56.2-darwin-amd64.tar.gz", ], - strip_prefix = "golangci-lint-2.1.6-darwin-amd64", + strip_prefix = "golangci-lint-1.56.2-darwin-amd64", type = "tar.gz", - sha256 = "e091107c4ca7e283902343ba3a09d14fb56b86e071effd461ce9d67193ef580e", + sha256 = "15c4d19a2c85a04f67779047dbb9467ba176c71fff762a0d514a21bb75e4b42c", ) http_archive( name = "com_github_golangci_golangci_lint_darwin_arm64", build_file = "//bazel/toolchains:BUILD.golangci.bazel", urls = [ - "https://cdn.confidential.cloud/constellation/cas/sha256/90783fa092a0f64a4f7b7d419f3da1f53207e300261773babe962957240e9ea6", - 
"https://github.com/golangci/golangci-lint/releases/download/v2.1.6/golangci-lint-2.1.6-darwin-arm64.tar.gz", + "https://cdn.confidential.cloud/constellation/cas/sha256/5f9ecda712c7ae08fbf872336fae3db866720e5865903d4c53903184b2a2c2dc", + "https://github.com/golangci/golangci-lint/releases/download/v1.56.2/golangci-lint-1.56.2-darwin-arm64.tar.gz", ], - strip_prefix = "golangci-lint-2.1.6-darwin-arm64", + strip_prefix = "golangci-lint-1.56.2-darwin-arm64", type = "tar.gz", - sha256 = "90783fa092a0f64a4f7b7d419f3da1f53207e300261773babe962957240e9ea6", + sha256 = "5f9ecda712c7ae08fbf872336fae3db866720e5865903d4c53903184b2a2c2dc", ) def _buf_deps(): @@ -270,44 +270,44 @@ def _buf_deps(): strip_prefix = "buf/bin", build_file_content = """exports_files(["buf"], visibility = ["//visibility:public"])""", urls = [ - "https://cdn.confidential.cloud/constellation/cas/sha256/fa10faf16973f3861992cc2687b651350d70eafd467aea72cf0994556c2a0927", - "https://github.com/bufbuild/buf/releases/download/v1.54.0/buf-Linux-x86_64.tar.gz", + "https://cdn.confidential.cloud/constellation/cas/sha256/1033f26361e6fc30ffcfab9d4e4274ffd4af88d9c97de63d2e1721c4a07c1380", + "https://github.com/bufbuild/buf/releases/download/v1.29.0/buf-Linux-x86_64.tar.gz", ], type = "tar.gz", - sha256 = "fa10faf16973f3861992cc2687b651350d70eafd467aea72cf0994556c2a0927", + sha256 = "1033f26361e6fc30ffcfab9d4e4274ffd4af88d9c97de63d2e1721c4a07c1380", ) http_archive( name = "com_github_bufbuild_buf_linux_arm64", strip_prefix = "buf/bin", build_file_content = """exports_files(["buf"], visibility = ["//visibility:public"])""", urls = [ - "https://cdn.confidential.cloud/constellation/cas/sha256/f41ef4431858556ece6a77662d6b9317fa4406585998cb3dffb7403b3e86713e", - "https://github.com/bufbuild/buf/releases/download/v1.54.0/buf-Linux-aarch64.tar.gz", + "https://cdn.confidential.cloud/constellation/cas/sha256/a725e0ab1c6b1e97b31f9d1d946f8b1d56586a96715fae4a7ecc88b6cf601cea", + "https://github.com/bufbuild/buf/releases/download/v1.29.0/buf-Linux-aarch64.tar.gz", ], type = "tar.gz", - sha256 = "f41ef4431858556ece6a77662d6b9317fa4406585998cb3dffb7403b3e86713e", + sha256 = "a725e0ab1c6b1e97b31f9d1d946f8b1d56586a96715fae4a7ecc88b6cf601cea", ) http_archive( name = "com_github_bufbuild_buf_darwin_amd64", strip_prefix = "buf/bin", build_file_content = """exports_files(["buf"], visibility = ["//visibility:public"])""", urls = [ - "https://cdn.confidential.cloud/constellation/cas/sha256/22c9836a836b867e49e9d0ef223fd934cbf2690e7400facddb9be07b8809f889", - "https://github.com/bufbuild/buf/releases/download/v1.54.0/buf-Darwin-x86_64.tar.gz", + "https://cdn.confidential.cloud/constellation/cas/sha256/7ec6c2fd8f7e5e2ddba1e9ebff51eb9b0d6b67b85e105138dd064057c7b32db8", + "https://github.com/bufbuild/buf/releases/download/v1.29.0/buf-Darwin-x86_64.tar.gz", ], type = "tar.gz", - sha256 = "22c9836a836b867e49e9d0ef223fd934cbf2690e7400facddb9be07b8809f889", + sha256 = "7ec6c2fd8f7e5e2ddba1e9ebff51eb9b0d6b67b85e105138dd064057c7b32db8", ) http_archive( name = "com_github_bufbuild_buf_darwin_arm64", strip_prefix = "buf/bin", build_file_content = """exports_files(["buf"], visibility = ["//visibility:public"])""", urls = [ - "https://cdn.confidential.cloud/constellation/cas/sha256/f01f32a690efab3ef22a1c821aebc0c4bec7ca63faddbf64408d7d614e9d7f92", - "https://github.com/bufbuild/buf/releases/download/v1.54.0/buf-Darwin-arm64.tar.gz", + "https://cdn.confidential.cloud/constellation/cas/sha256/b97225a3b3f18bdabb36e83d9aba2e6419ead0c6ca0894d10a95517be5fd302f", + 
"https://github.com/bufbuild/buf/releases/download/v1.29.0/buf-Darwin-arm64.tar.gz", ], type = "tar.gz", - sha256 = "f01f32a690efab3ef22a1c821aebc0c4bec7ca63faddbf64408d7d614e9d7f92", + sha256 = "b97225a3b3f18bdabb36e83d9aba2e6419ead0c6ca0894d10a95517be5fd302f", ) def _talos_docgen_deps(): @@ -351,46 +351,46 @@ def _talos_docgen_deps(): def _helm_deps(): http_archive( name = "com_github_helm_helm_linux_amd64", - sha256 = "a5844ef2c38ef6ddf3b5a8f7d91e7e0e8ebc39a38bb3fc8013d629c1ef29c259", + sha256 = "f43e1c3387de24547506ab05d24e5309c0ce0b228c23bd8aa64e9ec4b8206651", strip_prefix = "linux-amd64", build_file_content = """exports_files(["helm"], visibility = ["//visibility:public"])""", urls = [ - "https://cdn.confidential.cloud/constellation/cas/sha256/a5844ef2c38ef6ddf3b5a8f7d91e7e0e8ebc39a38bb3fc8013d629c1ef29c259", - "https://get.helm.sh/helm-v3.14.4-linux-amd64.tar.gz", + "https://cdn.confidential.cloud/constellation/cas/sha256/f43e1c3387de24547506ab05d24e5309c0ce0b228c23bd8aa64e9ec4b8206651", + "https://get.helm.sh/helm-v3.14.0-linux-amd64.tar.gz", ], type = "tar.gz", ) http_archive( name = "com_github_helm_helm_linux_arm64", - sha256 = "113ccc53b7c57c2aba0cd0aa560b5500841b18b5210d78641acfddc53dac8ab2", + sha256 = "b29e61674731b15f6ad3d1a3118a99d3cc2ab25a911aad1b8ac8c72d5a9d2952", strip_prefix = "linux-arm64", build_file_content = """exports_files(["helm"], visibility = ["//visibility:public"])""", urls = [ - "https://cdn.confidential.cloud/constellation/cas/sha256/113ccc53b7c57c2aba0cd0aa560b5500841b18b5210d78641acfddc53dac8ab2", - "https://get.helm.sh/helm-v3.14.4-linux-arm64.tar.gz", + "https://cdn.confidential.cloud/constellation/cas/sha256/b29e61674731b15f6ad3d1a3118a99d3cc2ab25a911aad1b8ac8c72d5a9d2952", + "https://get.helm.sh/helm-v3.14.0-linux-arm64.tar.gz", ], type = "tar.gz", ) http_archive( name = "com_github_helm_helm_darwin_amd64", - sha256 = "73434aeac36ad068ce2e5582b8851a286dc628eae16494a26e2ad0b24a7199f9", + sha256 = "804586896496f7b3da97f56089ea00f220e075e969b6fdf6c0b7b9cdc22de120", strip_prefix = "darwin-amd64", build_file_content = """exports_files(["helm"], visibility = ["//visibility:public"])""", urls = [ - "https://cdn.confidential.cloud/constellation/cas/sha256/73434aeac36ad068ce2e5582b8851a286dc628eae16494a26e2ad0b24a7199f9", - "https://get.helm.sh/helm-v3.14.4-darwin-amd64.tar.gz", + "https://cdn.confidential.cloud/constellation/cas/sha256/804586896496f7b3da97f56089ea00f220e075e969b6fdf6c0b7b9cdc22de120", + "https://get.helm.sh/helm-v3.14.0-darwin-amd64.tar.gz", ], type = "tar.gz", ) http_archive( name = "com_github_helm_helm_darwin_arm64", - sha256 = "61e9c5455f06b2ad0a1280975bf65892e707adc19d766b0cf4e9006e3b7b4b6c", + sha256 = "c2f36f3289a01c7c93ca11f84d740a170e0af1d2d0280bd523a409a62b8dfa1d", strip_prefix = "darwin-arm64", build_file_content = """exports_files(["helm"], visibility = ["//visibility:public"])""", type = "tar.gz", urls = [ - "https://cdn.confidential.cloud/constellation/cas/sha256/61e9c5455f06b2ad0a1280975bf65892e707adc19d766b0cf4e9006e3b7b4b6c", - "https://get.helm.sh/helm-v3.14.4-darwin-arm64.tar.gz", + "https://cdn.confidential.cloud/constellation/cas/sha256/c2f36f3289a01c7c93ca11f84d740a170e0af1d2d0280bd523a409a62b8dfa1d", + "https://get.helm.sh/helm-v3.14.0-darwin-arm64.tar.gz", ], ) @@ -398,40 +398,40 @@ def _ghh_deps(): http_archive( name = "com_github_katexochen_ghh_linux_amd64", urls = [ - "https://cdn.confidential.cloud/constellation/cas/sha256/32f8a4110d88d80e163212a89a3538a13326494840ac97183d1b20bcc9eac7ba", - 
"https://github.com/katexochen/ghh/releases/download/v0.3.5/ghh_0.3.5_linux_amd64.tar.gz", + "https://cdn.confidential.cloud/constellation/cas/sha256/e53411fba3e3445bd08d4d7ce0ee9e526e9fcb12045616c80b9eb1cc342f0c90", + "https://github.com/katexochen/ghh/releases/download/v0.3.3/ghh_0.3.3_linux_amd64.tar.gz", ], type = "tar.gz", build_file_content = """exports_files(["ghh"], visibility = ["//visibility:public"])""", - sha256 = "32f8a4110d88d80e163212a89a3538a13326494840ac97183d1b20bcc9eac7ba", + sha256 = "e53411fba3e3445bd08d4d7ce0ee9e526e9fcb12045616c80b9eb1cc342f0c90", ) http_archive( name = "com_github_katexochen_ghh_linux_arm64", urls = [ - "https://cdn.confidential.cloud/constellation/cas/sha256/b43ef1dd2f851eed7c69c87f4f73dd923bd1170cefbde247933d5b398a3319d1", - "https://github.com/katexochen/ghh/releases/download/v0.3.5/ghh_0.3.5_linux_arm64.tar.gz", + "https://cdn.confidential.cloud/constellation/cas/sha256/96e8c073fca7b8b56490359b3df0386fac9138224699c71b33c14abc2373452b", + "https://github.com/katexochen/ghh/releases/download/v0.3.3/ghh_0.3.3_linux_arm64.tar.gz", ], type = "tar.gz", build_file_content = """exports_files(["ghh"], visibility = ["//visibility:public"])""", - sha256 = "b43ef1dd2f851eed7c69c87f4f73dd923bd1170cefbde247933d5b398a3319d1", + sha256 = "96e8c073fca7b8b56490359b3df0386fac9138224699c71b33c14abc2373452b", ) http_archive( name = "com_github_katexochen_ghh_darwin_amd64", urls = [ - "https://cdn.confidential.cloud/constellation/cas/sha256/7db9ebb62faf2a31f56e7a8994a971adddec98b3238880ae58b01eb549b8bba3", - "https://github.com/katexochen/ghh/releases/download/v0.3.5/ghh_0.3.5_darwin_amd64.tar.gz", + "https://cdn.confidential.cloud/constellation/cas/sha256/0fbffabe8f80c640403ef9b03348bd34e1f7de1321b0da48a36ae0403fabd09a", + "https://github.com/katexochen/ghh/releases/download/v0.3.3/ghh_0.3.3_darwin_amd64.tar.gz", ], type = "tar.gz", build_file_content = """exports_files(["ghh"], visibility = ["//visibility:public"])""", - sha256 = "7db9ebb62faf2a31f56e7a8994a971adddec98b3238880ae58b01eb549b8bba3", + sha256 = "0fbffabe8f80c640403ef9b03348bd34e1f7de1321b0da48a36ae0403fabd09a", ) http_archive( name = "com_github_katexochen_ghh_darwin_arm64", urls = [ - "https://cdn.confidential.cloud/constellation/cas/sha256/78a2c8b321893736a2b5de59898794a72b878db9329157f348489c73d4592c6f", - "https://github.com/katexochen/ghh/releases/download/v0.3.5/ghh_0.3.5_darwin_arm64.tar.gz", + "https://cdn.confidential.cloud/constellation/cas/sha256/32457fadf46c1b3e15c1caeadd176e8ff67152de744c83e7afaeb308bd514193", + "https://github.com/katexochen/ghh/releases/download/v0.3.3/ghh_0.3.3_darwin_arm64.tar.gz", ], type = "tar.gz", build_file_content = """exports_files(["ghh"], visibility = ["//visibility:public"])""", - sha256 = "78a2c8b321893736a2b5de59898794a72b878db9329157f348489c73d4592c6f", + sha256 = "32457fadf46c1b3e15c1caeadd176e8ff67152de744c83e7afaeb308bd514193", ) diff --git a/bazel/toolchains/container_images.bzl b/bazel/toolchains/container_images.bzl index 1c405484f..94d881adc 100644 --- a/bazel/toolchains/container_images.bzl +++ b/bazel/toolchains/container_images.bzl @@ -7,7 +7,7 @@ load("@rules_oci//oci:pull.bzl", "oci_pull") def containter_image_deps(): oci_pull( name = "distroless_static", - digest = "sha256:3d0f463de06b7ddff27684ec3bfd0b54a425149d0f8685308b1fdf297b0265e9", + digest = "sha256:6706c73aae2afaa8201d63cc3dda48753c09bcd6c300762251065c0f7e602b25", image = "gcr.io/distroless/static", platforms = [ "linux/amd64", @@ -16,6 +16,6 @@ def containter_image_deps(): ) 
oci_pull( name = "libvirtd_base", - digest = "sha256:f23e0f587860c841adde25b1b4f0d99aa4fbce1c92b01b5b46ab5fa35980a135", + digest = "sha256:527fc93a1a53c08b51f87295ff45745dab4570da7cbeb28e93f359e813aba29b", image = "ghcr.io/edgelesssys/constellation/libvirtd-base", ) diff --git a/bazel/toolchains/go_module_deps.bzl b/bazel/toolchains/go_module_deps.bzl new file mode 100644 index 000000000..4d432be73 --- /dev/null +++ b/bazel/toolchains/go_module_deps.bzl @@ -0,0 +1,7182 @@ +"""Go module dependencies for Bazel. + +Contains the equivalent of go.mod and go.sum files for Bazel. +""" + +load("@bazel_gazelle//:deps.bzl", "go_repository") + +def go_dependencies(): + """Declare Go module dependencies for Bazel.""" + go_repository( + name = "build_buf_gen_go_bufbuild_protovalidate_protocolbuffers_go", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go", + sum = "h1:tdpHgTbmbvEIARu+bixzmleMi14+3imnpoFXz+Qzjp4=", + version = "v1.31.0-20230802163732-1c33ebd9ecfa.1", + ) + go_repository( + name = "cat_dario_mergo", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "dario.cat/mergo", + sum = "h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=", + version = "v1.0.0", + ) + go_repository( + name = "cc_mvdan_editorconfig", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "mvdan.cc/editorconfig", + sum = "h1:XL+7ys6ls/RKrkUNFQvEwIvNHh+JKx8Mj1pUV5wQxQE=", + version = "v0.2.0", + ) + go_repository( + name = "cc_mvdan_unparam", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "mvdan.cc/unparam", + sum = "h1:VuJo4Mt0EVPychre4fNlDWDuE5AjXtPJpRUWqZDQhaI=", + version = "v0.0.0-20230312165513-e84e2d14e3b8", + ) + go_repository( + name = "co_honnef_go_tools", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "honnef.co/go/tools", + sum = "h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8=", + version = "v0.0.1-2020.1.4", + ) + go_repository( + name = "com_github_adalogics_go_fuzz_headers", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/AdaLogics/go-fuzz-headers", + sum = "h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU=", + version = "v0.0.0-20230811130428-ced1acdcaa24", + ) + go_repository( + name = "com_github_adamkorcz_go_118_fuzz_build", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/AdamKorcz/go-118-fuzz-build", + sum = "h1:59MxjQVfjXsBpLy+dbd2/ELV5ofnUkUZBvWSC85sheA=", + version = "v0.0.0-20230306123547-8075edf89bb0", + ) + go_repository( + name = "com_github_adamkorcz_go_fuzz_headers_1", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/AdamKorcz/go-fuzz-headers-1", + sum = "h1:zjqpY4C7H15HjRPEenkS4SAn3Jy2eRRjkjZbGR30TOg=", + version = "v0.0.0-20230919221257-8b5d3ce2d11d", + ) + go_repository( + name = "com_github_adrg_xdg", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/adrg/xdg", + sum = "h1:RzRqFcjH4nE5C6oTAxhBtoE2IRyjBSa62SCbyPidvls=", + version = "v0.4.0", + ) + go_repository( + name = "com_github_agext_levenshtein", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/agext/levenshtein", + sum = "h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=", + version = "v1.2.3", + ) + 
go_repository( + name = "com_github_agnivade_levenshtein", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/agnivade/levenshtein", + sum = "h1:3oJU7J3FGFmyhn8KHjmVaZCN5hxTr7GxgRue+sxIXdQ=", + version = "v1.0.1", + ) + go_repository( + name = "com_github_akavel_rsrc", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/akavel/rsrc", + sum = "h1:Zxm8V5eI1hW4gGaYsJQUhxpjkENuG91ki8B4zCrvEsw=", + version = "v0.10.2", + ) + go_repository( + name = "com_github_alcortesm_tgz", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/alcortesm/tgz", + sum = "h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs=", + version = "v0.0.0-20161220082320-9c5fe88206d7", + ) + go_repository( + name = "com_github_alecthomas_kingpin_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/alecthomas/kingpin/v2", + sum = "h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY=", + version = "v2.4.0", + ) + go_repository( + name = "com_github_alecthomas_template", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/alecthomas/template", + sum = "h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU=", + version = "v0.0.0-20160405071501-a0175ee3bccc", + ) + go_repository( + name = "com_github_alecthomas_units", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/alecthomas/units", + sum = "h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc=", + version = "v0.0.0-20211218093645-b94a6e3cc137", + ) + go_repository( + name = "com_github_alessio_shellescape", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/alessio/shellescape", + sum = "h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0=", + version = "v1.4.1", + ) + go_repository( + name = "com_github_anatol_vmtest", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/anatol/vmtest", + sum = "h1:t4JGeY9oaF5LB4Rdx9e2wARRRPAYt8Ow4eCf5SwO3fA=", + version = "v0.0.0-20220413190228-7a42f1f6d7b8", + ) + go_repository( + name = "com_github_anmitsu_go_shlex", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/anmitsu/go-shlex", + sum = "h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=", + version = "v0.0.0-20161002113705-648efa622239", + ) + go_repository( + name = "com_github_antihax_optional", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/antihax/optional", + sum = "h1:xK2lYat7ZLaVVcIuj82J8kIro4V6kDe0AUDFboUCwcg=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_antlr_antlr4_runtime_go_antlr_v4", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/antlr/antlr4/runtime/Go/antlr/v4", + sum = "h1:goHVqTbFX3AIo0tzGr14pgfAW2ZfPChKO21Z9MGf/gk=", + version = "v4.0.0-20230512164433-5d1fd1a340c9", + ) + go_repository( + name = "com_github_apparentlymart_go_dump", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/apparentlymart/go-dump", + sum = "h1:ZSTrOEhiM5J5RFxEaFvMZVEAM1KvT1YzbEOwB2EAGjA=", + version = "v0.0.0-20180507223929-23540a00eaa3", + ) + go_repository( + name = "com_github_apparentlymart_go_textseg_v12", + build_file_generation = "on", + build_file_proto_mode = 
"disable_global", + importpath = "github.com/apparentlymart/go-textseg/v12", + sum = "h1:bNEQyAGak9tojivJNkoqWErVCQbjdL7GzRt3F8NvfJ0=", + version = "v12.0.0", + ) + go_repository( + name = "com_github_apparentlymart_go_textseg_v13", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/apparentlymart/go-textseg/v13", + sum = "h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw=", + version = "v13.0.0", + ) + go_repository( + name = "com_github_apparentlymart_go_textseg_v15", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/apparentlymart/go-textseg/v15", + sum = "h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY=", + version = "v15.0.0", + ) + go_repository( + name = "com_github_armon_circbuf", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/armon/circbuf", + sum = "h1:QEF07wC0T1rKkctt1RINW/+RMTVmiwxETico2l3gxJA=", + version = "v0.0.0-20150827004946-bbbad097214e", + ) + go_repository( + name = "com_github_armon_go_radix", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/armon/go-radix", + sum = "h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_armon_go_socks5", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/armon/go-socks5", + sum = "h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=", + version = "v0.0.0-20160902184237-e75332964ef5", + ) + go_repository( + name = "com_github_asaskevich_govalidator", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/asaskevich/govalidator", + sum = "h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=", + version = "v0.0.0-20230301143203-a9d515a09cc2", + ) + go_repository( + name = "com_github_aws_aws_sdk_go", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/aws/aws-sdk-go", + sum = "h1:BUhSaO2qLk2jkcyLebcvDmbdOunVe/Wq8RsCyI8szL0=", + version = "v1.50.22", + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/aws/aws-sdk-go-v2", + sum = "h1:sv7+1JVJxOu/dD/sz/csHX7jFqmP001TIY7aytBWDSQ=", + version = "v1.25.0", + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_aws_protocol_eventstream", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream", + sum = "h1:2UO6/nT1lCZq1LqM67Oa4tdgP1CvL1sLSxvuD+VrOeE=", + version = "v1.6.0", + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_config", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/aws/aws-sdk-go-v2/config", + sum = "h1:oxvGd/cielb+oumJkQmXI0i5tQCRqfdCHV58AfE0pGY=", + version = "v1.27.1", + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_credentials", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/aws/aws-sdk-go-v2/credentials", + sum = "h1:H4WlK2OnVotRmbVgS8Ww2Z4B3/dDHxDS7cW6EiCECN4=", + version = "v1.17.1", + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_feature_ec2_imds", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/aws/aws-sdk-go-v2/feature/ec2/imds", + sum = 
"h1:xWCwjjvVz2ojYTP4kBKUuUh9ZrXfcAXpflhOUUeXg1k=", + version = "v1.15.0", + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_feature_s3_manager", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/aws/aws-sdk-go-v2/feature/s3/manager", + sum = "h1:PYWtYuCP+gYQ576MS4QRn7y1+kp+OZzzG7jlwnFj1wQ=", + version = "v1.16.3", + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_internal_configsources", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/aws/aws-sdk-go-v2/internal/configsources", + sum = "h1:NPs/EqVO+ajwOoq56EfcGKa3L3ruWuazkIw1BqxwOPw=", + version = "v1.3.0", + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_internal_endpoints_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2", + sum = "h1:ks7KGMVUMoDzcxNWUlEdI+/lokMFD136EL6DWmUOV80=", + version = "v2.6.0", + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_internal_ini", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/aws/aws-sdk-go-v2/internal/ini", + sum = "h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU=", + version = "v1.8.0", + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_internal_v4a", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/aws/aws-sdk-go-v2/internal/v4a", + sum = "h1:TkbRExyKSVHELwG9gz2+gql37jjec2R5vus9faTomwE=", + version = "v1.3.0", + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_service_autoscaling", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/aws/aws-sdk-go-v2/service/autoscaling", + sum = "h1:gdukBEVzo0O/0UiR0ee5zQokJ7RIP0p1jF00ayKHZ4o=", + version = "v1.39.1", + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_service_cloudfront", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/aws/aws-sdk-go-v2/service/cloudfront", + sum = "h1:fGpjGBqtfTz2mymcChLB42StEw0vHwsHqDFnctmoOQ8=", + version = "v1.34.1", + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_service_ec2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/aws/aws-sdk-go-v2/service/ec2", + sum = "h1:OGzK1PwB0sCE2Zwy6ISs/XSul4lrujQf3doXvmGqCwg=", + version = "v1.148.1", + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_service_elasticloadbalancingv2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2", + sum = "h1:V+AqIZnytQg3jgEBIbvLYzxcMagTvC6kzhex0ZbDcTE=", + version = "v1.29.1", + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_service_internal_accept_encoding", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding", + sum = "h1:a33HuFlO0KsveiP90IUJh8Xr/cx9US2PqkSroaLc+o8=", + version = "v1.11.0", + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_service_internal_checksum", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/aws/aws-sdk-go-v2/service/internal/checksum", + sum = "h1:UiSyK6ent6OKpkMJN3+k5HZ4sk4UfchEaaW5wv7SblQ=", + version = "v1.3.0", + ) + go_repository( + name = 
"com_github_aws_aws_sdk_go_v2_service_internal_presigned_url", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url", + sum = "h1:SHN/umDLTmFTmYfI+gkanz6da3vK8Kvj/5wkqnTHbuA=", + version = "v1.11.0", + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_service_internal_s3shared", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/aws/aws-sdk-go-v2/service/internal/s3shared", + sum = "h1:l5puwOHr7IxECuPMIuZG7UKOzAnF24v6t4l+Z5Moay4=", + version = "v1.17.0", + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_service_kms", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/aws/aws-sdk-go-v2/service/kms", + sum = "h1:W9PbZAZAEcelhhjb7KuwUtf+Lbc+i7ByYJRuWLlnxyQ=", + version = "v1.27.9", + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_service_resourcegroupstaggingapi", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi", + sum = "h1:TAKRHjyAtRMUeqsPnjzI4EXz3WtIo3IXRhJiIPa4MFo=", + version = "v1.20.2", + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_service_s3", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/aws/aws-sdk-go-v2/service/s3", + sum = "h1:UxJGNZ+/VhocG50aui1p7Ub2NjDzijCpg8Y3NuznijM=", + version = "v1.50.2", + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_service_secretsmanager", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/aws/aws-sdk-go-v2/service/secretsmanager", + sum = "h1:Wq73CAj0ktbUHufBTar4uMVzP7JHraTq6ZMloCAQxRk=", + version = "v1.27.2", + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_service_sso", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/aws/aws-sdk-go-v2/service/sso", + sum = "h1:GokXLGW3JkH/XzEVp1jDVRxty1eNGB7emkjDG1qxGK8=", + version = "v1.19.1", + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_service_ssooidc", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/aws/aws-sdk-go-v2/service/ssooidc", + sum = "h1:2oxSGiYNxTHsuRuPD9McWvcvR6s61G3ssZLyQzcxQL0=", + version = "v1.22.1", + ) + go_repository( + name = "com_github_aws_aws_sdk_go_v2_service_sts", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/aws/aws-sdk-go-v2/service/sts", + sum = "h1:QFT2KUWaVwwGi5/2sQNBOViFpLSkZmiyiHUxE2k6sOU=", + version = "v1.27.1", + ) + go_repository( + name = "com_github_aws_smithy_go", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/aws/smithy-go", + sum = "h1:6+kZsCXZwKxZS9RfISnPc4EXlHoyAkm2hPuM8X2BrrQ=", + version = "v1.20.0", + ) + go_repository( + name = "com_github_azure_azure_sdk_for_go", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/Azure/azure-sdk-for-go", + sum = "h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU=", + version = "v68.0.0+incompatible", + ) + go_repository( + name = "com_github_azure_azure_sdk_for_go_sdk_azcore", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/Azure/azure-sdk-for-go/sdk/azcore", + sum = "h1:c4k2FIYIh4xtwqrQwV0Ct1v5+ehlNXj5NI/MWVsiTkQ=", + 
version = "v1.9.2", + ) + go_repository( + name = "com_github_azure_azure_sdk_for_go_sdk_azidentity", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/Azure/azure-sdk-for-go/sdk/azidentity", + sum = "h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ=", + version = "v1.5.1", + ) + go_repository( + name = "com_github_azure_azure_sdk_for_go_sdk_internal", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/Azure/azure-sdk-for-go/sdk/internal", + sum = "h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ=", + version = "v1.5.2", + ) + go_repository( + name = "com_github_azure_azure_sdk_for_go_sdk_keyvault_azkeys", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys", + sum = "h1:m/sWOGCREuSBqg2htVQTBY8nOZpyajYztF0vUvSZTuM=", + version = "v0.10.0", + ) + go_repository( + name = "com_github_azure_azure_sdk_for_go_sdk_keyvault_internal", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal", + sum = "h1:FbH3BbSb4bvGluTesZZ+ttN/MDsnMmQP36OSnDuSXqw=", + version = "v0.7.1", + ) + go_repository( + name = "com_github_azure_azure_sdk_for_go_sdk_resourcemanager_compute_armcompute_v5", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5", + sum = "h1:MxA59PGoCFb+vCwRQi3PhQEwHj4+r2dhuv9HG+vM7iM=", + version = "v5.5.0", + ) + go_repository( + name = "com_github_azure_azure_sdk_for_go_sdk_resourcemanager_internal_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2", + sum = "h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do=", + version = "v2.0.0", + ) + go_repository( + name = "com_github_azure_azure_sdk_for_go_sdk_resourcemanager_network_armnetwork_v5", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v5", + sum = "h1:9CrwzqQ+e8EqD+A2bh547GjBU4K0o30FhiTB981LFNI=", + version = "v5.0.0", + ) + go_repository( + name = "com_github_azure_azure_sdk_for_go_sdk_resourcemanager_resources_armresources", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources", + sum = "h1:7CBQ+Ei8SP2c6ydQTGCCrS35bDxgTMfoP2miAwK++OU=", + version = "v1.1.1", + ) + go_repository( + name = "com_github_azure_azure_sdk_for_go_sdk_resourcemanager_storage_armstorage", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage", + sum = "h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70=", + version = "v1.5.0", + ) + go_repository( + name = "com_github_azure_azure_sdk_for_go_sdk_security_keyvault_azkeys", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys", + sum = "h1:MyVTgWR8qd/Jw1Le0NZebGBUCLbtak3bJ3z1OlqZBpw=", + version = "v1.0.1", + ) + go_repository( + name = "com_github_azure_azure_sdk_for_go_sdk_security_keyvault_azsecrets", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + 
importpath = "github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets", + sum = "h1:h4Zxgmi9oyZL2l8jeg1iRTqPloHktywWcu0nlJmo1tA=", + version = "v1.1.0", + ) + go_repository( + name = "com_github_azure_azure_sdk_for_go_sdk_security_keyvault_internal", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal", + sum = "h1:D3occbWoio4EBLkbkevetNMAVX197GkzbUMtqjGWn80=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_azure_azure_sdk_for_go_sdk_storage_azblob", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob", + sum = "h1:IfFdxTUDiV58iZqPKgyWiz4X4fCxZeQ1pTQPImLYXpY=", + version = "v1.3.0", + ) + go_repository( + name = "com_github_azure_azure_sdk_for_go_sdk_storage_azblob_testdata_perf", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/testdata/perf", + sum = "h1:45Ajiuhu6AeJTFdwxn2OWXZTQOHdXT1U/aezrVu6HIM=", + version = "v0.0.0-20240208231215-981108a6de20", + ) + go_repository( + name = "com_github_azure_go_ansiterm", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/Azure/go-ansiterm", + sum = "h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=", + version = "v0.0.0-20230124172434-306776ec8161", + ) + go_repository( + name = "com_github_azure_go_autorest", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/Azure/go-autorest", + sum = "h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=", + version = "v14.2.0+incompatible", + ) + go_repository( + name = "com_github_azure_go_autorest_autorest", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/Azure/go-autorest/autorest", + sum = "h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw=", + version = "v0.11.29", + ) + go_repository( + name = "com_github_azure_go_autorest_autorest_adal", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/Azure/go-autorest/autorest/adal", + sum = "h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8=", + version = "v0.9.23", + ) + go_repository( + name = "com_github_azure_go_autorest_autorest_date", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/Azure/go-autorest/autorest/date", + sum = "h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=", + version = "v0.3.0", + ) + go_repository( + name = "com_github_azure_go_autorest_autorest_mocks", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/Azure/go-autorest/autorest/mocks", + sum = "h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw=", + version = "v0.4.2", + ) + go_repository( + name = "com_github_azure_go_autorest_autorest_to", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/Azure/go-autorest/autorest/to", + sum = "h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk=", + version = "v0.4.0", + ) + go_repository( + name = "com_github_azure_go_autorest_autorest_validation", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/Azure/go-autorest/autorest/validation", + sum = "h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac=", + version = "v0.3.1", + ) + go_repository( + name 
= "com_github_azure_go_autorest_logger", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/Azure/go-autorest/logger", + sum = "h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=", + version = "v0.2.1", + ) + go_repository( + name = "com_github_azure_go_autorest_tracing", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/Azure/go-autorest/tracing", + sum = "h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=", + version = "v0.6.0", + ) + go_repository( + name = "com_github_azuread_microsoft_authentication_library_for_go", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/AzureAD/microsoft-authentication-library-for-go", + sum = "h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=", + version = "v1.2.2", + ) + go_repository( + name = "com_github_bazelbuild_buildtools", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/bazelbuild/buildtools", + sum = "h1:XmPu4mXICgdGnC5dXGjUGbwUD/kUmS0l5Aop3LaevBM=", + version = "v0.0.0-20230317132445-9c3c1fc0106e", + ) + go_repository( + name = "com_github_bazelbuild_rules_go", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/bazelbuild/rules_go", + sum = "h1:aY2smc3JWyUKOjGYmOKVLX70fPK9ON0rtwQojuIeUHc=", + version = "v0.42.0", + ) + go_repository( + name = "com_github_beorn7_perks", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/beorn7/perks", + sum = "h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=", + version = "v1.0.1", + ) + go_repository( + name = "com_github_bgentry_speakeasy", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/bgentry/speakeasy", + sum = "h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=", + version = "v0.1.0", + ) + go_repository( + name = "com_github_blang_semver", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/blang/semver", + sum = "h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=", + version = "v3.5.1+incompatible", + ) + go_repository( + name = "com_github_blang_semver_v4", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/blang/semver/v4", + sum = "h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=", + version = "v4.0.0", + ) + go_repository( + name = "com_github_bshuster_repo_logrus_logstash_hook", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/bshuster-repo/logrus-logstash-hook", + sum = "h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_bufbuild_protocompile", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/bufbuild/protocompile", + sum = "h1:Uu7WiSQ6Yj9DbkdnOe7U4mNKp58y9WDMKDn28/ZlunY=", + version = "v0.6.0", + ) + go_repository( + name = "com_github_bufbuild_protovalidate_go", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/bufbuild/protovalidate-go", + sum = "h1:pJr07sYhliyfj/STAM7hU4J3FKpVeLVKvOBmOTN8j+s=", + version = "v0.2.1", + ) + go_repository( + name = "com_github_bugsnag_bugsnag_go", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/bugsnag/bugsnag-go", + sum = 
"h1:rFt+Y/IK1aEZkEHchZRSq9OQbsSzIT/OrI8YFFmRIng=", + version = "v0.0.0-20141110184014-b1d153021fcd", + ) + go_repository( + name = "com_github_bugsnag_osext", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/bugsnag/osext", + sum = "h1:otBG+dV+YK+Soembjv71DPz3uX/V/6MMlSyD9JBQ6kQ=", + version = "v0.0.0-20130617224835-0dd3f918b21b", + ) + go_repository( + name = "com_github_bugsnag_panicwrap", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/bugsnag/panicwrap", + sum = "h1:nvj0OLI3YqYXer/kZD8Ri1aaunCxIEsOst1BVJswV0o=", + version = "v0.0.0-20151223152923-e2c28503fcd0", + ) + go_repository( + name = "com_github_burntsushi_toml", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/BurntSushi/toml", + sum = "h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=", + version = "v1.3.2", + ) + go_repository( + name = "com_github_burntsushi_xgb", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/BurntSushi/xgb", + sum = "h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc=", + version = "v0.0.0-20160522181843-27f122750802", + ) + go_repository( + name = "com_github_bwesterb_go_ristretto", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/bwesterb/go-ristretto", + sum = "h1:1w53tCkGhCQ5djbat3+MH0BAQ5Kfgbt56UZQ/JMzngw=", + version = "v1.2.3", + ) + go_repository( + name = "com_github_cavaliercoder_badio", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/cavaliercoder/badio", + sum = "h1:YYUjy5BRwO5zPtfk+aa2gw255FIIoi93zMmuy19o0bc=", + version = "v0.0.0-20160213150051-ce5280129e9e", + ) + go_repository( + name = "com_github_cavaliercoder_go_rpm", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/cavaliercoder/go-rpm", + sum = "h1:jP7ki8Tzx9ThnFPLDhBYAhEpI2+jOURnHQNURgsMvnY=", + version = "v0.0.0-20200122174316-8cb9fd9c31a8", + ) + go_repository( + name = "com_github_cavaliergopher_cpio", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/cavaliergopher/cpio", + sum = "h1:KQFSeKmZhv0cr+kawA3a0xTQCU4QxXF1vhU7P7av2KM=", + version = "v1.0.1", + ) + go_repository( + name = "com_github_cenkalti_backoff_v3", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/cenkalti/backoff/v3", + sum = "h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M=", + version = "v3.2.2", + ) + go_repository( + name = "com_github_cenkalti_backoff_v4", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/cenkalti/backoff/v4", + sum = "h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=", + version = "v4.2.1", + ) + go_repository( + name = "com_github_census_instrumentation_opencensus_proto", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/census-instrumentation/opencensus-proto", + sum = "h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g=", + version = "v0.4.1", + ) + go_repository( + name = "com_github_cespare_xxhash", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/cespare/xxhash", + sum = "h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=", + version = "v1.1.0", + ) + go_repository( + name = "com_github_cespare_xxhash_v2", + 
build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/cespare/xxhash/v2", + sum = "h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=", + version = "v2.2.0", + ) + go_repository( + name = "com_github_chai2010_gettext_go", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/chai2010/gettext-go", + sum = "h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk=", + version = "v1.0.2", + ) + go_repository( + name = "com_github_checkpoint_restore_go_criu_v5", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/checkpoint-restore/go-criu/v5", + sum = "h1:wpFFOoomK3389ue2lAb0Boag6XPht5QYpipxmSNL4d8=", + version = "v5.3.0", + ) + go_repository( + name = "com_github_chromedp_cdproto", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/chromedp/cdproto", + sum = "h1:aPflPkRFkVwbW6dmcVqfgwp1i+UWGFH6VgR1Jim5Ygc=", + version = "v0.0.0-20230802225258-3cf4e6d46a89", + ) + go_repository( + name = "com_github_chromedp_chromedp", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/chromedp/chromedp", + sum = "h1:dKtNz4kApb06KuSXoTQIyUC2TrA0fhGDwNZf3bcgfKw=", + version = "v0.9.2", + ) + go_repository( + name = "com_github_chromedp_sysutil", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/chromedp/sysutil", + sum = "h1:+ZxhTpfpZlmchB58ih/LBHX52ky7w2VhQVKQMucy3Ic=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_chzyer_logex", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/chzyer/logex", + sum = "h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE=", + version = "v1.1.10", + ) + go_repository( + name = "com_github_chzyer_readline", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/chzyer/readline", + sum = "h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI=", + version = "v1.5.1", + ) + go_repository( + name = "com_github_chzyer_test", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/chzyer/test", + sum = "h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8=", + version = "v0.0.0-20180213035817-a1ea475d72b1", + ) + go_repository( + name = "com_github_cilium_ebpf", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/cilium/ebpf", + sum = "h1:64sn2K3UKw8NbP/blsixRpF3nXuyhz/VjRlRzvlBRu4=", + version = "v0.9.1", + ) + go_repository( + name = "com_github_client9_misspell", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/client9/misspell", + sum = "h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=", + version = "v0.3.4", + ) + go_repository( + name = "com_github_cloudflare_circl", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/cloudflare/circl", + # keep + patches = [ + "//3rdparty/bazel/com_github_cloudflare_circl:math_fp448_BUILD_bazel.patch", + "//3rdparty/bazel/com_github_cloudflare_circl:math_fp25519_BUILD_bazel.patch", + "//3rdparty/bazel/com_github_cloudflare_circl:dh_x448_BUILD_bazel.patch", + "//3rdparty/bazel/com_github_cloudflare_circl:dh_x25519_BUILD_bazel.patch", + ], + sum = "h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU=", + version = "v1.3.7", + ) + go_repository( + name = 
"com_github_cncf_udpa_go", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/cncf/udpa/go", + sum = "h1:QQ3GSy+MqSHxm/d8nCtnAiZdYFd45cYZPs8vOOIYKfk=", + version = "v0.0.0-20220112060539-c52dc94e7fbe", + ) + go_repository( + name = "com_github_cncf_xds_go", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/cncf/xds/go", + sum = "h1:7To3pQ+pZo0i3dsWEbinPNFs5gPSBOsJtx3wTT94VBY=", + version = "v0.0.0-20231109132714-523115ebc101", + ) + go_repository( + name = "com_github_codahale_rfc6979", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/codahale/rfc6979", + sum = "h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE=", + version = "v0.0.0-20141003034818-6a90f24967eb", + ) + go_repository( + name = "com_github_common_nighthawk_go_figure", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/common-nighthawk/go-figure", + sum = "h1:J5BL2kskAlV9ckgEsNQXscjIaLiOYiZ75d4e94E6dcQ=", + version = "v0.0.0-20210622060536-734e95fb86be", + ) + go_repository( + name = "com_github_container_storage_interface_spec", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/container-storage-interface/spec", + sum = "h1:D0vhF3PLIZwlwZEf2eNbpujGCNwspwTYf2idJRJx4xI=", + version = "v1.8.0", + ) + go_repository( + name = "com_github_containerd_aufs", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/containerd/aufs", + sum = "h1:2oeJiwX5HstO7shSrPZjrohJZLzK36wvpdmzDRkL/LY=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_containerd_btrfs_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/containerd/btrfs/v2", + sum = "h1:FN4wsx7KQrYoLXN7uLP0vBV4oVWHOIKDRQ1G2Z0oL5M=", + version = "v2.0.0", + ) + go_repository( + name = "com_github_containerd_cgroups", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/containerd/cgroups", + sum = "h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=", + version = "v1.1.0", + ) + go_repository( + name = "com_github_containerd_cgroups_v3", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/containerd/cgroups/v3", + sum = "h1:f5WFqIVSgo5IZmtTT3qVBo6TzI1ON6sycSBKkymb9L0=", + version = "v3.0.2", + ) + go_repository( + name = "com_github_containerd_console", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/containerd/console", + sum = "h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw=", + version = "v1.0.3", + ) + go_repository( + name = "com_github_containerd_containerd", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/containerd/containerd", + sum = "h1:wPYKIeGMN8vaggSKuV1X0wZulpMz4CrgEsZdaCyB6Is=", + version = "v1.7.13", + ) + go_repository( + name = "com_github_containerd_continuity", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/containerd/continuity", + sum = "h1:v3y/4Yz5jwnvqPKJJ+7Wf93fyWoCB3F5EclWG023MDM=", + version = "v0.4.2", + ) + go_repository( + name = "com_github_containerd_fifo", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/containerd/fifo", + sum = 
"h1:4I2mbh5stb1u6ycIABlBw9zgtlK8viPI9QkQNRQEEmY=", + version = "v1.1.0", + ) + go_repository( + name = "com_github_containerd_go_cni", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/containerd/go-cni", + sum = "h1:ORi7P1dYzCwVM6XPN4n3CbkuOx/NZ2DOqy+SHRdo9rU=", + version = "v1.1.9", + ) + go_repository( + name = "com_github_containerd_go_runc", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/containerd/go-runc", + sum = "h1:oU+lLv1ULm5taqgV/CJivypVODI4SUz1znWjv3nNYS0=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_containerd_imgcrypt", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/containerd/imgcrypt", + sum = "h1:WSf9o9EQ0KGHiUx2ESFZ+PKf4nxK9BcvV/nJDX8RkB4=", + version = "v1.1.7", + ) + go_repository( + name = "com_github_containerd_log", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/containerd/log", + sum = "h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=", + version = "v0.1.0", + ) + go_repository( + name = "com_github_containerd_nri", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/containerd/nri", + sum = "h1:PjgIBm0RtUiFyEO6JqPBQZRQicbsIz41Fz/5VSC0zgw=", + version = "v0.4.0", + ) + go_repository( + name = "com_github_containerd_stargz_snapshotter_estargz", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/containerd/stargz-snapshotter/estargz", + sum = "h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k=", + version = "v0.14.3", + ) + go_repository( + name = "com_github_containerd_ttrpc", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/containerd/ttrpc", + sum = "h1:9vqZr0pxwOF5koz6N0N3kJ0zDHokrcPxIR/ZR2YFtOs=", + version = "v1.2.2", + ) + go_repository( + name = "com_github_containerd_typeurl", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/containerd/typeurl", + sum = "h1:Chlt8zIieDbzQFzXzAeBEF92KhExuE4p9p92/QmY7aY=", + version = "v1.0.2", + ) + go_repository( + name = "com_github_containerd_typeurl_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/containerd/typeurl/v2", + sum = "h1:3Q4Pt7i8nYwy2KmQWIw2+1hTvwTE/6w9FqcttATPO/4=", + version = "v2.1.1", + ) + go_repository( + name = "com_github_containerd_zfs", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/containerd/zfs", + sum = "h1:n7OZ7jZumLIqNJqXrEc/paBM840mORnmGdJDmAmJZHM=", + version = "v1.1.0", + ) + go_repository( + name = "com_github_containernetworking_cni", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/containernetworking/cni", + sum = "h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl31EQbXALQ=", + version = "v1.1.2", + ) + go_repository( + name = "com_github_containernetworking_plugins", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/containernetworking/plugins", + sum = "h1:SWgg3dQG1yzUo4d9iD8cwSVh1VqI+bP7mkPDoSfP9VU=", + version = "v1.2.0", + ) + go_repository( + name = "com_github_containers_ocicrypt", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/containers/ocicrypt", + sum = 
"h1:uoG52u2e91RE4UqmBICZY8dNshgfvkdl3BW6jnxiFaI=", + version = "v1.1.6", + ) + go_repository( + name = "com_github_coredns_caddy", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/coredns/caddy", + sum = "h1:2eYKZT7i6yxIfGP3qLJoJ7HAsDJqYB+X68g4NYjSrE0=", + version = "v1.1.1", + ) + go_repository( + name = "com_github_coredns_corefile_migration", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/coredns/corefile-migration", + sum = "h1:W/DCETrHDiFo0Wj03EyMkaQ9fwsmSgqTCQDHpceaSsE=", + version = "v1.0.21", + ) + go_repository( + name = "com_github_coreos_go_oidc", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/coreos/go-oidc", + sum = "h1:mh48q/BqXqgjVHpy2ZY7WnWAbenxRjsz9N1i1YxjHAk=", + version = "v2.2.1+incompatible", + ) + go_repository( + name = "com_github_coreos_go_oidc_v3", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/coreos/go-oidc/v3", + sum = "h1:0J/ogVOd4y8P0f0xUh8l9t07xRP/d8tccvjHl2dcsSo=", + version = "v3.9.0", + ) + go_repository( + name = "com_github_coreos_go_semver", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/coreos/go-semver", + sum = "h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=", + version = "v0.3.1", + ) + go_repository( + name = "com_github_coreos_go_systemd_v22", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/coreos/go-systemd/v22", + sum = "h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=", + version = "v22.5.0", + ) + go_repository( + name = "com_github_cosi_project_runtime", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/cosi-project/runtime", + sum = "h1:3ripxk5ox93TOmYn0WMbddd5XLerG9URonb5XG4GcFU=", + version = "v0.3.19", + ) + go_repository( + name = "com_github_cpuguy83_go_md2man_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/cpuguy83/go-md2man/v2", + sum = "h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=", + version = "v2.0.3", + ) + go_repository( + name = "com_github_creack_pty", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/creack/pty", + sum = "h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0=", + version = "v1.1.21", + ) + go_repository( + name = "com_github_cyberphone_json_canonicalization", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/cyberphone/json-canonicalization", + sum = "h1:eHnXnuK47UlSTOQexbzxAZfekVz6i+LKRdj1CU5DPaM=", + version = "v0.0.0-20231217050601-ba74d44ecf5f", + ) + go_repository( + name = "com_github_cyphar_filepath_securejoin", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/cyphar/filepath-securejoin", + sum = "h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=", + version = "v0.2.4", + ) + go_repository( + name = "com_github_danieljoos_wincred", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/danieljoos/wincred", + sum = "h1:dl9cBrupW8+r5250DYkYxocLeZ1Y4vB1kxgtjxw8GQs=", + version = "v1.2.1", + ) + go_repository( + name = "com_github_danwinship_knftables", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = 
"github.com/danwinship/knftables", + sum = "h1:89Ieiia6MMfXWQF9dyaou1CwBU8h8sHa2Zo3OlY2o04=", + version = "v0.0.13", + ) + go_repository( + name = "com_github_data_dog_go_sqlmock", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/DATA-DOG/go-sqlmock", + sum = "h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=", + version = "v1.5.2", + ) + go_repository( + name = "com_github_davecgh_go_spew", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/davecgh/go-spew", + sum = "h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=", + version = "v1.1.2-0.20180830191138-d8f796af33cc", + ) + go_repository( + name = "com_github_daviddengcn_go_colortext", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/daviddengcn/go-colortext", + sum = "h1:ANqDyC0ys6qCSvuEK7l3g5RaehL/Xck9EX8ATG8oKsE=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_decred_dcrd_dcrec_secp256k1_v4", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/decred/dcrd/dcrec/secp256k1/v4", + sum = "h1:1iy2qD6JEhHKKhUOA9IWs7mjco7lnw2qx8FsRI2wirE=", + version = "v4.0.0-20210816181553-5444fa50b93d", + ) + go_repository( + name = "com_github_denisenkom_go_mssqldb", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/denisenkom/go-mssqldb", + sum = "h1:RSohk2RsiZqLZ0zCjtfn3S4Gp4exhpBWHyQ7D0yGjAk=", + version = "v0.9.0", + ) + go_repository( + name = "com_github_dgryski_go_rendezvous", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/dgryski/go-rendezvous", + sum = "h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=", + version = "v0.0.0-20200823014737-9f7001d12a5f", + ) + go_repository( + name = "com_github_distribution_distribution_v3", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/distribution/distribution/v3", + sum = "h1:aBfCb7iqHmDEIp6fBvC/hQUddQfg+3qdYjwzaiP9Hnc=", + version = "v3.0.0-20221208165359-362910506bc2", + ) + go_repository( + name = "com_github_distribution_reference", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/distribution/reference", + sum = "h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=", + version = "v0.5.0", + ) + go_repository( + name = "com_github_dnaeon_go_vcr", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/dnaeon/go-vcr", + sum = "h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=", + version = "v1.2.0", + ) + go_repository( + name = "com_github_docker_cli", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/docker/cli", + sum = "h1:KLeNs7zws74oFuVhgZQ5ONGZiXUUdgsdy6/EsX/6284=", + version = "v25.0.3+incompatible", + ) + go_repository( + name = "com_github_docker_distribution", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/docker/distribution", + sum = "h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=", + version = "v2.8.3+incompatible", + ) + go_repository( + name = "com_github_docker_docker", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/docker/docker", + sum = "h1:D5fy/lYmY7bvZa0XTZ5/UJPljor41F+vdyJG5luQLfQ=", + version = "v25.0.3+incompatible", + ) + go_repository( + name 
= "com_github_docker_docker_credential_helpers", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/docker/docker-credential-helpers", + sum = "h1:j/eKUktUltBtMzKqmfLB0PAgqYyMHOp5vfsD1807oKo=", + version = "v0.8.1", + ) + go_repository( + name = "com_github_docker_go_connections", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/docker/go-connections", + sum = "h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=", + version = "v0.5.0", + ) + go_repository( + name = "com_github_docker_go_events", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/docker/go-events", + sum = "h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8=", + version = "v0.0.0-20190806004212-e31b211e4f1c", + ) + go_repository( + name = "com_github_docker_go_metrics", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/docker/go-metrics", + sum = "h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=", + version = "v0.0.1", + ) + go_repository( + name = "com_github_docker_go_units", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/docker/go-units", + sum = "h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=", + version = "v0.5.0", + ) + go_repository( + name = "com_github_docker_libtrust", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/docker/libtrust", + sum = "h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4=", + version = "v0.0.0-20160708172513-aabc10ec26b7", + ) + go_repository( + name = "com_github_dustin_go_humanize", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/dustin/go-humanize", + sum = "h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=", + version = "v1.0.1", + ) + go_repository( + name = "com_github_edgelesssys_go_azguestattestation", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/edgelesssys/go-azguestattestation", + sum = "h1:1iKB7b+i7svWC0aKXwggi+kHf0K57g8r9hN4VOpJYYg=", + version = "v0.0.0-20230707101700-a683be600fcf", + ) + go_repository( + name = "com_github_edgelesssys_go_tdx_qpl", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/edgelesssys/go-tdx-qpl", + sum = "h1:TCGUmmH50cQBGXPJsn32APf93fmWQXcSMi7pMbDPtV0=", + version = "v0.0.0-20240123150912-dcad3c41ec5f", + ) + go_repository( + name = "com_github_eggsampler_acme_v3", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/eggsampler/acme/v3", + sum = "h1:LHWnB3wShVshK1+umL6ObCjnc0MM+D7TE8JINjk8zGY=", + version = "v3.4.0", + ) + go_repository( + name = "com_github_emicklei_go_restful_v3", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/emicklei/go-restful/v3", + sum = "h1:1onLa9DcsMYO9P+CXaL0dStDqQ2EHHXLiz+BtnqkLAU=", + version = "v3.11.2", + ) + go_repository( + name = "com_github_emirpasic_gods", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/emirpasic/gods", + sum = "h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=", + version = "v1.18.1", + ) + go_repository( + name = "com_github_envoyproxy_go_control_plane", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = 
"github.com/envoyproxy/go-control-plane", + sum = "h1:wSUXTlLfiAQRWs2F+p+EKOY9rUyis1MyGqJ2DIk5HpM=", + version = "v0.11.1", + ) + go_repository( + name = "com_github_envoyproxy_protoc_gen_validate", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/envoyproxy/protoc-gen-validate", + sum = "h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=", + version = "v1.0.2", + ) + go_repository( + name = "com_github_euank_go_kmsg_parser", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/euank/go-kmsg-parser", + sum = "h1:cHD53+PLQuuQyLZeriD1V/esuG4MuU0Pjs5y6iknohY=", + version = "v2.0.0+incompatible", + ) + go_repository( + name = "com_github_evanphx_json_patch", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/evanphx/json-patch", + sum = "h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls=", + version = "v5.9.0+incompatible", + ) + go_repository( + name = "com_github_evanphx_json_patch_v5", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/evanphx/json-patch/v5", + sum = "h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=", + version = "v5.9.0", + ) + go_repository( + name = "com_github_exponent_io_jsonpath", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/exponent-io/jsonpath", + sum = "h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4=", + version = "v0.0.0-20210407135951-1de76d718b3f", + ) + go_repository( + name = "com_github_fatih_camelcase", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/fatih/camelcase", + sum = "h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_fatih_color", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/fatih/color", + sum = "h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=", + version = "v1.16.0", + ) + go_repository( + name = "com_github_felixge_httpsnoop", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/felixge/httpsnoop", + sum = "h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=", + version = "v1.0.4", + ) + go_repository( + name = "com_github_flynn_go_docopt", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/flynn/go-docopt", + sum = "h1:Ss/B3/5wWRh8+emnK0++g5zQzwDTi30W10pKxKc4JXI=", + version = "v0.0.0-20140912013429-f6dd2ebbb31e", + ) + go_repository( + name = "com_github_flynn_go_shlex", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/flynn/go-shlex", + sum = "h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=", + version = "v0.0.0-20150515145356-3f9db97f8568", + ) + go_repository( + name = "com_github_foxboron_go_uefi", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/foxboron/go-uefi", + sum = "h1:qGlg/7H49H30Eu7nkCBA7YxNmW30ephqBf7xIxlAGuQ=", + version = "v0.0.0-20240128152106-48be911532c2", + ) + go_repository( + name = "com_github_foxcpp_go_mockdns", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/foxcpp/go-mockdns", + sum = "h1:7jBqxd3WDWwi/6WhDvacvH1XsN3rOLXyHM1uhvIx6FI=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_frankban_quicktest", + 
build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/frankban/quicktest", + sum = "h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=", + version = "v1.14.6", + ) + go_repository( + name = "com_github_fsnotify_fsnotify", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/fsnotify/fsnotify", + sum = "h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=", + version = "v1.7.0", + ) + go_repository( + name = "com_github_fullstorydev_grpcurl", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/fullstorydev/grpcurl", + sum = "h1:JMvZXK8lHDGyLmTQ0ZdGDnVVGuwjbpaumf8p42z0d+c=", + version = "v1.8.9", + ) + go_repository( + name = "com_github_fvbommel_sortorder", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/fvbommel/sortorder", + sum = "h1:fUmoe+HLsBTctBDoaBwpQo5N+nrCp8g/BjKb/6ZQmYw=", + version = "v1.1.0", + ) + go_repository( + name = "com_github_fxamacker_cbor_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/fxamacker/cbor/v2", + sum = "h1:ri0ArlOR+5XunOP8CRUowT0pSJOwhW098ZCUyskZD88=", + version = "v2.4.0", + ) + go_repository( + name = "com_github_gabriel_vasile_mimetype", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/gabriel-vasile/mimetype", + sum = "h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0=", + version = "v1.4.3", + ) + go_repository( + name = "com_github_gertd_go_pluralize", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/gertd/go-pluralize", + sum = "h1:M3uASbVjMnTsPb0PNqg+E/24Vwigyo/tvyMTtAlLgiA=", + version = "v0.2.1", + ) + go_repository( + name = "com_github_ghodss_yaml", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/ghodss/yaml", + sum = "h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_gliderlabs_ssh", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/gliderlabs/ssh", + sum = "h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0=", + version = "v0.2.2", + ) + go_repository( + name = "com_github_go_chi_chi", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/go-chi/chi", + sum = "h1:fGFk2Gmi/YKXk0OmGfBh0WgmN3XB8lVnEyNz34tQRec=", + version = "v4.1.2+incompatible", + ) + go_repository( + name = "com_github_go_errors_errors", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/go-errors/errors", + sum = "h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk=", + version = "v1.5.1", + ) + go_repository( + name = "com_github_go_git_gcfg", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/go-git/gcfg", + sum = "h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=", + version = "v1.5.1-0.20230307220236-3a3c6141e376", + ) + go_repository( + name = "com_github_go_git_go_billy_v5", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/go-git/go-billy/v5", + sum = "h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU=", + version = "v5.5.0", + ) + go_repository( + name = "com_github_go_git_go_git_v5", + build_file_generation = "on", + build_file_proto_mode = "disable_global", 
+ importpath = "github.com/go-git/go-git/v5", + sum = "h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4=", + version = "v5.11.0", + ) + go_repository( + name = "com_github_go_gl_glfw", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/go-gl/glfw", + sum = "h1:QbL/5oDUmRBzO9/Z7Seo6zf912W/a6Sr4Eu0G/3Jho0=", + version = "v0.0.0-20190409004039-e6da0acd62b1", + ) + go_repository( + name = "com_github_go_gl_glfw_v3_3_glfw", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/go-gl/glfw/v3.3/glfw", + sum = "h1:WtGNWLvXpe6ZudgnXrq0barxBImvnnJoMEhXAzcbM0I=", + version = "v0.0.0-20200222043503-6f7a984d4dc4", + ) + go_repository( + name = "com_github_go_gorp_gorp_v3", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/go-gorp/gorp/v3", + sum = "h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs=", + version = "v3.1.0", + ) + go_repository( + name = "com_github_go_jose_go_jose_v3", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/go-jose/go-jose/v3", + sum = "h1:pWmKFVtt+Jl0vBZTIpz/eAKwsm6LkIxDVVbFHKkchhA=", + version = "v3.0.1", + ) + go_repository( + name = "com_github_go_kit_kit", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/go-kit/kit", + sum = "h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0=", + version = "v0.8.0", + ) + go_repository( + name = "com_github_go_kit_log", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/go-kit/log", + sum = "h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=", + version = "v0.2.1", + ) + go_repository( + name = "com_github_go_logfmt_logfmt", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/go-logfmt/logfmt", + sum = "h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=", + version = "v0.6.0", + ) + go_repository( + name = "com_github_go_logr_logr", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/go-logr/logr", + sum = "h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=", + version = "v1.4.1", + ) + go_repository( + name = "com_github_go_logr_stdr", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/go-logr/stdr", + sum = "h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=", + version = "v1.2.2", + ) + go_repository( + name = "com_github_go_logr_zapr", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/go-logr/zapr", + sum = "h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=", + version = "v1.3.0", + ) + go_repository( + name = "com_github_go_openapi_analysis", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/go-openapi/analysis", + sum = "h1:ZBmNoP2h5omLKr/srIC9bfqrUGzT6g6gNv03HE9Vpj0=", + version = "v0.22.2", + ) + go_repository( + name = "com_github_go_openapi_errors", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/go-openapi/errors", + sum = "h1:FhChC/duCnfoLj1gZ0BgaBmzhJC2SL/sJr8a2vAobSY=", + version = "v0.21.0", + ) + go_repository( + name = "com_github_go_openapi_jsonpointer", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/go-openapi/jsonpointer", + sum = 
"h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q=", + version = "v0.20.2", + ) + go_repository( + name = "com_github_go_openapi_jsonreference", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/go-openapi/jsonreference", + sum = "h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU=", + version = "v0.20.4", + ) + go_repository( + name = "com_github_go_openapi_loads", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/go-openapi/loads", + sum = "h1:jDzF4dSoHw6ZFADCGltDb2lE4F6De7aWSpe+IcsRzT0=", + version = "v0.21.5", + ) + go_repository( + name = "com_github_go_openapi_runtime", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/go-openapi/runtime", + sum = "h1:ae53yaOoh+fx/X5Eaq8cRmavHgDma65XPZuvBqvJYto=", + version = "v0.27.1", + ) + go_repository( + name = "com_github_go_openapi_spec", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/go-openapi/spec", + sum = "h1:7CBlRnw+mtjFGlPDRZmAMnq35cRzI91xj03HVyUi/Do=", + version = "v0.20.14", + ) + go_repository( + name = "com_github_go_openapi_strfmt", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/go-openapi/strfmt", + sum = "h1:Ew9PnEYc246TwrEspvBdDHS4BVKXy/AOVsfqGDgAcaI=", + version = "v0.22.0", + ) + go_repository( + name = "com_github_go_openapi_swag", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/go-openapi/swag", + sum = "h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE=", + version = "v0.22.9", + ) + go_repository( + name = "com_github_go_openapi_validate", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/go-openapi/validate", + sum = "h1:2l7PJLzCis4YUGEoW6eoQw3WhyM65WSIcjX6SQnlfDw=", + version = "v0.23.0", + ) + go_repository( + name = "com_github_go_playground_assert_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/go-playground/assert/v2", + sum = "h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=", + version = "v2.2.0", + ) + go_repository( + name = "com_github_go_playground_locales", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/go-playground/locales", + sum = "h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=", + version = "v0.14.1", + ) + go_repository( + name = "com_github_go_playground_universal_translator", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/go-playground/universal-translator", + sum = "h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=", + version = "v0.18.1", + ) + go_repository( + name = "com_github_go_playground_validator_v10", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/go-playground/validator/v10", + sum = "h1:9c50NUPC30zyuKprjL3vNZ0m5oG+jU0zvx4AqHGnv4k=", + version = "v10.14.1", + ) + go_repository( + name = "com_github_go_redis_redismock_v9", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/go-redis/redismock/v9", + sum = "h1:ZrMYQeKPECZPjOj5u9eyOjg8Nnb0BS9lkVIZ6IpsKLw=", + version = "v9.2.0", + ) + go_repository( + name = "com_github_go_rod_rod", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/go-rod/rod", + sum = 
"h1:1x6oqnslwFVuXJbJifgxspJUd3O4ntaGhRLHt+4Er9c=", + version = "v0.114.5", + ) + go_repository( + name = "com_github_go_sql_driver_mysql", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/go-sql-driver/mysql", + sum = "h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=", + version = "v1.7.1", + ) + go_repository( + name = "com_github_go_stack_stack", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/go-stack/stack", + sum = "h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=", + version = "v1.8.0", + ) + go_repository( + name = "com_github_go_task_slim_sprig", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/go-task/slim-sprig", + sum = "h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=", + version = "v0.0.0-20230315185526-52ccab3ef572", + ) + go_repository( + name = "com_github_go_test_deep", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/go-test/deep", + sum = "h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg=", + version = "v1.1.0", + ) + go_repository( + name = "com_github_gobwas_glob", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/gobwas/glob", + sum = "h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=", + version = "v0.2.3", + ) + go_repository( + name = "com_github_gobwas_httphead", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/gobwas/httphead", + sum = "h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU=", + version = "v0.1.0", + ) + go_repository( + name = "com_github_gobwas_pool", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/gobwas/pool", + sum = "h1:xfeeEhW7pwmX8nuLVlqbzVc7udMDrwetjEv+TZIz1og=", + version = "v0.2.1", + ) + go_repository( + name = "com_github_gobwas_ws", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/gobwas/ws", + sum = "h1:F2aeBZrm2NDsc7vbovKrWSogd4wvfAxg0FQ89/iqOTk=", + version = "v1.2.1", + ) + go_repository( + name = "com_github_goccy_go_json", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/goccy/go-json", + sum = "h1:IcB+Aqpx/iMHu5Yooh7jEzJk1JZ7Pjtmys2ukPr7EeM=", + version = "v0.9.7", + ) + go_repository( + name = "com_github_godbus_dbus_v5", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/godbus/dbus/v5", + sum = "h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=", + version = "v5.1.0", + ) + go_repository( + name = "com_github_godror_godror", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/godror/godror", + sum = "h1:X1e7hUd02GDaLWKZj40Z7L0CP0W9TrGgmPQZw6+anBg=", + version = "v0.40.4", + ) + go_repository( + name = "com_github_godror_knownpb", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/godror/knownpb", + sum = "h1:A4J7jdx7jWBhJm18NntafzSC//iZDHkDi1+juwQ5pTI=", + version = "v0.1.1", + ) + go_repository( + name = "com_github_gofrs_flock", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/gofrs/flock", + sum = "h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=", + version = "v0.8.1", + ) + go_repository( + name = "com_github_gofrs_uuid", + build_file_generation = "on", 
+ build_file_proto_mode = "disable_global", + importpath = "github.com/gofrs/uuid", + sum = "h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA=", + version = "v4.4.0+incompatible", + ) + go_repository( + name = "com_github_gogo_protobuf", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/gogo/protobuf", + sum = "h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=", + version = "v1.3.2", + ) + go_repository( + name = "com_github_golang_glog", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/golang/glog", + sum = "h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=", + version = "v1.1.2", + ) + go_repository( + name = "com_github_golang_groupcache", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/golang/groupcache", + sum = "h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=", + version = "v0.0.0-20210331224755-41bb18bfe9da", + ) + go_repository( + name = "com_github_golang_jwt_jwt", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/golang-jwt/jwt", + sum = "h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c=", + version = "v3.2.1+incompatible", + ) + go_repository( + name = "com_github_golang_jwt_jwt_v4", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/golang-jwt/jwt/v4", + sum = "h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=", + version = "v4.5.0", + ) + go_repository( + name = "com_github_golang_jwt_jwt_v5", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/golang-jwt/jwt/v5", + sum = "h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw=", + version = "v5.2.0", + ) + go_repository( + name = "com_github_golang_mock", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/golang/mock", + sum = "h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=", + version = "v1.6.0", + ) + go_repository( + name = "com_github_golang_protobuf", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/golang/protobuf", + sum = "h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=", + version = "v1.5.3", + ) + go_repository( + name = "com_github_golang_snappy", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/golang/snappy", + sum = "h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=", + version = "v0.0.4", + ) + go_repository( + name = "com_github_golang_sql_civil", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/golang-sql/civil", + sum = "h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY=", + version = "v0.0.0-20190719163853-cb61b32ac6fe", + ) + go_repository( + name = "com_github_gomodule_redigo", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/gomodule/redigo", + sum = "h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k=", + version = "v1.8.2", + ) + go_repository( + name = "com_github_google_btree", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/google/btree", + sum = "h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=", + version = "v1.1.2", + ) + go_repository( + name = "com_github_google_cadvisor", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/google/cadvisor", 
+ sum = "h1:eyYTxKBd+KxI1kh6rst4JSTLUhfHQM34qGpp+0AMlSg=", + version = "v0.48.1", + ) + go_repository( + name = "com_github_google_cel_go", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/google/cel-go", + sum = "h1:6ebJFzu1xO2n7TLtN+UBqShGBhlD85bhvglh5DpcfqQ=", + version = "v0.17.7", + ) + go_repository( + name = "com_github_google_certificate_transparency_go", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/google/certificate-transparency-go", + sum = "h1:IASD+NtgSTJLPdzkthwvAG1ZVbF2WtFg4IvoA68XGSw=", + version = "v1.1.7", + ) + go_repository( + name = "com_github_google_gnostic_models", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/google/gnostic-models", + sum = "h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=", + version = "v0.6.8", + ) + go_repository( + name = "com_github_google_go_attestation", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/google/go-attestation", + sum = "h1:jqtOrLk5MNdliTKjPbIPrAaRKJaKW+0LIU2n/brJYms=", + version = "v0.5.1", + ) + go_repository( + name = "com_github_google_go_cmdtest", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/google/go-cmdtest", + sum = "h1:rcv+Ippz6RAtvaGgKxc+8FQIpxHgsF+HBzPyYL2cyVU=", + version = "v0.4.1-0.20220921163831-55ab3332a786", + ) + go_repository( + name = "com_github_google_go_cmp", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/google/go-cmp", + sum = "h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=", + version = "v0.6.0", + ) + go_repository( + name = "com_github_google_go_configfs_tsm", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/google/go-configfs-tsm", + sum = "h1:YnJ9rXIOj5BYD7/0DNnzs8AOp7UcvjfTvt215EWcs98=", + version = "v0.2.2", + ) + go_repository( + name = "com_github_google_go_containerregistry", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/google/go-containerregistry", + sum = "h1:uIsMRBV7m/HDkDxE/nXMnv1q+lOOSPlQ/ywc5JbB8Ic=", + version = "v0.19.0", + ) + go_repository( + name = "com_github_google_go_licenses", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/google/go-licenses", + sum = "h1:MM+VCXf0slYkpWO0mECvdYDVCxZXIQNal5wqUIXEZ/A=", + version = "v1.6.0", + ) + go_repository( + name = "com_github_google_go_pkcs11", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/google/go-pkcs11", + sum = "h1:OF1IPgv+F4NmqmJ98KTjdN97Vs1JxDPB3vbmYzV2dpk=", + version = "v0.2.1-0.20230907215043-c6f79328ddf9", + ) + go_repository( + name = "com_github_google_go_replayers_httpreplay", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/google/go-replayers/httpreplay", + sum = "h1:H91sIMlt1NZzN7R+/ASswyouLJfW0WLW7fhyUFvDEkY=", + version = "v1.1.1", + ) + go_repository( + name = "com_github_google_go_sev_guest", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/google/go-sev-guest", + replace = "github.com/google/go-sev-guest", + sum = "h1:6o4Z/vQqNUH+cEagfx1Ez5ElK70iZulEXZwmLnRo44I=", + version = "v0.0.0-20230928233922-2dcbba0a4b9d", + ) + go_repository( + name = 
"com_github_google_go_tdx_guest", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/google/go-tdx-guest", + sum = "h1:gl0KvjdsD4RrJzyLefDOvFOUH3NAJri/3qvaL5m83Iw=", + version = "v0.3.1", + ) + go_repository( + name = "com_github_google_go_tpm", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/google/go-tpm", + replace = "github.com/thomasten/go-tpm", + sum = "h1:840nUyrM9df2aLuzWuIkYx/DrUbX4KQZO6B9LD45aWo=", + version = "v0.0.0-20230629092004-f43f8e2a59eb", + ) + go_repository( + name = "com_github_google_go_tpm_tools", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/google/go-tpm-tools", + # keep + patches = [ + "//3rdparty/bazel/com_github_google_go_tpm_tools:com_github_google_go_tpm_tools.patch", + "//3rdparty/bazel/com_github_google_go_tpm_tools:ms_tpm_20_ref.patch", + "//3rdparty/bazel/com_github_google_go_tpm_tools:include.patch", + ], + sum = "h1:EQ1rGgyI8IEBApvDH9HPF7ehUd/6H6SxSNKVDF5z/GU=", + version = "v0.4.3-0.20240112165732-912a43636883", + ) + go_repository( + name = "com_github_google_go_tspi", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/google/go-tspi", + sum = "h1:ADtq8RKfP+jrTyIWIZDIYcKOMecRqNJFOew2IT0Inus=", + version = "v0.3.0", + ) + go_repository( + name = "com_github_google_gofuzz", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/google/gofuzz", + sum = "h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=", + version = "v1.2.0", + ) + go_repository( + name = "com_github_google_goterm", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/google/goterm", + sum = "h1:CVuJwN34x4xM2aT4sIKhmeib40NeBPhRihNjQmpJsA4=", + version = "v0.0.0-20200907032337-555d40f16ae2", + ) + go_repository( + name = "com_github_google_keep_sorted", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/google/keep-sorted", + sum = "h1:nsDd3h16Bf1KFNtfvzGoLaei95AMLswikiw1ICDOKPE=", + version = "v0.3.0", + ) + go_repository( + name = "com_github_google_licenseclassifier", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/google/licenseclassifier", + sum = "h1:TJsAqW6zLRMDTyGmc9TPosfn9OyVlHs8Hrn3pY6ONSY=", + version = "v0.0.0-20210722185704-3043a050f148", + ) + go_repository( + name = "com_github_google_logger", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/google/logger", + sum = "h1:+6Z2geNxc9G+4D4oDO9njjjn2d0wN5d7uOo0vOIW1NQ=", + version = "v1.1.1", + ) + go_repository( + name = "com_github_google_martian", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/google/martian", + sum = "h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=", + version = "v2.1.0+incompatible", + ) + go_repository( + name = "com_github_google_martian_v3", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/google/martian/v3", + sum = "h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw=", + version = "v3.3.2", + ) + go_repository( + name = "com_github_google_pprof", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/google/pprof", + sum = "h1:RMpPgZTSApbPf7xaVel+QkoGPRLFLrwFO89uDUHEGf0=", + version = 
"v0.0.0-20231023181126-ff6d637d2a7b", + ) + go_repository( + name = "com_github_google_renameio", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/google/renameio", + sum = "h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA=", + version = "v0.1.0", + ) + go_repository( + name = "com_github_google_renameio_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/google/renameio/v2", + sum = "h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg=", + version = "v2.0.0", + ) + go_repository( + name = "com_github_google_rpmpack", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/google/rpmpack", + sum = "h1:L16KZ3QvkFGpYhmp23iQip+mx1X39foEsqszjMNBm8A=", + version = "v0.5.0", + ) + go_repository( + name = "com_github_google_s2a_go", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/google/s2a-go", + sum = "h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=", + version = "v0.1.7", + ) + go_repository( + name = "com_github_google_shlex", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/google/shlex", + sum = "h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=", + version = "v0.0.0-20191202100458-e7afc7fbc510", + ) + go_repository( + name = "com_github_google_trillian", + build_file_generation = "on", + build_file_name = "", # keep + build_file_proto_mode = "disable_global", + importpath = "github.com/google/trillian", + sum = "h1:jMBeDBIkINFvS2n6oV5maDqfRlxREAc6CW9QYWQ0qT4=", + version = "v1.6.0", + ) + go_repository( + name = "com_github_google_uuid", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/google/uuid", + sum = "h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=", + version = "v1.6.0", + ) + go_repository( + name = "com_github_google_wire", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/google/wire", + sum = "h1:I7ELFeVBr3yfPIcc8+MWvrjk+3VjbcSzoXm3JVa+jD8=", + version = "v0.5.0", + ) + go_repository( + name = "com_github_googleapis_enterprise_certificate_proxy", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/googleapis/enterprise-certificate-proxy", + sum = "h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=", + version = "v0.3.2", + ) + go_repository( + name = "com_github_googleapis_gax_go_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/googleapis/gax-go/v2", + sum = "h1:9F8GV9r9ztXyAi00gsMQHNoF51xPZm8uj1dpYt2ZETM=", + version = "v2.12.1", + ) + go_repository( + name = "com_github_googleapis_go_type_adapters", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/googleapis/go-type-adapters", + sum = "h1:9XdMn+d/G57qq1s8dNc5IesGCXHf6V2HZ2JwRxfA2tA=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_googleapis_google_cloud_go_testing", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/googleapis/google-cloud-go-testing", + sum = "h1:zC34cGQu69FG7qzJ3WiKW244WfhDC3xxYMeNOX2gtUQ=", + version = "v0.0.0-20210719221736-1c9a4c676720", + ) + go_repository( + name = "com_github_googlecloudplatform_k8s_cloud_provider", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = 
"github.com/GoogleCloudPlatform/k8s-cloud-provider", + sum = "h1:Heo1J/ttaQFgGJSVnCZquy3e5eH5j1nqxBuomztB3P0=", + version = "v1.18.1-0.20220218231025-f11817397a1b", + ) + go_repository( + name = "com_github_gophercloud_gophercloud", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/gophercloud/gophercloud", + sum = "h1:zKvmHOmHuaZlnx9d2DJpEgbMxrGt/+CJ/bKOKQh9Xzo=", + version = "v1.9.0", + ) + go_repository( + name = "com_github_gophercloud_utils", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/gophercloud/utils", + sum = "h1:sH7xkTfYzxIEgzq1tDHIMKRh1vThOEOGNsettdEeLbE=", + version = "v0.0.0-20231010081019-80377eca5d56", + ) + go_repository( + name = "com_github_gorilla_handlers", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/gorilla/handlers", + sum = "h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4=", + version = "v1.5.1", + ) + go_repository( + name = "com_github_gorilla_mux", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/gorilla/mux", + sum = "h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=", + version = "v1.8.1", + ) + go_repository( + name = "com_github_gorilla_websocket", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/gorilla/websocket", + sum = "h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY=", + version = "v1.5.1", + ) + go_repository( + name = "com_github_gosuri_uitable", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/gosuri/uitable", + sum = "h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY=", + version = "v0.0.4", + ) + go_repository( + name = "com_github_gregjones_httpcache", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/gregjones/httpcache", + sum = "h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA=", + version = "v0.0.0-20190611155906-901d90724c79", + ) + go_repository( + name = "com_github_grpc_ecosystem_go_grpc_middleware", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/grpc-ecosystem/go-grpc-middleware", + sum = "h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI=", + version = "v1.4.0", + ) + go_repository( + name = "com_github_grpc_ecosystem_go_grpc_middleware_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/grpc-ecosystem/go-grpc-middleware/v2", + sum = "h1:HcUWd006luQPljE73d5sk+/VgYPGUReEVz2y1/qylwY=", + version = "v2.0.1", + ) + go_repository( + name = "com_github_grpc_ecosystem_go_grpc_prometheus", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/grpc-ecosystem/go-grpc-prometheus", + sum = "h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=", + version = "v1.2.0", + ) + go_repository( + name = "com_github_grpc_ecosystem_grpc_gateway", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/grpc-ecosystem/grpc-gateway", + sum = "h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=", + version = "v1.16.0", + ) + go_repository( + name = "com_github_grpc_ecosystem_grpc_gateway_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/grpc-ecosystem/grpc-gateway/v2", + sum = "h1:6UKoz5ujsI55KNpsJH3UwCq3T8kKbZwNZBNPuTTje8U=", + version = 
"v2.18.1", + ) + go_repository( + name = "com_github_hashicorp_cli", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/hashicorp/cli", + sum = "h1:CMOV+/LJfL1tXCOKrgAX0uRKnzjj/mpmqNXloRSy2K8=", + version = "v1.1.6", + ) + go_repository( + name = "com_github_hashicorp_errwrap", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/hashicorp/errwrap", + sum = "h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=", + version = "v1.1.0", + ) + go_repository( + name = "com_github_hashicorp_go_checkpoint", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/hashicorp/go-checkpoint", + sum = "h1:MFYpPZCnQqQTE18jFwSII6eUQrD/oxMFp3mlgcqk5mU=", + version = "v0.5.0", + ) + go_repository( + name = "com_github_hashicorp_go_cleanhttp", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/hashicorp/go-cleanhttp", + sum = "h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=", + version = "v0.5.2", + ) + go_repository( + name = "com_github_hashicorp_go_cty", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/hashicorp/go-cty", + sum = "h1:1/D3zfFHttUKaCaGKZ/dR2roBXv0vKbSCnssIldfQdI=", + version = "v1.4.1-0.20200414143053-d3edf31b6320", + ) + go_repository( + name = "com_github_hashicorp_go_hclog", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/hashicorp/go-hclog", + sum = "h1:NOtoftovWkDheyUM/8JW3QMiXyxJK3uHRK7wV04nD2I=", + version = "v1.6.2", + ) + go_repository( + name = "com_github_hashicorp_go_kms_wrapping_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/hashicorp/go-kms-wrapping/v2", + sum = "h1:WZeXfD26QMWYC35at25KgE021SF9L3u9UMHK8fJAdV0=", + version = "v2.0.16", + ) + go_repository( + name = "com_github_hashicorp_go_kms_wrapping_wrappers_awskms_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2", + sum = "h1:qdxeZvDMRGZ3YSE4Oz0Pp7WUSUn5S6cWZguEOkEVL50=", + version = "v2.0.9", + ) + go_repository( + name = "com_github_hashicorp_go_kms_wrapping_wrappers_azurekeyvault_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2", + sum = "h1:/7SKkYIhA8cr3l8m1EKT6Q90bPoSVqqVBuQ6HgoMIkw=", + version = "v2.0.11", + ) + go_repository( + name = "com_github_hashicorp_go_kms_wrapping_wrappers_gcpckms_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2", + sum = "h1:qXOa2uFzT8eORzgfLZSp1dvig2l/70LJIr6634f5HMM=", + version = "v2.0.11", + ) + go_repository( + name = "com_github_hashicorp_go_multierror", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/hashicorp/go-multierror", + sum = "h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=", + version = "v1.1.1", + ) + go_repository( + name = "com_github_hashicorp_go_plugin", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/hashicorp/go-plugin", + sum = "h1:wgd4KxHJTVGGqWBq4QPB1i5BZNEx9BR8+OFmHDmTk8A=", + version = "v1.6.0", + ) + go_repository( + name = "com_github_hashicorp_go_retryablehttp", + 
build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/hashicorp/go-retryablehttp", + sum = "h1:bJj+Pj19UZMIweq/iie+1u5YCdGrnxCT9yvm0e+Nd5M=", + version = "v0.7.5", + ) + go_repository( + name = "com_github_hashicorp_go_rootcerts", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/hashicorp/go-rootcerts", + sum = "h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=", + version = "v1.0.2", + ) + go_repository( + name = "com_github_hashicorp_go_secure_stdlib_awsutil", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/hashicorp/go-secure-stdlib/awsutil", + sum = "h1:I8bynUKMh9I7JdwtW9voJ0xmHvBpxQtLjrMFDYmhOxY=", + version = "v0.3.0", + ) + go_repository( + name = "com_github_hashicorp_go_secure_stdlib_parseutil", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/hashicorp/go-secure-stdlib/parseutil", + sum = "h1:UpiO20jno/eV1eVZcxqWnUohyKRe1g8FPV/xH1s/2qs=", + version = "v0.1.7", + ) + go_repository( + name = "com_github_hashicorp_go_secure_stdlib_strutil", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/hashicorp/go-secure-stdlib/strutil", + sum = "h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts=", + version = "v0.1.2", + ) + go_repository( + name = "com_github_hashicorp_go_sockaddr", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/hashicorp/go-sockaddr", + sum = "h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=", + version = "v1.0.2", + ) + go_repository( + name = "com_github_hashicorp_go_uuid", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/hashicorp/go-uuid", + sum = "h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=", + version = "v1.0.3", + ) + go_repository( + name = "com_github_hashicorp_go_version", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/hashicorp/go-version", + sum = "h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=", + version = "v1.6.0", + ) + go_repository( + name = "com_github_hashicorp_golang_lru", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/hashicorp/golang-lru", + sum = "h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=", + version = "v0.5.4", + ) + go_repository( + name = "com_github_hashicorp_hc_install", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/hashicorp/hc-install", + sum = "h1:yE/r1yJvWbtrJ0STwScgEnCanb0U9v7zp0Gbkmcoxqs=", + version = "v0.6.3", + ) + go_repository( + name = "com_github_hashicorp_hcl", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/hashicorp/hcl", + sum = "h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_hashicorp_hcl_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/hashicorp/hcl/v2", + sum = "h1://i05Jqznmb2EXqa39Nsvyan2o5XyMowW5fnCKW5RPI=", + version = "v2.19.1", + ) + go_repository( + name = "com_github_hashicorp_logutils", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/hashicorp/logutils", + sum = "h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y=", + version = "v1.0.0", + ) + 
go_repository( + name = "com_github_hashicorp_terraform_exec", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/hashicorp/terraform-exec", + sum = "h1:DIZnPsqzPGuUnq6cH8jWcPunBfY+C+M8JyYF3vpnuEo=", + version = "v0.20.0", + ) + go_repository( + name = "com_github_hashicorp_terraform_json", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/hashicorp/terraform-json", + sum = "h1:9NQxbLNqPbEMze+S6+YluEdXgJmhQykRyRNd+zTI05U=", + version = "v0.21.0", + ) + go_repository( + name = "com_github_hashicorp_terraform_plugin_framework", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/hashicorp/terraform-plugin-framework", + sum = "h1:8kcvqJs/x6QyOFSdeAyEgsenVOUeC/IyKpi2ul4fjTg=", + version = "v1.5.0", + ) + go_repository( + name = "com_github_hashicorp_terraform_plugin_framework_validators", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/hashicorp/terraform-plugin-framework-validators", + sum = "h1:HOjBuMbOEzl7snOdOoUfE2Jgeto6JOjLVQ39Ls2nksc=", + version = "v0.12.0", + ) + go_repository( + name = "com_github_hashicorp_terraform_plugin_go", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/hashicorp/terraform-plugin-go", + sum = "h1:VSjdVQYNDKR0l2pi3vsFK1PdMQrw6vGOshJXMNFeVc0=", + version = "v0.21.0", + ) + go_repository( + name = "com_github_hashicorp_terraform_plugin_log", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/hashicorp/terraform-plugin-log", + sum = "h1:i7hOA+vdAItN1/7UrfBqBwvYPQ9TFvymaRGZED3FCV0=", + version = "v0.9.0", + ) + go_repository( + name = "com_github_hashicorp_terraform_plugin_sdk_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/hashicorp/terraform-plugin-sdk/v2", + sum = "h1:X7vB6vn5tON2b49ILa4W7mFAsndeqJ7bZFOGbVO+0Cc=", + version = "v2.30.0", + ) + go_repository( + name = "com_github_hashicorp_terraform_plugin_testing", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/hashicorp/terraform-plugin-testing", + sum = "h1:Wsnfh+7XSVRfwcr2jZYHsnLOnZl7UeaOBvsx6dl/608=", + version = "v1.6.0", + ) + go_repository( + name = "com_github_hashicorp_terraform_registry_address", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/hashicorp/terraform-registry-address", + sum = "h1:2TAiKJ1A3MAkZlH1YI/aTVcLZRu7JseiXNRHbOAyoTI=", + version = "v0.2.3", + ) + go_repository( + name = "com_github_hashicorp_terraform_svchost", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/hashicorp/terraform-svchost", + sum = "h1:EZZimZ1GxdqFRinZ1tpJwVxxt49xc/S52uzrw4x0jKQ=", + version = "v0.1.1", + ) + go_repository( + name = "com_github_hashicorp_vault_api", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/hashicorp/vault/api", + sum = "h1:/US7sIjWN6Imp4o/Rj1Ce2Nr5bki/AXi9vAW3p2tOJQ=", + version = "v1.10.0", + ) + go_repository( + name = "com_github_hashicorp_yamux", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/hashicorp/yamux", + sum = "h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE=", + version = "v0.1.1", + ) + go_repository( + name = "com_github_hexops_gotextdiff", 
+ build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/hexops/gotextdiff", + sum = "h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=", + version = "v1.0.3", + ) + go_repository( + name = "com_github_howeyc_gopass", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/howeyc/gopass", + sum = "h1:A9HsByNhogrvm9cWb28sjiS3i7tcKCkflWFEkHfuAgM=", + version = "v0.0.0-20210920133722-c8aef6fb66ef", + ) + go_repository( + name = "com_github_huandu_xstrings", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/huandu/xstrings", + sum = "h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU=", + version = "v1.4.0", + ) + go_repository( + name = "com_github_hugelgupf_vmtest", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/hugelgupf/vmtest", + sum = "h1:aa9+0fjwoGotyC8A3QjdITMAX89g/+qvDAhKPrK1NKE=", + version = "v0.0.0-20240110072021-f6f07acb7aa1", + ) + go_repository( + name = "com_github_ianlancetaylor_demangle", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/ianlancetaylor/demangle", + sum = "h1:BA4a7pe6ZTd9F8kXETBoijjFJ/ntaa//1wiH9BZu4zU=", + version = "v0.0.0-20230524184225-eabc099b10ab", + ) + go_repository( + name = "com_github_imdario_mergo", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/imdario/mergo", + sum = "h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=", + version = "v0.3.16", + ) + go_repository( + name = "com_github_in_toto_in_toto_golang", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/in-toto/in-toto-golang", + sum = "h1:tHny7ac4KgtsfrG6ybU8gVOZux2H8jN05AXJ9EBM1XU=", + version = "v0.9.0", + ) + go_repository( + name = "com_github_inconshreveable_mousetrap", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/inconshreveable/mousetrap", + sum = "h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=", + version = "v1.1.0", + ) + go_repository( + name = "com_github_insomniacslk_dhcp", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/insomniacslk/dhcp", + sum = "h1:9K06NfxkBh25x56yVhWWlKFE8YpicaSfHwoV8SFbueA=", + version = "v0.0.0-20231206064809-8c70d406f6d2", + ) + go_repository( + name = "com_github_intel_goresctrl", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/intel/goresctrl", + sum = "h1:K2D3GOzihV7xSBedGxONSlaw/un1LZgWsc9IfqipN4c=", + version = "v0.3.0", + ) + go_repository( + name = "com_github_ishidawataru_sctp", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/ishidawataru/sctp", + sum = "h1:i2fYnDurfLlJH8AyyMOnkLHnHeP8Ff/DDpuZA/D3bPo=", + version = "v0.0.0-20230406120618-7ff4192f6ff2", + ) + go_repository( + name = "com_github_jbenet_go_context", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/jbenet/go-context", + sum = "h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=", + version = "v0.0.0-20150711004518-d14ea06fba99", + ) + go_repository( + name = "com_github_jedisct1_go_minisign", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/jedisct1/go-minisign", + sum = 
"h1:TMtDYDHKYY15rFihtRfck/bfFqNfvcabqvXAFQfAUpY=", + version = "v0.0.0-20230811132847-661be99b8267", + ) + go_repository( + name = "com_github_jeffashton_win_pdh", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/JeffAshton/win_pdh", + sum = "h1:UKkYhof1njT1/xq4SEg5z+VpTgjmNeHwPGRQl7takDI=", + version = "v0.0.0-20161109143554-76bb4ee9f0ab", + ) + go_repository( + name = "com_github_jellydator_ttlcache_v3", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/jellydator/ttlcache/v3", + sum = "h1:RCgYJqo3jgvhl+fEWvjNW8thxGWsgxi+TPhRir1Y9y8=", + version = "v3.1.1", + ) + go_repository( + name = "com_github_jessevdk_go_flags", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/jessevdk/go-flags", + sum = "h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA=", + version = "v1.4.0", + ) + go_repository( + name = "com_github_jhump_protoreflect", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/jhump/protoreflect", + sum = "h1:6SFRuqU45u9hIZPJAoZ8c28T3nK64BNdp9w6jFonzls=", + version = "v1.15.3", + ) + go_repository( + name = "com_github_jmespath_go_jmespath", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/jmespath/go-jmespath", + sum = "h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=", + version = "v0.4.0", + ) + go_repository( + name = "com_github_jmespath_go_jmespath_internal_testify", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/jmespath/go-jmespath/internal/testify", + sum = "h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=", + version = "v1.5.1", + ) + go_repository( + name = "com_github_jmhodges_clock", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/jmhodges/clock", + sum = "h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs=", + version = "v1.2.0", + ) + go_repository( + name = "com_github_jmoiron_sqlx", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/jmoiron/sqlx", + sum = "h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=", + version = "v1.3.5", + ) + go_repository( + name = "com_github_jonboulle_clockwork", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/jonboulle/clockwork", + sum = "h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=", + version = "v0.4.0", + ) + go_repository( + name = "com_github_josephspurrier_goversioninfo", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/josephspurrier/goversioninfo", + sum = "h1:Puhl12NSHUSALHSuzYwPYQkqa2E1+7SrtAPJorKK0C8=", + version = "v1.4.0", + ) + go_repository( + name = "com_github_josharian_intern", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/josharian/intern", + sum = "h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_josharian_native", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/josharian/native", + sum = "h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA=", + version = "v1.1.0", + ) + go_repository( + name = "com_github_jpillora_backoff", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = 
"github.com/jpillora/backoff", + sum = "h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_jsimonetti_rtnetlink", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/jsimonetti/rtnetlink", + sum = "h1:Z1BF0fRgcETPEa0Kt0MRk3yV5+kF1FWTni6KUFKrq2I=", + version = "v1.4.0", + ) + go_repository( + name = "com_github_json_iterator_go", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/json-iterator/go", + sum = "h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=", + version = "v1.1.12", + ) + go_repository( + name = "com_github_jstemmer_go_junit_report", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/jstemmer/go-junit-report", + sum = "h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=", + version = "v0.9.1", + ) + go_repository( + name = "com_github_julienschmidt_httprouter", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/julienschmidt/httprouter", + sum = "h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=", + version = "v1.3.0", + ) + go_repository( + name = "com_github_k0kubun_go_ansi", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/k0kubun/go-ansi", + sum = "h1:qGQQKEcAR99REcMpsXCp3lJ03zYT1PkRd3kQGPn9GVg=", + version = "v0.0.0-20180517002512-3bf9e2903213", + ) + go_repository( + name = "com_github_karrick_godirwalk", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/karrick/godirwalk", + sum = "h1:b4kY7nqDdioR/6qnbHQyDvmA17u5G1cZ6J+CZXwSWoI=", + version = "v1.17.0", + ) + go_repository( + name = "com_github_katexochen_sh_v3", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/katexochen/sh/v3", + sum = "h1:jrU9BWBgp9o2NcetUVm3dNpQ2SK1zG6aF6WF0wtPajc=", + version = "v3.7.0", + ) + go_repository( + name = "com_github_kevinburke_ssh_config", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/kevinburke/ssh_config", + sum = "h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=", + version = "v1.2.0", + ) + go_repository( + name = "com_github_kisielk_errcheck", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/kisielk/errcheck", + sum = "h1:e8esj/e4R+SAOwFwN+n3zr0nYeCyeweozKfO23MvHzY=", + version = "v1.5.0", + ) + go_repository( + name = "com_github_kisielk_gotool", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/kisielk/gotool", + sum = "h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_klauspost_compress", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/klauspost/compress", + sum = "h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI=", + version = "v1.17.6", + ) + go_repository( + name = "com_github_klauspost_cpuid_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/klauspost/cpuid/v2", + sum = "h1:g0I61F2K2DjRHz1cnxlkNSBIaePVoJIjjnHui8QHbiw=", + version = "v2.0.4", + ) + go_repository( + name = "com_github_klauspost_pgzip", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/klauspost/pgzip", + sum = 
"h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=", + version = "v1.2.6", + ) + go_repository( + name = "com_github_konsorten_go_windows_terminal_sequences", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/konsorten/go-windows-terminal-sequences", + sum = "h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=", + version = "v1.0.1", + ) + go_repository( + name = "com_github_kr_fs", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/kr/fs", + sum = "h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=", + version = "v0.1.0", + ) + go_repository( + name = "com_github_kr_logfmt", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/kr/logfmt", + sum = "h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=", + version = "v0.0.0-20140226030751-b84e30acd515", + ) + go_repository( + name = "com_github_kr_pretty", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/kr/pretty", + sum = "h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=", + version = "v0.3.1", + ) + go_repository( + name = "com_github_kr_pty", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/kr/pty", + sum = "h1:AkaSdXYQOWeaO3neb8EM634ahkXXe3jYbVh/F9lq+GI=", + version = "v1.1.8", + ) + go_repository( + name = "com_github_kr_text", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/kr/text", + sum = "h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=", + version = "v0.2.0", + ) + go_repository( + name = "com_github_kylelemons_godebug", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/kylelemons/godebug", + sum = "h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=", + version = "v1.1.0", + ) + go_repository( + name = "com_github_lann_builder", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/lann/builder", + sum = "h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw=", + version = "v0.0.0-20180802200727-47ae307949d0", + ) + go_repository( + name = "com_github_lann_ps", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/lann/ps", + sum = "h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk=", + version = "v0.0.0-20150810152359-62de8c46ede0", + ) + go_repository( + name = "com_github_leodido_go_urn", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/leodido/go-urn", + sum = "h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=", + version = "v1.4.0", + ) + go_repository( + name = "com_github_lestrrat_go_backoff_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/lestrrat-go/backoff/v2", + sum = "h1:oNb5E5isby2kiro9AgdHLv5N5tint1AnDVVf2E2un5A=", + version = "v2.0.8", + ) + go_repository( + name = "com_github_lestrrat_go_blackmagic", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/lestrrat-go/blackmagic", + sum = "h1:XzdxDbuQTz0RZZEmdU7cnQxUtFUzgCSPq8RCz4BxIi4=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_lestrrat_go_httpcc", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/lestrrat-go/httpcc", + sum = "h1:ydWCStUeJLkpYyjLDHihupbn2tYmZ7m22BGkcvZZrIE=", + version = "v1.0.1", + ) + 
go_repository( + name = "com_github_lestrrat_go_iter", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/lestrrat-go/iter", + sum = "h1:q8faalr2dY6o8bV45uwrxq12bRa1ezKrB6oM9FUgN4A=", + version = "v1.0.1", + ) + go_repository( + name = "com_github_lestrrat_go_jwx", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/lestrrat-go/jwx", + sum = "h1:tAx93jN2SdPvFn08fHNAhqFJazn5mBBOB8Zli0g0otA=", + version = "v1.2.25", + ) + go_repository( + name = "com_github_lestrrat_go_option", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/lestrrat-go/option", + sum = "h1:WqAWL8kh8VcSoD6xjSH34/1m8yxluXQbDeKNfvFeEO4=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_letsencrypt_borp", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/letsencrypt/borp", + sum = "h1:xS2U6PQYRURk61YN4Y5xvyLbQVyAP/8fpE6hJZdwEWs=", + version = "v0.0.0-20230707160741-6cc6ce580243", + ) + go_repository( + name = "com_github_letsencrypt_boulder", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/letsencrypt/boulder", + sum = "h1:Y0fwz/hllcpgv9X24KyS/x8O6MdsOx217vAp1XV4Is0=", + version = "v0.0.0-20240216200101-4eb5e3caa228", + ) + go_repository( + name = "com_github_letsencrypt_challtestsrv", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/letsencrypt/challtestsrv", + sum = "h1:Lzv4jM+wSgVMCeO5a/F/IzSanhClstFMnX6SfrAJXjI=", + version = "v1.2.1", + ) + go_repository( + name = "com_github_letsencrypt_pkcs11key_v4", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/letsencrypt/pkcs11key/v4", + sum = "h1:qLc/OznH7xMr5ARJgkZCCWk+EomQkiNTOoOF5LAgagc=", + version = "v4.0.0", + ) + go_repository( + name = "com_github_letsencrypt_validator_v10", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/letsencrypt/validator/v10", + sum = "h1:HGFsIltYMUiB5eoFSowFzSoXkocM2k9ctmJ57QMGjys=", + version = "v10.0.0-20230215210743-a0c7dfc17158", + ) + go_repository( + name = "com_github_lib_pq", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/lib/pq", + sum = "h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=", + version = "v1.10.9", + ) + go_repository( + name = "com_github_libopenstorage_openstorage", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/libopenstorage/openstorage", + sum = "h1:GLPam7/0mpdP8ZZtKjbfcXJBTIA/T1O6CBErVEFEyIM=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_liggitt_tabwriter", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/liggitt/tabwriter", + sum = "h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=", + version = "v0.0.0-20181228230101-89fcab3d43de", + ) + go_repository( + name = "com_github_linuxkit_virtsock", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/linuxkit/virtsock", + sum = "h1:jUp75lepDg0phMUJBCmvaeFDldD2N3S1lBuPwUTszio=", + version = "v0.0.0-20201010232012-f8cee7dfc7a3", + ) + go_repository( + name = "com_github_lithammer_dedent", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/lithammer/dedent", + 
sum = "h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY=", + version = "v1.1.0", + ) + go_repository( + name = "com_github_magiconair_properties", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/magiconair/properties", + sum = "h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=", + version = "v1.8.7", + ) + go_repository( + name = "com_github_mailru_easyjson", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/mailru/easyjson", + sum = "h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=", + version = "v0.7.7", + ) + go_repository( + name = "com_github_makenowjust_heredoc", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/MakeNowJust/heredoc", + sum = "h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_martinjungblut_go_cryptsetup", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/martinjungblut/go-cryptsetup", + patches = [ + "//3rdparty/bazel/com_github_martinjungblut_go_cryptsetup:com_github_martinjungblut_go_cryptsetup.patch", # keep + ], + replace = "github.com/daniel-weisse/go-cryptsetup", + sum = "h1:ToajP6trZoiqlZ3Z4uoG1P02/wtqSw1AcowOXOYjATk=", + version = "v0.0.0-20230705150314-d8c07bd1723c", + ) + go_repository( + name = "com_github_masterminds_goutils", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/Masterminds/goutils", + sum = "h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=", + version = "v1.1.1", + ) + go_repository( + name = "com_github_masterminds_semver_v3", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/Masterminds/semver/v3", + sum = "h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0=", + version = "v3.2.1", + ) + go_repository( + name = "com_github_masterminds_sprig_v3", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/Masterminds/sprig/v3", + sum = "h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA=", + version = "v3.2.3", + ) + go_repository( + name = "com_github_masterminds_squirrel", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/Masterminds/squirrel", + sum = "h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM=", + version = "v1.5.4", + ) + go_repository( + name = "com_github_masterminds_vcs", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/Masterminds/vcs", + sum = "h1:IIA2aBdXvfbIM+yl/eTnL4hb1XwdpvuQLglAix1gweE=", + version = "v1.13.3", + ) + go_repository( + name = "com_github_mattn_go_colorable", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/mattn/go-colorable", + sum = "h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=", + version = "v0.1.13", + ) + go_repository( + name = "com_github_mattn_go_isatty", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/mattn/go-isatty", + sum = "h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=", + version = "v0.0.20", + ) + go_repository( + name = "com_github_mattn_go_oci8", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/mattn/go-oci8", + sum = "h1:aEUDxNAyDG0tv8CA3TArnDQNyc4EhnWlsfxRgDHABHM=", + version = "v0.1.1", + ) + go_repository( + 
name = "com_github_mattn_go_runewidth", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/mattn/go-runewidth", + sum = "h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=", + version = "v0.0.15", + ) + go_repository( + name = "com_github_mattn_go_shellwords", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/mattn/go-shellwords", + sum = "h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=", + version = "v1.0.12", + ) + go_repository( + name = "com_github_mattn_go_sqlite3", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/mattn/go-sqlite3", + sum = "h1:fhGleo2h1p8tVChob4I9HpmVFIAkKGpiukdrgQbWfGI=", + version = "v1.14.19", + ) + go_repository( + name = "com_github_matttproud_golang_protobuf_extensions", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/matttproud/golang_protobuf_extensions", + sum = "h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=", + version = "v1.0.4", + ) + go_repository( + name = "com_github_matttproud_golang_protobuf_extensions_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/matttproud/golang_protobuf_extensions/v2", + sum = "h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg=", + version = "v2.0.0", + ) + go_repository( + name = "com_github_mdlayher_ethtool", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/mdlayher/ethtool", + sum = "h1:XAWHsmKhyPOo42qq/yTPb0eFBGUKKTR1rE0dVrWVQ0Y=", + version = "v0.1.0", + ) + go_repository( + name = "com_github_mdlayher_genetlink", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/mdlayher/genetlink", + sum = "h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy56gw=", + version = "v1.3.2", + ) + go_repository( + name = "com_github_mdlayher_netlink", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/mdlayher/netlink", + sum = "h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g=", + version = "v1.7.2", + ) + go_repository( + name = "com_github_mdlayher_packet", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/mdlayher/packet", + sum = "h1:3Up1NG6LZrsgDVn6X4L9Ge/iyRyxFEFD9o6Pr3Q1nQY=", + version = "v1.1.2", + ) + go_repository( + name = "com_github_mdlayher_socket", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/mdlayher/socket", + sum = "h1:ilICZmJcQz70vrWVes1MFera4jGiWNocSkykwwoy3XI=", + version = "v0.5.0", + ) + go_repository( + name = "com_github_microsoft_go_winio", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/Microsoft/go-winio", + sum = "h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=", + version = "v0.6.1", + ) + go_repository( + name = "com_github_microsoft_hcsshim", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/Microsoft/hcsshim", + sum = "h1:68vKo2VN8DE9AdN4tnkWnmdhqdbpUFM8OF3Airm7fz8=", + version = "v0.11.4", + ) + go_repository( + name = "com_github_miekg_dns", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/miekg/dns", + sum = "h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4=", + version = "v1.1.58", + ) + go_repository( + name = 
"com_github_miekg_pkcs11", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/miekg/pkcs11", + sum = "h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=", + version = "v1.1.1", + ) + go_repository( + name = "com_github_minio_sha256_simd", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/minio/sha256-simd", + sum = "h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_mistifyio_go_zfs", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/mistifyio/go-zfs", + sum = "h1:aKW/4cBs+yK6gpqU3K/oIwk9Q/XICqd3zOX/UFuvqmk=", + version = "v2.1.2-0.20190413222219-f784269be439+incompatible", + ) + go_repository( + name = "com_github_mistifyio_go_zfs_v3", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/mistifyio/go-zfs/v3", + sum = "h1:YaoXgBePoMA12+S1u/ddkv+QqxcfiZK4prI6HPnkFiU=", + version = "v3.0.1", + ) + go_repository( + name = "com_github_mitchellh_cli", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/mitchellh/cli", + sum = "h1:OxRIeJXpAMztws/XHlN2vu6imG5Dpq+j61AzAX5fLng=", + version = "v1.1.5", + ) + go_repository( + name = "com_github_mitchellh_colorstring", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/mitchellh/colorstring", + sum = "h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ=", + version = "v0.0.0-20190213212951-d06e56a500db", + ) + go_repository( + name = "com_github_mitchellh_copystructure", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/mitchellh/copystructure", + sum = "h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=", + version = "v1.2.0", + ) + go_repository( + name = "com_github_mitchellh_go_homedir", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/mitchellh/go-homedir", + sum = "h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=", + version = "v1.1.0", + ) + go_repository( + name = "com_github_mitchellh_go_testing_interface", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/mitchellh/go-testing-interface", + sum = "h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU=", + version = "v1.14.1", + ) + go_repository( + name = "com_github_mitchellh_go_wordwrap", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/mitchellh/go-wordwrap", + sum = "h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=", + version = "v1.0.1", + ) + go_repository( + name = "com_github_mitchellh_mapstructure", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/mitchellh/mapstructure", + sum = "h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=", + version = "v1.5.0", + ) + go_repository( + name = "com_github_mitchellh_reflectwalk", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/mitchellh/reflectwalk", + sum = "h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=", + version = "v1.0.2", + ) + go_repository( + name = "com_github_moby_ipvs", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/moby/ipvs", + sum = "h1:ONN4pGaZQgAx+1Scz5RvWV4Q7Gb+mvfRh3NsPS+1XQQ=", + version = "v1.1.0", + ) + 
go_repository( + name = "com_github_moby_locker", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/moby/locker", + sum = "h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=", + version = "v1.0.1", + ) + go_repository( + name = "com_github_moby_spdystream", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/moby/spdystream", + sum = "h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=", + version = "v0.2.0", + ) + go_repository( + name = "com_github_moby_sys_mountinfo", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/moby/sys/mountinfo", + sum = "h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g=", + version = "v0.7.1", + ) + go_repository( + name = "com_github_moby_sys_sequential", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/moby/sys/sequential", + sum = "h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=", + version = "v0.5.0", + ) + go_repository( + name = "com_github_moby_sys_signal", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/moby/sys/signal", + sum = "h1:25RW3d5TnQEoKvRbEKUGay6DCQ46IxAVTT9CUMgmsSI=", + version = "v0.7.0", + ) + go_repository( + name = "com_github_moby_sys_symlink", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/moby/sys/symlink", + sum = "h1:tk1rOM+Ljp0nFmfOIBtlV3rTDlWOwFRhjEeAhZB0nZc=", + version = "v0.2.0", + ) + go_repository( + name = "com_github_moby_sys_user", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/moby/sys/user", + sum = "h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg=", + version = "v0.1.0", + ) + go_repository( + name = "com_github_moby_term", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/moby/term", + sum = "h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=", + version = "v0.5.0", + ) + go_repository( + name = "com_github_modern_go_concurrent", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/modern-go/concurrent", + sum = "h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=", + version = "v0.0.0-20180306012644-bacd9c7ef1dd", + ) + go_repository( + name = "com_github_modern_go_reflect2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/modern-go/reflect2", + sum = "h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=", + version = "v1.0.2", + ) + go_repository( + name = "com_github_mohae_deepcopy", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/mohae/deepcopy", + sum = "h1:e+l77LJOEqXTIQihQJVkA6ZxPOUmfPM5e4H7rcpgtSk=", + version = "v0.0.0-20170603005431-491d3605edfb", + ) + go_repository( + name = "com_github_monochromegane_go_gitignore", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/monochromegane/go-gitignore", + sum = "h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0=", + version = "v0.0.0-20200626010858-205db1a8cc00", + ) + go_repository( + name = "com_github_montanaflynn_stats", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/montanaflynn/stats", + sum = "h1:r3y12KyNxj/Sb/iOE46ws+3mS1+MZca1wlHQFPsY/JU=", + version = "v0.7.0", + ) + go_repository( + name = 
"com_github_morikuni_aec", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/morikuni/aec", + sum = "h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_mr_tron_base58", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/mr-tron/base58", + sum = "h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=", + version = "v1.2.0", + ) + go_repository( + name = "com_github_mrunalp_fileutils", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/mrunalp/fileutils", + sum = "h1:F+S7ZlNKnrwHfSwdlgNSkKo67ReVf8o9fel6C3dkm/Q=", + version = "v0.5.1", + ) + go_repository( + name = "com_github_munnerz_goautoneg", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/munnerz/goautoneg", + sum = "h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=", + version = "v0.0.0-20191010083416-a7dc8b61c822", + ) + go_repository( + name = "com_github_mwitkow_go_conntrack", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/mwitkow/go-conntrack", + sum = "h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=", + version = "v0.0.0-20190716064945-2f068394615f", + ) + go_repository( + name = "com_github_mxk_go_flowrate", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/mxk/go-flowrate", + sum = "h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=", + version = "v0.0.0-20140419014527-cca7078d478f", + ) + go_repository( + name = "com_github_nelsam_hel_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/nelsam/hel/v2", + sum = "h1:Z3TAKd9JS3BoKi6fW+d1bKD2Mf0FzTqDUEAwLWzYPRQ=", + version = "v2.3.3", + ) + go_repository( + name = "com_github_netflix_go_expect", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/Netflix/go-expect", + sum = "h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s=", + version = "v0.0.0-20220104043353-73e0943537d2", + ) + go_repository( + name = "com_github_niemeyer_pretty", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/niemeyer/pretty", + sum = "h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=", + version = "v0.0.0-20200227124842-a10e7caefd8e", + ) + go_repository( + name = "com_github_nxadm_tail", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/nxadm/tail", + sum = "h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY=", + version = "v1.4.11", + ) + go_repository( + name = "com_github_nytimes_gziphandler", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/NYTimes/gziphandler", + sum = "h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=", + version = "v1.1.1", + ) + go_repository( + name = "com_github_oklog_run", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/oklog/run", + sum = "h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA=", + version = "v1.1.0", + ) + go_repository( + name = "com_github_oklog_ulid", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/oklog/ulid", + sum = "h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=", + version = "v1.3.1", + ) + go_repository( + name = 
"com_github_olareg_olareg", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/olareg/olareg", + sum = "h1:iN0dytB++Fnk2axqdCAfUyqrpHsOv2FfVH5MD3nXscA=", + version = "v0.0.0-20240206155231-8ba4b6726143", + ) + go_repository( + name = "com_github_olekukonko_tablewriter", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/olekukonko/tablewriter", + sum = "h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=", + version = "v0.0.5", + ) + go_repository( + name = "com_github_oneofone_xxhash", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/OneOfOne/xxhash", + sum = "h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8=", + version = "v1.2.8", + ) + go_repository( + name = "com_github_onsi_ginkgo_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/onsi/ginkgo/v2", + sum = "h1:vSmGj2Z5YPb9JwCWT6z6ihcUvDhuXLc3sJiqd3jMKAY=", + version = "v2.14.0", + ) + go_repository( + name = "com_github_onsi_gomega", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/onsi/gomega", + sum = "h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8=", + version = "v1.30.0", + ) + go_repository( + name = "com_github_open_policy_agent_opa", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/open-policy-agent/opa", + sum = "h1:qocVAKyjrqMjCqsU02S/gHyLr4AQQ9xMtuV1kKnnyhM=", + version = "v0.42.2", + ) + go_repository( + name = "com_github_opencontainers_go_digest", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/opencontainers/go-digest", + sum = "h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_opencontainers_image_spec", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/opencontainers/image-spec", + sum = "h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=", + version = "v1.1.0", + ) + go_repository( + name = "com_github_opencontainers_runc", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/opencontainers/runc", + sum = "h1:EaL5WeO9lv9wmS6SASjszOeQdSctvpbu0DdBQBizE40=", + version = "v1.1.10", + ) + go_repository( + name = "com_github_opencontainers_runtime_spec", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/opencontainers/runtime-spec", + sum = "h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg=", + version = "v1.1.0", + ) + go_repository( + name = "com_github_opencontainers_runtime_tools", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/opencontainers/runtime-tools", + sum = "h1:DmNGcqH3WDbV5k8OJ+esPWbqUOX5rMLR2PMvziDMJi0=", + version = "v0.9.1-0.20221107090550-2e043c6bd626", + ) + go_repository( + name = "com_github_opencontainers_selinux", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/opencontainers/selinux", + sum = "h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=", + version = "v1.11.0", + ) + go_repository( + name = "com_github_opentracing_opentracing_go", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/opentracing/opentracing-go", + sum = 
"h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=", + version = "v1.2.0", + ) + go_repository( + name = "com_github_otiai10_copy", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/otiai10/copy", + sum = "h1:IinKAryFFuPONZ7cm6T6E2QX/vcJwSnlaA5lfoaXIiQ=", + version = "v1.6.0", + ) + go_repository( + name = "com_github_otiai10_curr", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/otiai10/curr", + sum = "h1:TJIWdbX0B+kpNagQrjgq8bCMrbhiuX73M2XwgtDMoOI=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_otiai10_mint", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/otiai10/mint", + sum = "h1:VYWnrP5fXmz1MXvjuUvcBrXSjGE6xjON+axB/UrpO3E=", + version = "v1.3.2", + ) + go_repository( + name = "com_github_pborman_uuid", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/pborman/uuid", + sum = "h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw=", + version = "v1.2.1", + ) + go_repository( + name = "com_github_pelletier_go_buffruneio", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/pelletier/go-buffruneio", + sum = "h1:U4t4R6YkofJ5xHm3dJzuRpPZ0mr5MMCoAWooScCR7aA=", + version = "v0.2.0", + ) + go_repository( + name = "com_github_pelletier_go_toml", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/pelletier/go-toml", + sum = "h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=", + version = "v1.9.5", + ) + go_repository( + name = "com_github_pelletier_go_toml_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/pelletier/go-toml/v2", + sum = "h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=", + version = "v2.1.0", + ) + go_repository( + name = "com_github_peterbourgon_diskv", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/peterbourgon/diskv", + sum = "h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=", + version = "v2.0.1+incompatible", + ) + go_repository( + name = "com_github_phayes_freeport", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/phayes/freeport", + sum = "h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI=", + version = "v0.0.0-20220201140144-74d24b5ae9f5", + ) + go_repository( + name = "com_github_philhofer_fwd", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/philhofer/fwd", + sum = "h1:GdGcTjf5RNAxwS4QLsiMzJYj5KEvPJD3Abr261yRQXQ=", + version = "v1.1.1", + ) + go_repository( + name = "com_github_pierrec_lz4_v4", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/pierrec/lz4/v4", + sum = "h1:+fL8AQEZtz/ijeNnpduH0bROTu0O3NZAlPjQxGn8LwE=", + version = "v4.1.14", + ) + go_repository( + name = "com_github_pjbgf_sha1cd", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/pjbgf/sha1cd", + sum = "h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4=", + version = "v0.3.0", + ) + go_repository( + name = "com_github_pkg_browser", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/pkg/browser", + sum = "h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=", + version = "v0.0.0-20240102092130-5ac0b6a4141c", + ) + 
go_repository( + name = "com_github_pkg_diff", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/pkg/diff", + sum = "h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A=", + version = "v0.0.0-20210226163009-20ebb0f2a09e", + ) + go_repository( + name = "com_github_pkg_errors", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/pkg/errors", + sum = "h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=", + version = "v0.9.1", + ) + go_repository( + name = "com_github_pkg_sftp", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/pkg/sftp", + sum = "h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo=", + version = "v1.13.6", + ) + go_repository( + name = "com_github_pmezard_go_difflib", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/pmezard/go-difflib", + sum = "h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_posener_complete", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/posener/complete", + sum = "h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo=", + version = "v1.2.3", + ) + go_repository( + name = "com_github_poy_onpar", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/poy/onpar", + sum = "h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY=", + version = "v1.1.2", + ) + go_repository( + name = "com_github_pquerna_cachecontrol", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/pquerna/cachecontrol", + sum = "h1:yJMy84ti9h/+OEWa752kBTKv4XC30OtVVHYv/8cTqKc=", + version = "v0.1.0", + ) + go_repository( + name = "com_github_prometheus_client_golang", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/prometheus/client_golang", + sum = "h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk=", + version = "v1.18.0", + ) + go_repository( + name = "com_github_prometheus_client_model", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/prometheus/client_model", + sum = "h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos=", + version = "v0.6.0", + ) + go_repository( + name = "com_github_prometheus_common", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/prometheus/common", + sum = "h1:p5Cz0FNHo7SnWOmWmoRozVcjEp0bIVU8cV7OShpjL1k=", + version = "v0.47.0", + ) + go_repository( + name = "com_github_prometheus_procfs", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/prometheus/procfs", + sum = "h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=", + version = "v0.12.0", + ) + go_repository( + name = "com_github_prometheus_prometheus", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/prometheus/prometheus", + sum = "h1:jWcnuQHz1o1Wu3MZ6nMJDuTI0kU5yJp9pkxh8XEkNvI=", + version = "v0.47.2", + ) + go_repository( + name = "com_github_protonmail_go_crypto", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/ProtonMail/go-crypto", + sum = "h1:P5Wd8eQ6zAzT4HpJI67FDKnTSf3xiJGQFqY1agDJPy4=", + version = "v1.1.0-alpha.0-proton", + ) + go_repository( + name = "com_github_protonmail_go_mime", + 
build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/ProtonMail/go-mime", + sum = "h1:tCbYj7/299ekTTXpdwKYF8eBlsYsDVoggDAuAjoK66k=", + version = "v0.0.0-20230322103455-7d82a3887f2f", + ) + go_repository( + name = "com_github_protonmail_gopenpgp_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/ProtonMail/gopenpgp/v2", + sum = "h1:Vz/8+HViFFnf2A6XX8JOvZMrA6F5puwNvvF21O1mRlo=", + version = "v2.7.4", + ) + go_repository( + name = "com_github_rcrowley_go_metrics", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/rcrowley/go-metrics", + sum = "h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ=", + version = "v0.0.0-20200313005456-10cdbea86bc0", + ) + go_repository( + name = "com_github_redis_go_redis_v9", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/redis/go-redis/v9", + sum = "h1:Yzoz33UZw9I/mFhx4MNrB6Fk+XHO1VukNcCa1+lwyKk=", + version = "v9.4.0", + ) + go_repository( + name = "com_github_regclient_regclient", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/regclient/regclient", + sum = "h1:d6bXhvz7UYJM+r20ls60RIVdoYh/rp+PygD/dIsJ9UA=", + version = "v0.5.7", + ) + go_repository( + name = "com_github_rivo_uniseg", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/rivo/uniseg", + sum = "h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=", + version = "v0.4.7", + ) + go_repository( + name = "com_github_robfig_cron_v3", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/robfig/cron/v3", + sum = "h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=", + version = "v3.0.1", + ) + go_repository( + name = "com_github_rogpeppe_fastuuid", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/rogpeppe/fastuuid", + sum = "h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s=", + version = "v1.2.0", + ) + go_repository( + name = "com_github_rogpeppe_go_internal", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/rogpeppe/go-internal", + sum = "h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=", + version = "v1.12.0", + ) + go_repository( + name = "com_github_rs_cors", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/rs/cors", + sum = "h1:L0uuZVXIKlI1SShY2nhFfo44TYvDPQ1w4oFkUJNfhyo=", + version = "v1.10.1", + ) + go_repository( + name = "com_github_rs_xid", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/rs/xid", + sum = "h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc=", + version = "v1.5.0", + ) + go_repository( + name = "com_github_rs_zerolog", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/rs/zerolog", + sum = "h1:FcTR3NnLWW+NnTwwhFWiJSZr4ECLpqCm6QsEnyvbV4A=", + version = "v1.31.0", + ) + go_repository( + name = "com_github_rubenv_sql_migrate", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/rubenv/sql-migrate", + sum = "h1:bo6/sjsan9HaXAsNxYP/jCEDUGibHp8JmOBw7NTGRos=", + version = "v1.6.1", + ) + go_repository( + name = "com_github_rubiojr_go_vhd", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath 
= "github.com/rubiojr/go-vhd", + sum = "h1:if3/24+h9Sq6eDx8UUz1SO9cT9tizyIsATfB7b4D3tc=", + version = "v0.0.0-20200706105327-02e210299021", + ) + go_repository( + name = "com_github_russross_blackfriday", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/russross/blackfriday", + sum = "h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww=", + version = "v1.6.0", + ) + go_repository( + name = "com_github_russross_blackfriday_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/russross/blackfriday/v2", + sum = "h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=", + version = "v2.1.0", + ) + go_repository( + name = "com_github_ryanuber_go_glob", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/ryanuber/go-glob", + sum = "h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_sagikazarmark_locafero", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/sagikazarmark/locafero", + sum = "h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=", + version = "v0.4.0", + ) + go_repository( + name = "com_github_sagikazarmark_slog_shim", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/sagikazarmark/slog-shim", + sum = "h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=", + version = "v0.1.0", + ) + go_repository( + name = "com_github_samber_lo", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/samber/lo", + sum = "h1:j2XEAqXKb09Am4ebOg31SpvzUTTs6EN3VfgeLUhPdXM=", + version = "v1.38.1", + ) + go_repository( + name = "com_github_samber_slog_multi", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/samber/slog-multi", + sum = "h1:6BVH9uHGAsiGkbbtQgAOQJMpKgV8unMrHhhJaw+X1EQ=", + version = "v1.0.2", + ) + go_repository( + name = "com_github_sassoftware_relic", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/sassoftware/relic", + sum = "h1:Pwyh1F3I0r4clFJXkSI8bOyJINGqpgjJU3DYAZeI05A=", + version = "v7.2.1+incompatible", + ) + go_repository( + name = "com_github_sassoftware_relic_v7", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/sassoftware/relic/v7", + sum = "h1:O5s8ewCgq5QYNpv45dK4u6IpBmDM9RIcsbf/G1uXepQ=", + version = "v7.6.1", + ) + go_repository( + name = "com_github_schollz_progressbar_v3", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/schollz/progressbar/v3", + sum = "h1:VD+MJPCr4s3wdhTc7OEJ/Z3dAeBzJ7yKH/P4lC5yRTI=", + version = "v3.14.1", + ) + go_repository( + name = "com_github_sebdah_goldie", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/sebdah/goldie", + sum = "h1:9GNhIat69MSlz/ndaBg48vl9dF5fI+NBB6kfOxgfkMc=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_seccomp_libseccomp_golang", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/seccomp/libseccomp-golang", + sum = "h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2rWM6M9YjLpY=", + version = "v0.10.0", + ) + go_repository( + name = "com_github_secure_systems_lab_go_securesystemslib", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + 
importpath = "github.com/secure-systems-lab/go-securesystemslib", + sum = "h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA=", + version = "v0.8.0", + ) + go_repository( + name = "com_github_segmentio_ksuid", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/segmentio/ksuid", + sum = "h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c=", + version = "v1.0.4", + ) + go_repository( + name = "com_github_sergi_go_diff", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/sergi/go-diff", + sum = "h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=", + version = "v1.3.1", + ) + go_repository( + name = "com_github_shibumi_go_pathspec", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/shibumi/go-pathspec", + sum = "h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI=", + version = "v1.3.0", + ) + go_repository( + name = "com_github_shopify_logrus_bugsnag", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/Shopify/logrus-bugsnag", + sum = "h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs=", + version = "v0.0.0-20171204204709-577dee27f20d", + ) + go_repository( + name = "com_github_shopspring_decimal", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/shopspring/decimal", + sum = "h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=", + version = "v1.3.1", + ) + go_repository( + name = "com_github_siderolabs_crypto", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/siderolabs/crypto", + sum = "h1:PP84WSDDyCCbjYKePcc0IaMSPXDndz8V3cQ9hMRSvpA=", + version = "v0.4.1", + ) + go_repository( + name = "com_github_siderolabs_gen", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/siderolabs/gen", + sum = "h1:lM69UYggT7yzpubf7hEFaNujPdY55Y9zvQf/NC18GvA=", + version = "v0.4.7", + ) + go_repository( + name = "com_github_siderolabs_go_api_signature", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/siderolabs/go-api-signature", + sum = "h1:ePXOxBT2fxRICsDntXed9kivmVK269nZe5UXvOxgtnM=", + version = "v0.3.1", + ) + go_repository( + name = "com_github_siderolabs_go_blockdevice", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/siderolabs/go-blockdevice", + sum = "h1:2bk4WpEEflGxjrNwp57ye24Pr+cYgAiAeNMWiQOuWbQ=", + version = "v0.4.7", + ) + go_repository( + name = "com_github_siderolabs_go_pointer", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/siderolabs/go-pointer", + sum = "h1:6TshPKep2doDQJAAtHUuHWXbca8ZfyRySjSBT/4GsMU=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_siderolabs_net", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/siderolabs/net", + sum = "h1:1bOgVay/ijPkJz4qct98nHsiB/ysLQU0KLoBC4qLm7I=", + version = "v0.4.0", + ) + go_repository( + name = "com_github_siderolabs_protoenc", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/siderolabs/protoenc", + sum = "h1:BqxEmeWQeMpNP3R6WrPqDatX8sM/r4t97OP8mFmg6GA=", + version = "v0.2.1", + ) + go_repository( + name = "com_github_siderolabs_talos_pkg_machinery", + build_file_generation = "on", + build_file_proto_mode = 
"disable_global", + importpath = "github.com/siderolabs/talos/pkg/machinery", + sum = "h1:xzkHpHqVnio3IL2z44f/dG3TNVvSafZFvuyqlR6J7nY=", + version = "v1.6.4", + ) + go_repository( + name = "com_github_sigstore_protobuf_specs", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/sigstore/protobuf-specs", + sum = "h1:KIoM7E3C4uaK092q8YoSj/XSf9720f8dlsbYwwOmgEA=", + version = "v0.2.1", + ) + go_repository( + name = "com_github_sigstore_rekor", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/sigstore/rekor", + sum = "h1:QoVXcS7NppKY+rpbEFVHr4evGDZBBSh65X0g8PXoUkQ=", + version = "v1.3.5", + ) + go_repository( + name = "com_github_sigstore_sigstore", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/sigstore/sigstore", + sum = "h1:mAVposMb14oplk2h/bayPmIVdzbq2IhCgy4g6R0ZSjo=", + version = "v1.8.1", + ) + go_repository( + name = "com_github_sigstore_sigstore_pkg_signature_kms_aws", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/sigstore/sigstore/pkg/signature/kms/aws", + sum = "h1:rEDdUefulkIQaMJyzLwtgPDLNXBIltBABiFYfb0YmgQ=", + version = "v1.8.1", + ) + go_repository( + name = "com_github_sigstore_sigstore_pkg_signature_kms_azure", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/sigstore/sigstore/pkg/signature/kms/azure", + sum = "h1:DvRWG99QGWZC5mp42SEde2Xke/Q384Idnj2da7yB+Mk=", + version = "v1.8.1", + ) + go_repository( + name = "com_github_sigstore_sigstore_pkg_signature_kms_gcp", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/sigstore/sigstore/pkg/signature/kms/gcp", + sum = "h1:lwdRsJv1UbBemuk7w5YfXAQilQxMoFevrzamdPbG0wY=", + version = "v1.8.1", + ) + go_repository( + name = "com_github_sigstore_sigstore_pkg_signature_kms_hashivault", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/sigstore/sigstore/pkg/signature/kms/hashivault", + sum = "h1:9Ki0qudKpc1FQdef7xHO2bkLyTuw+qNUpWRzjBEmF4c=", + version = "v1.8.1", + ) + go_repository( + name = "com_github_sirupsen_logrus", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/sirupsen/logrus", + sum = "h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=", + version = "v1.9.3", + ) + go_repository( + name = "com_github_skeema_knownhosts", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/skeema/knownhosts", + sum = "h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ=", + version = "v1.2.1", + ) + go_repository( + name = "com_github_skratchdot_open_golang", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/skratchdot/open-golang", + sum = "h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA=", + version = "v0.0.0-20200116055534-eef842397966", + ) + go_repository( + name = "com_github_soheilhy_cmux", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/soheilhy/cmux", + sum = "h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=", + version = "v0.1.5", + ) + go_repository( + name = "com_github_sourcegraph_conc", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/sourcegraph/conc", + sum = "h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=", + 
version = "v0.3.0", + ) + go_repository( + name = "com_github_spaolacci_murmur3", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/spaolacci/murmur3", + sum = "h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=", + version = "v0.0.0-20180118202830-f09979ecbc72", + ) + go_repository( + name = "com_github_spf13_afero", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/spf13/afero", + sum = "h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=", + version = "v1.11.0", + ) + go_repository( + name = "com_github_spf13_cast", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/spf13/cast", + sum = "h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=", + version = "v1.6.0", + ) + go_repository( + name = "com_github_spf13_cobra", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/spf13/cobra", + sum = "h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=", + version = "v1.8.0", + ) + go_repository( + name = "com_github_spf13_pflag", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/spf13/pflag", + sum = "h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=", + version = "v1.0.5", + ) + go_repository( + name = "com_github_spf13_viper", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/spf13/viper", + sum = "h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ=", + version = "v1.18.2", + ) + go_repository( + name = "com_github_src_d_gcfg", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/src-d/gcfg", + sum = "h1:xXbNR5AlLSA315x2UO+fTSSAXCDf+Ar38/6oyGbDKQ4=", + version = "v1.4.0", + ) + go_repository( + name = "com_github_stefanberger_go_pkcs11uri", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/stefanberger/go-pkcs11uri", + sum = "h1:lIOOHPEbXzO3vnmx2gok1Tfs31Q8GQqKLc8vVqyQq/I=", + version = "v0.0.0-20201008174630-78d3cae3a980", + ) + go_repository( + name = "com_github_stoewer_go_strcase", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/stoewer/go-strcase", + sum = "h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs=", + version = "v1.3.0", + ) + go_repository( + name = "com_github_stretchr_objx", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/stretchr/objx", + sum = "h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=", + version = "v0.5.0", + ) + go_repository( + name = "com_github_stretchr_testify", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/stretchr/testify", + sum = "h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=", + version = "v1.8.4", + ) + go_repository( + name = "com_github_subosito_gotenv", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/subosito/gotenv", + sum = "h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=", + version = "v1.6.0", + ) + go_repository( + name = "com_github_syndtr_gocapability", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/syndtr/gocapability", + sum = "h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=", + version = "v0.0.0-20200815063812-42c35b437635", + ) + go_repository( + name = 
"com_github_syndtr_goleveldb", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/syndtr/goleveldb", + sum = "h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs=", + version = "v1.0.1-0.20220721030215-126854af5e6d", + ) + go_repository( + name = "com_github_tchap_go_patricia_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/tchap/go-patricia/v2", + sum = "h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes=", + version = "v2.3.1", + ) + go_repository( + name = "com_github_theupdateframework_go_tuf", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/theupdateframework/go-tuf", + sum = "h1:CqbQFrWo1ae3/I0UCblSbczevCCbS31Qvs5LdxRWqRI=", + version = "v0.7.0", + ) + go_repository( + name = "com_github_tink_crypto_tink_go_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/tink-crypto/tink-go/v2", + replace = "github.com/derpsteb/tink-go/v2", + sum = "h1:FVii9oXvddz9sFir5TRYjQKrzJLbVD/hibT+SnRSDzg=", + version = "v2.0.0-20231002051717-a808e454eed6", + ) + go_repository( + name = "com_github_tinylib_msgp", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/tinylib/msgp", + sum = "h1:2gXmtWueD2HefZHQe1QOy9HVzmFrLOVvsXwXBQ0ayy0=", + version = "v1.1.5", + ) + go_repository( + name = "com_github_titanous_rocacheck", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/titanous/rocacheck", + sum = "h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0=", + version = "v0.0.0-20171023193734-afe73141d399", + ) + go_repository( + name = "com_github_tmc_grpc_websocket_proxy", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/tmc/grpc-websocket-proxy", + sum = "h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=", + version = "v0.0.0-20220101234140-673ab2c3ae75", + ) + go_repository( + name = "com_github_tomasen_realip", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/tomasen/realip", + sum = "h1:fb190+cK2Xz/dvi9Hv8eCYJYvIGUTN2/KLq1pT6CjEc=", + version = "v0.0.0-20180522021738-f0c99a92ddce", + ) + go_repository( + name = "com_github_transparency_dev_merkle", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/transparency-dev/merkle", + sum = "h1:Q9nBoQcZcgPamMkGn7ghV8XiTZ/kRxn1yCG81+twTK4=", + version = "v0.0.2", + ) + go_repository( + name = "com_github_ttacon_chalk", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/ttacon/chalk", + sum = "h1:OXcKh35JaYsGMRzpvFkLv/MEyPuL49CThT1pZ8aSml4=", + version = "v0.0.0-20160626202418-22c06c80ed31", + ) + go_repository( + name = "com_github_u_root_gobusybox_src", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/u-root/gobusybox/src", + sum = "h1:AQX6C886dZqnOrXtbP0U59melqbb1+YnCfRYRfr4M3M=", + version = "v0.0.0-20231224233253-2944a440b6b6", + ) + go_repository( + name = "com_github_u_root_u_root", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/u-root/u-root", + sum = "h1:1AIJqOtdEufYfGb3eRpdaqWONzBOpAwrg1fehbWg+Mg=", + version = "v0.11.1-0.20230807200058-f87ad7ccb594", + ) + go_repository( + name = "com_github_u_root_uio", + build_file_generation = "on", 
+ build_file_proto_mode = "disable_global", + importpath = "github.com/u-root/uio", + sum = "h1:YcojQL98T/OO+rybuzn2+5KrD5dBwXIvYBvQ2cD3Avg=", + version = "v0.0.0-20230305220412-3e8cd9d6bf63", + ) + go_repository( + name = "com_github_ulikunitz_xz", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/ulikunitz/xz", + sum = "h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8=", + version = "v0.5.11", + ) + go_repository( + name = "com_github_urfave_cli", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/urfave/cli", + sum = "h1:ebbhrRiGK2i4naQJr+1Xj92HXZCrK7MsyTS/ob3HnAk=", + version = "v1.22.14", + ) + go_repository( + name = "com_github_vbatts_tar_split", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/vbatts/tar-split", + sum = "h1:hLFqsOLQ1SsppQNTMpkpPXClLDfC2A3Zgy9OUU+RVck=", + version = "v0.11.3", + ) + go_repository( + name = "com_github_vektah_gqlparser_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/vektah/gqlparser/v2", + sum = "h1:C02NsyEsL4TXJB7ndonqTfuQOL4XPIu0aAWugdmTgmc=", + version = "v2.4.5", + ) + go_repository( + name = "com_github_veraison_go_cose", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/veraison/go-cose", + sum = "h1:Ok0Hr3GMAf8K/1NB4sV65QGgCiukG1w1QD+H5tmt0Ow=", + version = "v1.2.0", + ) + go_repository( + name = "com_github_vincent_petithory_dataurl", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/vincent-petithory/dataurl", + sum = "h1:cXw+kPto8NLuJtlMsI152irrVw9fRDX8AbShPRpg2CI=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_vishvananda_netlink", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/vishvananda/netlink", + sum = "h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs=", + version = "v1.2.1-beta.2", + ) + go_repository( + name = "com_github_vishvananda_netns", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/vishvananda/netns", + sum = "h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=", + version = "v0.0.4", + ) + go_repository( + name = "com_github_vmihailenco_msgpack", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/vmihailenco/msgpack", + sum = "h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI=", + version = "v4.0.4+incompatible", + ) + go_repository( + name = "com_github_vmihailenco_msgpack_v5", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/vmihailenco/msgpack/v5", + sum = "h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8=", + version = "v5.4.1", + ) + go_repository( + name = "com_github_vmihailenco_tagparser_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/vmihailenco/tagparser/v2", + sum = "h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=", + version = "v2.0.0", + ) + go_repository( + name = "com_github_vmware_govmomi", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/vmware/govmomi", + sum = "h1:O3tjSwQBy0XwI5uK1/yVIfQ1LP9bAECEDUfifnyGs9U=", + version = "v0.30.6", + ) + go_repository( + name = "com_github_vtolstov_go_ioctl", + build_file_generation = "on", + build_file_proto_mode = 
"disable_global", + importpath = "github.com/vtolstov/go-ioctl", + sum = "h1:X6ps8XHfpQjw8dUStzlMi2ybiKQ2Fmdw7UM+TinwvyM=", + version = "v0.0.0-20151206205506-6be9cced4810", + ) + go_repository( + name = "com_github_weppos_publicsuffix_go", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/weppos/publicsuffix-go", + sum = "h1:h2JizvZl9aIj6za9S5AyrkU+OzIS4CetQthH/ejO+lg=", + version = "v0.30.2-0.20230730094716-a20f9abcc222", + ) + go_repository( + name = "com_github_workiva_go_datastructures", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/Workiva/go-datastructures", + sum = "h1:J6Y/52yX10Xc5JjXmGtWoSSxs3mZnGSaq37xZZh7Yig=", + version = "v1.0.53", + ) + go_repository( + name = "com_github_x448_float16", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/x448/float16", + sum = "h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=", + version = "v0.8.4", + ) + go_repository( + name = "com_github_xanzy_ssh_agent", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/xanzy/ssh-agent", + sum = "h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=", + version = "v0.3.3", + ) + go_repository( + name = "com_github_xdg_go_pbkdf2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/xdg-go/pbkdf2", + sum = "h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_xdg_go_scram", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/xdg-go/scram", + sum = "h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY=", + version = "v1.1.2", + ) + go_repository( + name = "com_github_xdg_go_stringprep", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/xdg-go/stringprep", + sum = "h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8=", + version = "v1.0.4", + ) + go_repository( + name = "com_github_xeipuuv_gojsonpointer", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/xeipuuv/gojsonpointer", + sum = "h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=", + version = "v0.0.0-20190905194746-02993c407bfb", + ) + go_repository( + name = "com_github_xeipuuv_gojsonreference", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/xeipuuv/gojsonreference", + sum = "h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=", + version = "v0.0.0-20180127040603-bd5ef7bd5415", + ) + go_repository( + name = "com_github_xeipuuv_gojsonschema", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/xeipuuv/gojsonschema", + sum = "h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=", + version = "v1.2.0", + ) + go_repository( + name = "com_github_xhit_go_str2duration_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/xhit/go-str2duration/v2", + sum = "h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc=", + version = "v2.1.0", + ) + go_repository( + name = "com_github_xiang90_probing", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/xiang90/probing", + sum = "h1:S2dVYn90KE98chqDkyE9Z4N61UnQd+KOfgp5Iu53llk=", + version = "v0.0.0-20221125231312-a49e3df8f510", + ) + go_repository( + name = 
"com_github_xlab_treeprint", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/xlab/treeprint", + sum = "h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ=", + version = "v1.2.0", + ) + go_repository( + name = "com_github_yashtewari_glob_intersection", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/yashtewari/glob-intersection", + sum = "h1:6gJvMYQlTDOL3dMsPF6J0+26vwX9MB8/1q3uAdhmTrg=", + version = "v0.1.0", + ) + go_repository( + name = "com_github_youmark_pkcs8", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/youmark/pkcs8", + sum = "h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA=", + version = "v0.0.0-20181117223130-1be2e3e5546d", + ) + go_repository( + name = "com_github_ysmood_fetchup", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/ysmood/fetchup", + sum = "h1:ulX+SonA0Vma5zUFXtv52Kzip/xe7aj4vqT5AJwQ+ZQ=", + version = "v0.2.3", + ) + go_repository( + name = "com_github_ysmood_goob", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/ysmood/goob", + sum = "h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ=", + version = "v0.4.0", + ) + go_repository( + name = "com_github_ysmood_got", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/ysmood/got", + sum = "h1:IrV2uWLs45VXNvZqhJ6g2nIhY+pgIG1CUoOcqfXFl1s=", + version = "v0.34.1", + ) + go_repository( + name = "com_github_ysmood_gson", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/ysmood/gson", + sum = "h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE=", + version = "v0.7.3", + ) + go_repository( + name = "com_github_ysmood_leakless", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/ysmood/leakless", + sum = "h1:BzLrVoiwxikpgEQR0Lk8NyBN5Cit2b1z+u0mgL4ZJak=", + version = "v0.8.0", + ) + go_repository( + name = "com_github_yuin_goldmark", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/yuin/goldmark", + sum = "h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE=", + version = "v1.4.13", + ) + go_repository( + name = "com_github_yuin_gopher_lua", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/yuin/gopher-lua", + sum = "h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M=", + version = "v1.1.1", + ) + go_repository( + name = "com_github_yvasiyarov_go_metrics", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/yvasiyarov/go-metrics", + sum = "h1:+lm10QQTNSBd8DVTNGHx7o/IKu9HYDvLMffDhbyLccI=", + version = "v0.0.0-20140926110328-57bccd1ccd43", + ) + go_repository( + name = "com_github_yvasiyarov_gorelic", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/yvasiyarov/gorelic", + sum = "h1:hlE8//ciYMztlGpl/VA+Zm1AcTPHYkHJPbHqE6WJUXE=", + version = "v0.0.0-20141212073537-a9bba5b9ab50", + ) + go_repository( + name = "com_github_yvasiyarov_newrelic_platform_go", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/yvasiyarov/newrelic_platform_go", + sum = "h1:ERexzlUfuTvpE74urLSbIQW0Z/6hF9t8U4NsJLaioAY=", + version = "v0.0.0-20140908184405-b21fdbd4370f", + ) + go_repository( + 
name = "com_github_zalando_go_keyring", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/zalando/go-keyring", + sum = "h1:f0xmpYiSrHtSNAVgwip93Cg8tuF45HJM6rHq/A5RI/4=", + version = "v0.2.2", + ) + go_repository( + name = "com_github_zclconf_go_cty", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/zclconf/go-cty", + sum = "h1:kTG7lqmBou0Zkx35r6HJHUQTvaRPr5bIAf3AoHS0izI=", + version = "v1.14.2", + ) + go_repository( + name = "com_github_zclconf_go_cty_debug", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/zclconf/go-cty-debug", + sum = "h1:FosyBZYxY34Wul7O/MSKey3txpPYyCqVO5ZyceuQJEI=", + version = "v0.0.0-20191215020915-b22d67c1ba0b", + ) + go_repository( + name = "com_github_zmap_zcrypto", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/zmap/zcrypto", + sum = "h1:U1b4THKcgOpJ+kILupuznNwPiURtwVW3e9alJvji9+s=", + version = "v0.0.0-20231219022726-a1f61fb1661c", + ) + go_repository( + name = "com_github_zmap_zlint_v3", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "github.com/zmap/zlint/v3", + sum = "h1:vTEaDRtYN0d/1Ax60T+ypvbLQUHwHxbvYRnUMVr35ug=", + version = "v3.6.0", + ) + go_repository( + name = "com_google_cloud_go", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go", + sum = "h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM=", + version = "v0.112.0", + ) + go_repository( + name = "com_google_cloud_go_accessapproval", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/accessapproval", + sum = "h1:uzmAMSgYcnlHa9X9YSQZ4Q1wlfl4NNkZyQgho1Z6p04=", + version = "v1.7.5", + ) + go_repository( + name = "com_google_cloud_go_accesscontextmanager", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/accesscontextmanager", + sum = "h1:2GLNaNu9KRJhJBFTIVRoPwk6xE5mUDgD47abBq4Zp/I=", + version = "v1.8.5", + ) + go_repository( + name = "com_google_cloud_go_aiplatform", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/aiplatform", + sum = "h1:0cSrii1ZeLr16MbBoocyy5KVnrSdiQ3KN/vtrTe7RqE=", + version = "v1.60.0", + ) + go_repository( + name = "com_google_cloud_go_analytics", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/analytics", + sum = "h1:Q+y94XH84jM8SK8O7qiY/PJRexb6n7dRbQ6PiUa4YGM=", + version = "v0.23.0", + ) + go_repository( + name = "com_google_cloud_go_apigateway", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/apigateway", + sum = "h1:sPXnpk+6TneKIrjCjcpX5YGsAKy3PTdpIchoj8/74OE=", + version = "v1.6.5", + ) + go_repository( + name = "com_google_cloud_go_apigeeconnect", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/apigeeconnect", + sum = "h1:CrfIKv9Go3fh/QfQgisU3MeP90Ww7l/sVGmr3TpECo8=", + version = "v1.6.5", + ) + go_repository( + name = "com_google_cloud_go_apigeeregistry", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/apigeeregistry", + sum = "h1:C+QU2K+DzDjk4g074ouwHQGkoff1h5OMQp6sblCVreQ=", + version = "v0.8.3", + ) + go_repository( + 
name = "com_google_cloud_go_appengine", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/appengine", + sum = "h1:l2SviT44zWQiOv8bPoMBzW0vOcMO22iO0s+nVtVhdts=", + version = "v1.8.5", + ) + go_repository( + name = "com_google_cloud_go_area120", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/area120", + sum = "h1:vTs08KPLN/iMzTbxpu5ciL06KcsrVPMjz4IwcQyZ4uY=", + version = "v0.8.5", + ) + go_repository( + name = "com_google_cloud_go_artifactregistry", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/artifactregistry", + sum = "h1:W9sVlyb1VRcUf83w7aM3yMsnp4HS4PoyGqYQNG0O5lI=", + version = "v1.14.7", + ) + go_repository( + name = "com_google_cloud_go_asset", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/asset", + sum = "h1:xgFnBP3luSbUcC9RWJvb3Zkt+y/wW6PKwPHr3ssnIP8=", + version = "v1.17.2", + ) + go_repository( + name = "com_google_cloud_go_assuredworkloads", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/assuredworkloads", + sum = "h1:gCrN3IyvqY3cP0wh2h43d99CgH3G+WYs9CeuFVKChR8=", + version = "v1.11.5", + ) + go_repository( + name = "com_google_cloud_go_automl", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/automl", + sum = "h1:ijiJy9sYWh75WrqImXsfWc1e3HR3iO+ef9fvW03Ig/4=", + version = "v1.13.5", + ) + go_repository( + name = "com_google_cloud_go_baremetalsolution", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/baremetalsolution", + sum = "h1:LFydisRmS7hQk9P/YhekwuZGqb45TW4QavcrMToWo5A=", + version = "v1.2.4", + ) + go_repository( + name = "com_google_cloud_go_batch", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/batch", + sum = "h1:2HK4JerwVaIcCh/lJiHwh6+uswPthiMMWhiSWLELayk=", + version = "v1.8.0", + ) + go_repository( + name = "com_google_cloud_go_beyondcorp", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/beyondcorp", + sum = "h1:qs0J0O9Ol2h1yA0AU+r7l3hOCPzs2MjE1d6d/kaHIKo=", + version = "v1.0.4", + ) + go_repository( + name = "com_google_cloud_go_bigquery", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/bigquery", + sum = "h1:CpT+/njKuKT3CEmswm6IbhNu9u35zt5dO4yPDLW+nG4=", + version = "v1.59.1", + ) + go_repository( + name = "com_google_cloud_go_billing", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/billing", + sum = "h1:oWUEQvuC4JvtnqLZ35zgzdbuHt4Itbftvzbe6aEyFdE=", + version = "v1.18.2", + ) + go_repository( + name = "com_google_cloud_go_binaryauthorization", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/binaryauthorization", + sum = "h1:1jcyh2uIUwSZkJ/JmL8kd5SUkL/Krbv8zmYLEbAz6kY=", + version = "v1.8.1", + ) + go_repository( + name = "com_google_cloud_go_certificatemanager", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/certificatemanager", + sum = "h1:UMBr/twXvH3jcT5J5/YjRxf2tvwTYIfrpemTebe0txc=", + version = "v1.7.5", + ) + go_repository( + name = 
"com_google_cloud_go_channel", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/channel", + sum = "h1:/omiBnyFjm4S1ETHoOmJbL7LH7Ljcei4rYG6Sj3hc80=", + version = "v1.17.5", + ) + go_repository( + name = "com_google_cloud_go_cloudbuild", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/cloudbuild", + sum = "h1:ZB6oOmJo+MTov9n629fiCrO9YZPOg25FZvQ7gIHu5ng=", + version = "v1.15.1", + ) + go_repository( + name = "com_google_cloud_go_clouddms", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/clouddms", + sum = "h1:Sr0Zo5EAcPQiCBgHWICg3VGkcdS/LLP1d9SR7qQBM/s=", + version = "v1.7.4", + ) + go_repository( + name = "com_google_cloud_go_cloudtasks", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/cloudtasks", + sum = "h1:EUt1hIZ9bLv8Iz9yWaCrqgMnIU+Tdh0yXM1MMVGhjfE=", + version = "v1.12.6", + ) + go_repository( + name = "com_google_cloud_go_compute", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/compute", + sum = "h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg=", + version = "v1.24.0", + ) + go_repository( + name = "com_google_cloud_go_compute_metadata", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/compute/metadata", + sum = "h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=", + version = "v0.2.3", + ) + go_repository( + name = "com_google_cloud_go_contactcenterinsights", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/contactcenterinsights", + sum = "h1:6Vs/YnDG5STGjlWMEjN/xtmft7MrOTOnOZYUZtGTx0w=", + version = "v1.13.0", + ) + go_repository( + name = "com_google_cloud_go_container", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/container", + sum = "h1:MAaNH7VRNPWEhvqOypq2j+7ONJKrKzon4v9nS3nLZe0=", + version = "v1.31.0", + ) + go_repository( + name = "com_google_cloud_go_containeranalysis", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/containeranalysis", + sum = "h1:doJ0M1ljS4hS0D2UbHywlHGwB7sQLNrt9vFk9Zyi7vY=", + version = "v0.11.4", + ) + go_repository( + name = "com_google_cloud_go_datacatalog", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/datacatalog", + sum = "h1:A0vKYCQdxQuV4Pi0LL9p39Vwvg4jH5yYveMv50gU5Tw=", + version = "v1.19.3", + ) + go_repository( + name = "com_google_cloud_go_dataflow", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/dataflow", + sum = "h1:RYHtcPhmE664+F0Je46p+NvFbG8z//KCXp+uEqB4jZU=", + version = "v0.9.5", + ) + go_repository( + name = "com_google_cloud_go_dataform", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/dataform", + sum = "h1:5e4eqGrd0iDTCg4Q+VlAao5j2naKAA7xRurNtwmUknU=", + version = "v0.9.2", + ) + go_repository( + name = "com_google_cloud_go_datafusion", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/datafusion", + sum = "h1:HQ/BUOP8OIGJxuztpYvNvlb+/U+/Bfs9SO8tQbh61fk=", + version = "v1.7.5", + ) + go_repository( + name = 
"com_google_cloud_go_datalabeling", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/datalabeling", + sum = "h1:GpIFRdm0qIZNsxqURFJwHt0ZBJZ0nF/mUVEigR7PH/8=", + version = "v0.8.5", + ) + go_repository( + name = "com_google_cloud_go_dataplex", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/dataplex", + sum = "h1:fxIfdU8fxzR3clhOoNI7XFppvAmndxDu1AMH+qX9WKQ=", + version = "v1.14.2", + ) + go_repository( + name = "com_google_cloud_go_dataproc_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/dataproc/v2", + sum = "h1:/u81Fd+BvCLp+xjctI1DiWVJn6cn9/s3Akc8xPH02yk=", + version = "v2.4.0", + ) + go_repository( + name = "com_google_cloud_go_dataqna", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/dataqna", + sum = "h1:9ybXs3nr9BzxSGC04SsvtuXaHY0qmJSLIpIAbZo9GqQ=", + version = "v0.8.5", + ) + go_repository( + name = "com_google_cloud_go_datastore", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/datastore", + sum = "h1:0P9WcsQeTWjuD1H14JIY7XQscIPQ4Laje8ti96IC5vg=", + version = "v1.15.0", + ) + go_repository( + name = "com_google_cloud_go_datastream", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/datastream", + sum = "h1:o1QDKMo/hk0FN7vhoUQURREuA0rgKmnYapB+1M+7Qz4=", + version = "v1.10.4", + ) + go_repository( + name = "com_google_cloud_go_deploy", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/deploy", + sum = "h1:m27Ojwj03gvpJqCbodLYiVmE9x4/LrHGGMjzc0LBfM4=", + version = "v1.17.1", + ) + go_repository( + name = "com_google_cloud_go_dialogflow", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/dialogflow", + sum = "h1:KqG0oxGE71qo0lRVyAoeBozefCvsMfcDzDjoLYSY0F4=", + version = "v1.49.0", + ) + go_repository( + name = "com_google_cloud_go_dlp", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/dlp", + sum = "h1:lTipOuJaSjlYnnotPMbEhKURLC6GzCMDDzVbJAEbmYM=", + version = "v1.11.2", + ) + go_repository( + name = "com_google_cloud_go_documentai", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/documentai", + sum = "h1:lI62GMEEPO6vXJI9hj+G9WjOvnR0hEjvjokrnex4cxA=", + version = "v1.25.0", + ) + go_repository( + name = "com_google_cloud_go_domains", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/domains", + sum = "h1:Mml/R6s3vQQvFPpi/9oX3O5dRirgjyJ8cksK8N19Y7g=", + version = "v0.9.5", + ) + go_repository( + name = "com_google_cloud_go_edgecontainer", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/edgecontainer", + sum = "h1:tBY32km78ScpK2aOP84JoW/+wtpx5WluyPUSEE3270U=", + version = "v1.1.5", + ) + go_repository( + name = "com_google_cloud_go_errorreporting", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/errorreporting", + sum = "h1:kj1XEWMu8P0qlLhm3FwcaFsUvXChV/OraZwA70trRR0=", + version = "v0.3.0", + ) + go_repository( + name = "com_google_cloud_go_essentialcontacts", + 
build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/essentialcontacts", + sum = "h1:13eHn5qBnsawxI7mIrv4jRIEmQ1xg0Ztqw5ZGqtUNfA=", + version = "v1.6.6", + ) + go_repository( + name = "com_google_cloud_go_eventarc", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/eventarc", + sum = "h1:ORkd6/UV5FIdA8KZQDLNZYKS7BBOrj0p01DXPmT4tE4=", + version = "v1.13.4", + ) + go_repository( + name = "com_google_cloud_go_filestore", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/filestore", + sum = "h1:X5G4y/vrUo1B8Nsz93qSWTMAcM8LXbGUldq33OdcdCw=", + version = "v1.8.1", + ) + go_repository( + name = "com_google_cloud_go_firestore", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/firestore", + sum = "h1:8aLcKnMPoldYU3YHgu4t2exrKhLQkqaXAGqT0ljrFVw=", + version = "v1.14.0", + ) + go_repository( + name = "com_google_cloud_go_functions", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/functions", + sum = "h1:IWVylmK5F6hJ3R5zaRW7jI5PrWhCvtBVU4axQLmXSo4=", + version = "v1.16.0", + ) + go_repository( + name = "com_google_cloud_go_gkebackup", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/gkebackup", + sum = "h1:iuE8KNtTsPOc79qeWoNS8zOWoXPD9SAdOmwgxtlCmh8=", + version = "v1.3.5", + ) + go_repository( + name = "com_google_cloud_go_gkeconnect", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/gkeconnect", + sum = "h1:17d+ZSSXKqG/RwZCq3oFMIWLPI8Zw3b8+a9/BEVlwH0=", + version = "v0.8.5", + ) + go_repository( + name = "com_google_cloud_go_gkehub", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/gkehub", + sum = "h1:RboLNFzf9wEMSo7DrKVBlf+YhK/A/jrLN454L5Tz99Q=", + version = "v0.14.5", + ) + go_repository( + name = "com_google_cloud_go_gkemulticloud", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/gkemulticloud", + sum = "h1:rsSZAGLhyjyE/bE2ToT5fqo1qSW7S+Ubsc9jFOcbhSI=", + version = "v1.1.1", + ) + go_repository( + name = "com_google_cloud_go_gsuiteaddons", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/gsuiteaddons", + sum = "h1:CZEbaBwmbYdhFw21Fwbo+C35HMe36fTE0FBSR4KSfWg=", + version = "v1.6.5", + ) + go_repository( + name = "com_google_cloud_go_iam", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/iam", + sum = "h1:bEa06k05IO4f4uJonbB5iAgKTPpABy1ayxaIZV/GHVc=", + version = "v1.1.6", + ) + go_repository( + name = "com_google_cloud_go_iap", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/iap", + sum = "h1:94zirc2r4t6KzhAMW0R6Dme005eTP6yf7g6vN4IhRrA=", + version = "v1.9.4", + ) + go_repository( + name = "com_google_cloud_go_ids", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/ids", + sum = "h1:xd4U7pgl3GHV+MABnv1BF4/Vy/zBF7CYC8XngkOLzag=", + version = "v1.4.5", + ) + go_repository( + name = "com_google_cloud_go_iot", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = 
"cloud.google.com/go/iot", + sum = "h1:munTeBlbqI33iuTYgXy7S8lW2TCgi5l1hA4roSIY+EE=", + version = "v1.7.5", + ) + go_repository( + name = "com_google_cloud_go_kms", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/kms", + sum = "h1:7caV9K3yIxvlQPAcaFffhlT7d1qpxjB1wHBtjWa13SM=", + version = "v1.15.7", + ) + go_repository( + name = "com_google_cloud_go_language", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/language", + sum = "h1:iaJZg6K4j/2PvZZVcjeO/btcWWIllVRBhuTFjGO4LXs=", + version = "v1.12.3", + ) + go_repository( + name = "com_google_cloud_go_lifesciences", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/lifesciences", + sum = "h1:gXvN70m2p+4zgJFzaz6gMKaxTuF9WJ0USYoMLWAOm8g=", + version = "v0.9.5", + ) + go_repository( + name = "com_google_cloud_go_logging", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/logging", + sum = "h1:iEIOXFO9EmSiTjDmfpbRjOxECO7R8C7b8IXUGOj7xZw=", + version = "v1.9.0", + ) + go_repository( + name = "com_google_cloud_go_longrunning", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/longrunning", + sum = "h1:GOE6pZFdSrTb4KAiKnXsJBtlE6mEyaW44oKyMILWnOg=", + version = "v0.5.5", + ) + go_repository( + name = "com_google_cloud_go_managedidentities", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/managedidentities", + sum = "h1:+bpih1piZVLxla/XBqeSUzJBp8gv9plGHIMAI7DLpDM=", + version = "v1.6.5", + ) + go_repository( + name = "com_google_cloud_go_maps", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/maps", + sum = "h1:EVCZAiDvog9So46460BGbCasPhi613exoaQbpilMVlk=", + version = "v1.6.4", + ) + go_repository( + name = "com_google_cloud_go_mediatranslation", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/mediatranslation", + sum = "h1:c76KdIXljQHSCb/Cy47S8H4s05A4zbK3pAFGzwcczZo=", + version = "v0.8.5", + ) + go_repository( + name = "com_google_cloud_go_memcache", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/memcache", + sum = "h1:yeDv5qxRedFosvpMSEswrqUsJM5OdWvssPHFliNFTc4=", + version = "v1.10.5", + ) + go_repository( + name = "com_google_cloud_go_metastore", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/metastore", + sum = "h1:dR7vqWXlK6IYR8Wbu9mdFfwlVjodIBhd1JRrpZftTEg=", + version = "v1.13.4", + ) + go_repository( + name = "com_google_cloud_go_monitoring", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/monitoring", + sum = "h1:NfkDLQDG2UR3WYZVQE8kwSbUIEyIqJUPl+aOQdFH1T4=", + version = "v1.18.0", + ) + go_repository( + name = "com_google_cloud_go_networkconnectivity", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/networkconnectivity", + sum = "h1:GBfXFhLyPspnaBE3nI/BRjdhW8vcbpT9QjE/4kDCDdc=", + version = "v1.14.4", + ) + go_repository( + name = "com_google_cloud_go_networkmanagement", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = 
"cloud.google.com/go/networkmanagement", + sum = "h1:aLV5GcosBNmd6M8+a0ekB0XlLRexv4fvnJJrYnqeBcg=", + version = "v1.9.4", + ) + go_repository( + name = "com_google_cloud_go_networksecurity", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/networksecurity", + sum = "h1:+caSxBTj0E8OYVh/5wElFdjEMO1S/rZtE1152Cepchc=", + version = "v0.9.5", + ) + go_repository( + name = "com_google_cloud_go_notebooks", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/notebooks", + sum = "h1:FH48boYmrWVQ6k0Mx/WrnNafXncT5iSYxA8CNyWTgy0=", + version = "v1.11.3", + ) + go_repository( + name = "com_google_cloud_go_optimization", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/optimization", + sum = "h1:63NZaWyN+5rZEKHPX4ACpw3BjgyeuY8+rCehiCMaGPY=", + version = "v1.6.3", + ) + go_repository( + name = "com_google_cloud_go_orchestration", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/orchestration", + sum = "h1:YHgWMlrPttIVGItgGfuvO2KM7x+y9ivN/Yk92pMm1a4=", + version = "v1.8.5", + ) + go_repository( + name = "com_google_cloud_go_orgpolicy", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/orgpolicy", + sum = "h1:2JbXigqBJVp8Dx5dONUttFqewu4fP0p3pgOdIZAhpYU=", + version = "v1.12.1", + ) + go_repository( + name = "com_google_cloud_go_osconfig", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/osconfig", + sum = "h1:Mo5jGAxOMKH/PmDY7fgY19yFcVbvwREb5D5zMPQjFfo=", + version = "v1.12.5", + ) + go_repository( + name = "com_google_cloud_go_oslogin", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/oslogin", + sum = "h1:1K4nOT5VEZNt7XkhaTXupBYos5HjzvJMfhvyD2wWdFs=", + version = "v1.13.1", + ) + go_repository( + name = "com_google_cloud_go_phishingprotection", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/phishingprotection", + sum = "h1:DH3WFLzEoJdW/6xgsmoDqOwT1xddFi7gKu0QGZQhpGU=", + version = "v0.8.5", + ) + go_repository( + name = "com_google_cloud_go_policytroubleshooter", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/policytroubleshooter", + sum = "h1:c0WOzC6hz964QWNBkyKfna8A2jOIx1zzZa43Gx/P09o=", + version = "v1.10.3", + ) + go_repository( + name = "com_google_cloud_go_privatecatalog", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/privatecatalog", + sum = "h1:UZ0assTnATXSggoxUIh61RjTQ4P9zCMk/kEMbn0nMYA=", + version = "v0.9.5", + ) + go_repository( + name = "com_google_cloud_go_profiler", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/profiler", + sum = "h1:ZeRDZbsOBDyRG0OiK0Op1/XWZ3xeLwJc9zjkzczUxyY=", + version = "v0.4.0", + ) + go_repository( + name = "com_google_cloud_go_pubsub", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/pubsub", + sum = "h1:dfEPuGCHGbWUhaMCTHUFjfroILEkx55iUmKBZTP5f+Y=", + version = "v1.36.1", + ) + go_repository( + name = "com_google_cloud_go_pubsublite", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + 
importpath = "cloud.google.com/go/pubsublite", + sum = "h1:pX+idpWMIH30/K7c0epN6V703xpIcMXWRjKJsz0tYGY=", + version = "v1.8.1", + ) + go_repository( + name = "com_google_cloud_go_recaptchaenterprise_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/recaptchaenterprise/v2", + sum = "h1:U3Wfq12X9cVMuTpsWDSURnXF0Z9hSPTHj+xsnXDRLsw=", + version = "v2.9.2", + ) + go_repository( + name = "com_google_cloud_go_recommendationengine", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/recommendationengine", + sum = "h1:ineqLswaCSBY0csYv5/wuXJMBlxATK6Xc5jJkpiTEdM=", + version = "v0.8.5", + ) + go_repository( + name = "com_google_cloud_go_recommender", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/recommender", + sum = "h1:LVLYS3r3u0MSCxQSDUtLSkporEGi9OAE6hGvayrZNPs=", + version = "v1.12.1", + ) + go_repository( + name = "com_google_cloud_go_redis", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/redis", + sum = "h1:QF0maEdVv0Fj/2roU8sX3NpiDBzP9ICYTO+5F32gQNo=", + version = "v1.14.2", + ) + go_repository( + name = "com_google_cloud_go_resourcemanager", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/resourcemanager", + sum = "h1:AZWr1vWVDKGwfLsVhcN+vcwOz3xqqYxtmMa0aABCMms=", + version = "v1.9.5", + ) + go_repository( + name = "com_google_cloud_go_resourcesettings", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/resourcesettings", + sum = "h1:BTr5MVykJwClASci/7Og4Qfx70aQ4n3epsNLj94ZYgw=", + version = "v1.6.5", + ) + go_repository( + name = "com_google_cloud_go_retail", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/retail", + sum = "h1:Fn1GuAua1c6crCGqfJ1qMxG1Xh10Tg/x5EUODEHMqkw=", + version = "v1.16.0", + ) + go_repository( + name = "com_google_cloud_go_run", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/run", + sum = "h1:m9WDA7DzTpczhZggwYlZcBWgCRb+kgSIisWn1sbw2rQ=", + version = "v1.3.4", + ) + go_repository( + name = "com_google_cloud_go_scheduler", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/scheduler", + sum = "h1:5U8iXLoQ03qOB+ZXlAecU7fiE33+u3QiM9nh4cd0eTE=", + version = "v1.10.6", + ) + go_repository( + name = "com_google_cloud_go_secretmanager", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/secretmanager", + sum = "h1:82fpF5vBBvu9XW4qj0FU2C6qVMtj1RM/XHwKXUEAfYY=", + version = "v1.11.5", + ) + go_repository( + name = "com_google_cloud_go_security", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/security", + sum = "h1:wTKJQ10j8EYgvE8Y+KhovxDRVDk2iv/OsxZ6GrLP3kE=", + version = "v1.15.5", + ) + go_repository( + name = "com_google_cloud_go_securitycenter", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/securitycenter", + sum = "h1:/5jjkZ+uGe8hZ7pvd7pO30VW/a+pT2MrrdgOqjyucKQ=", + version = "v1.24.4", + ) + go_repository( + name = "com_google_cloud_go_servicedirectory", + build_file_generation = "on", + build_file_proto_mode = 
"disable_global", + importpath = "cloud.google.com/go/servicedirectory", + sum = "h1:da7HFI1229kyzIyuVEzHXip0cw0d+E0s8mjQby0WN+k=", + version = "v1.11.4", + ) + go_repository( + name = "com_google_cloud_go_shell", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/shell", + sum = "h1:3Fq2hzO0ZSyaqBboJrFkwwf/qMufDtqwwA6ep8EZxEI=", + version = "v1.7.5", + ) + go_repository( + name = "com_google_cloud_go_spanner", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/spanner", + sum = "h1:fJq+ZfQUDHE+cy1li0bJA8+sy2oiSGhuGqN5nqVaZdU=", + version = "v1.57.0", + ) + go_repository( + name = "com_google_cloud_go_speech", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/speech", + sum = "h1:nuFc+Kj5B8de75nN4FdPyUbI2SiBoHZG6BLurXL56Q0=", + version = "v1.21.1", + ) + go_repository( + name = "com_google_cloud_go_storage", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/storage", + sum = "h1:Az68ZRGlnNTpIBbLjSMIV2BDcwwXYlRlQzis0llkpJg=", + version = "v1.38.0", + ) + go_repository( + name = "com_google_cloud_go_storagetransfer", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/storagetransfer", + sum = "h1:dy4fL3wO0VABvzM05ycMUPFHxTPbJz9Em8ikAJVqSbI=", + version = "v1.10.4", + ) + go_repository( + name = "com_google_cloud_go_talent", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/talent", + sum = "h1:JssV0CE3FNujuSWn7SkosOzg7qrMxVnt6txOfGcMSa4=", + version = "v1.6.6", + ) + go_repository( + name = "com_google_cloud_go_texttospeech", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/texttospeech", + sum = "h1:dxY2Q5mHCbrGa3oPR2O3PCicdnvKa1JmwGQK36EFLOw=", + version = "v1.7.5", + ) + go_repository( + name = "com_google_cloud_go_tpu", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/tpu", + sum = "h1:C8YyYda8WtNdBoCgFwwBzZd+S6+EScHOxM/z1h0NNp8=", + version = "v1.6.5", + ) + go_repository( + name = "com_google_cloud_go_trace", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/trace", + sum = "h1:0pr4lIKJ5XZFYD9GtxXEWr0KkVeigc3wlGpZco0X1oA=", + version = "v1.10.5", + ) + go_repository( + name = "com_google_cloud_go_translate", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/translate", + sum = "h1:upovZ0wRMdzZvXnu+RPam41B0mRJ+coRXFP2cYFJ7ew=", + version = "v1.10.1", + ) + go_repository( + name = "com_google_cloud_go_video", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/video", + sum = "h1:TXwotxkShP1OqgKsbd+b8N5hrIHavSyLGvYnLGCZ7xc=", + version = "v1.20.4", + ) + go_repository( + name = "com_google_cloud_go_videointelligence", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/videointelligence", + sum = "h1:mYaWH8uhUCXLJCN3gdXswKzRa2+lK0zN6/KsIubm6pE=", + version = "v1.11.5", + ) + go_repository( + name = "com_google_cloud_go_vision_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/vision/v2", + sum = 
"h1:W52z1b6LdGI66MVhE70g/NFty9zCYYcjdKuycqmlhtg=", + version = "v2.8.0", + ) + go_repository( + name = "com_google_cloud_go_vmmigration", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/vmmigration", + sum = "h1:5v9RT2vWyuw3pK2ox0HQpkoftO7Q7/8591dTxxQc79g=", + version = "v1.7.5", + ) + go_repository( + name = "com_google_cloud_go_vmwareengine", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/vmwareengine", + sum = "h1:EGdDi9QbqThfZq3ILcDK5g+m9jTevc34AY5tACx5v7k=", + version = "v1.1.1", + ) + go_repository( + name = "com_google_cloud_go_vpcaccess", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/vpcaccess", + sum = "h1:XyL6hTLtEM/eE4F1GEge8xUN9ZCkiVWn44K/YA7z1rQ=", + version = "v1.7.5", + ) + go_repository( + name = "com_google_cloud_go_webrisk", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/webrisk", + sum = "h1:251MvGuC8wisNN7+jqu9DDDZAi38KiMXxOpA/EWy4dE=", + version = "v1.9.5", + ) + go_repository( + name = "com_google_cloud_go_websecurityscanner", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/websecurityscanner", + sum = "h1:YqWZrZYabG88TZt7364XWRJGhxmxhony2ZUyZEYMF2k=", + version = "v1.6.5", + ) + go_repository( + name = "com_google_cloud_go_workflows", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "cloud.google.com/go/workflows", + sum = "h1:uHNmUiatTbPQ4H1pabwfzpfEYD4BBnqDHqMm2IesOh4=", + version = "v1.12.4", + ) + go_repository( + name = "com_shuralyov_dmitri_gpu_mtl", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "dmitri.shuralyov.com/gpu/mtl", + sum = "h1:VpgP7xuJadIUuKccphEpTJnWhS2jkQyMt6Y7pJCD7fY=", + version = "v0.0.0-20190408044501-666a987793e9", + ) + go_repository( + name = "com_sslmate_software_src_go_pkcs12", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "software.sslmate.com/src/go-pkcs12", + sum = "h1:nlFkj7bTysH6VkC4fGphtjXRbezREPgrHuJG20hBGPE=", + version = "v0.2.0", + ) + go_repository( + name = "dev_gocloud", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "gocloud.dev", + sum = "h1:q5zoXux4xkOZP473e1EZbG8Gq9f0vlg1VNH5Du/ybus=", + version = "v0.36.0", + ) + go_repository( + name = "in_gopkg_alecthomas_kingpin_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "gopkg.in/alecthomas/kingpin.v2", + sum = "h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=", + version = "v2.2.6", + ) + go_repository( + name = "in_gopkg_check_v1", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "gopkg.in/check.v1", + sum = "h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=", + version = "v1.0.0-20201130134442-10cb98267c6c", + ) + go_repository( + name = "in_gopkg_cheggaaa_pb_v1", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "gopkg.in/cheggaaa/pb.v1", + sum = "h1:n1tBJnnK2r7g9OW2btFH91V92STTUevLXYFb8gy9EMk=", + version = "v1.0.28", + ) + go_repository( + name = "in_gopkg_errgo_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "gopkg.in/errgo.v2", + sum = "h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8=", + version = "v2.1.0", 
+ ) + go_repository( + name = "in_gopkg_evanphx_json_patch_v5", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "gopkg.in/evanphx/json-patch.v5", + sum = "h1:hx1VU2SGj4F8r9b8GUwJLdc8DNO8sy79ZGui0G05GLo=", + version = "v5.9.0", + ) + go_repository( + name = "in_gopkg_gcfg_v1", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "gopkg.in/gcfg.v1", + sum = "h1:m8OOJ4ccYHnx2f4gQwpno8nAX5OGOh7RLaaz0pj3Ogs=", + version = "v1.2.3", + ) + go_repository( + name = "in_gopkg_go_jose_go_jose_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "gopkg.in/go-jose/go-jose.v2", + sum = "h1:Rl5+9rA0kG3vsO1qhncMPRT5eHICihAMQYJkD7u/i4M=", + version = "v2.6.2", + ) + go_repository( + name = "in_gopkg_inf_v0", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "gopkg.in/inf.v0", + sum = "h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=", + version = "v0.9.1", + ) + go_repository( + name = "in_gopkg_ini_v1", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "gopkg.in/ini.v1", + sum = "h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=", + version = "v1.67.0", + ) + go_repository( + name = "in_gopkg_natefinch_lumberjack_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "gopkg.in/natefinch/lumberjack.v2", + sum = "h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=", + version = "v2.2.1", + ) + go_repository( + name = "in_gopkg_square_go_jose_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "gopkg.in/square/go-jose.v2", + sum = "h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI=", + version = "v2.6.0", + ) + go_repository( + name = "in_gopkg_src_d_go_billy_v4", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "gopkg.in/src-d/go-billy.v4", + sum = "h1:0SQA1pRztfTFx2miS8sA97XvooFeNOmvUenF4o0EcVg=", + version = "v4.3.2", + ) + go_repository( + name = "in_gopkg_src_d_go_git_fixtures_v3", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "gopkg.in/src-d/go-git-fixtures.v3", + sum = "h1:ivZFOIltbce2Mo8IjzUHAFoq/IylO9WHhNOAJK+LsJg=", + version = "v3.5.0", + ) + go_repository( + name = "in_gopkg_src_d_go_git_v4", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "gopkg.in/src-d/go-git.v4", + sum = "h1:SRtFyV8Kxc0UP7aCHcijOMQGPxHSmMOPrzulQWolkYE=", + version = "v4.13.1", + ) + go_repository( + name = "in_gopkg_tomb_v1", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "gopkg.in/tomb.v1", + sum = "h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=", + version = "v1.0.0-20141024135613-dd632973f1e7", + ) + go_repository( + name = "in_gopkg_warnings_v0", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "gopkg.in/warnings.v0", + sum = "h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=", + version = "v0.1.2", + ) + go_repository( + name = "in_gopkg_yaml_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "gopkg.in/yaml.v2", + sum = "h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=", + version = "v2.4.0", + ) + go_repository( + name = "in_gopkg_yaml_v3", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "gopkg.in/yaml.v3", + sum = 
"h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=", + version = "v3.0.1", + ) + go_repository( + name = "io_cncf_tags_container_device_interface", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "tags.cncf.io/container-device-interface", + sum = "h1:dThE6dtp/93ZDGhqaED2Pu374SOeUkBfuvkLuiTdwzg=", + version = "v0.6.2", + ) + go_repository( + name = "io_cncf_tags_container_device_interface_specs_go", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "tags.cncf.io/container-device-interface/specs-go", + sum = "h1:V+tJJN6dqu8Vym6p+Ru+K5mJ49WL6Aoc5SJFSY0RLsQ=", + version = "v0.6.0", + ) + go_repository( + name = "io_etcd_go_bbolt", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "go.etcd.io/bbolt", + sum = "h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA=", + version = "v1.3.8", + ) + go_repository( + name = "io_etcd_go_etcd_api_v3", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "go.etcd.io/etcd/api/v3", + sum = "h1:W4sw5ZoU2Juc9gBWuLk5U6fHfNVyY1WC5g9uiXZio/c=", + version = "v3.5.12", + ) + go_repository( + name = "io_etcd_go_etcd_client_pkg_v3", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "go.etcd.io/etcd/client/pkg/v3", + sum = "h1:EYDL6pWwyOsylrQyLp2w+HkQ46ATiOvoEdMarindU2A=", + version = "v3.5.12", + ) + go_repository( + name = "io_etcd_go_etcd_client_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "go.etcd.io/etcd/client/v2", + sum = "h1:MrmRktzv/XF8CvtQt+P6wLUlURaNpSDJHFZhe//2QE4=", + version = "v2.305.10", + ) + go_repository( + name = "io_etcd_go_etcd_client_v3", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "go.etcd.io/etcd/client/v3", + sum = "h1:v5lCPXn1pf1Uu3M4laUE2hp/geOTc5uPcYYsNe1lDxg=", + version = "v3.5.12", + ) + go_repository( + name = "io_etcd_go_etcd_etcdctl_v3", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "go.etcd.io/etcd/etcdctl/v3", + sum = "h1:Wiv+g9i12mXjNgwz/3S8p01U3IPueGUbTcgBCpQ/Fw4=", + version = "v3.5.10", + ) + go_repository( + name = "io_etcd_go_etcd_etcdutl_v3", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "go.etcd.io/etcd/etcdutl/v3", + sum = "h1:o57fNgdP9Y99wZzpQ5ky5Jb6323/nisMtCOj1+kQwgc=", + version = "v3.5.10", + ) + go_repository( + name = "io_etcd_go_etcd_pkg_v3", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "go.etcd.io/etcd/pkg/v3", + sum = "h1:WPR8K0e9kWl1gAhB5A7gEa5ZBTNkT9NdNWrR8Qpo1CM=", + version = "v3.5.10", + ) + go_repository( + name = "io_etcd_go_etcd_raft_v3", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "go.etcd.io/etcd/raft/v3", + sum = "h1:cgNAYe7xrsrn/5kXMSaH8kM/Ky8mAdMqGOxyYwpP0LA=", + version = "v3.5.10", + ) + go_repository( + name = "io_etcd_go_etcd_server_v3", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "go.etcd.io/etcd/server/v3", + sum = "h1:4NOGyOwD5sUZ22PiWYKmfxqoeh72z6EhYjNosKGLmZg=", + version = "v3.5.10", + ) + go_repository( + name = "io_etcd_go_etcd_tests_v3", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "go.etcd.io/etcd/tests/v3", + sum = "h1:F1pbXwKxwZ58aBT2+CSL/r8WUCAVhob0y1y8OVJ204s=", + version = "v3.5.10", + ) + go_repository( + name = 
"io_etcd_go_etcd_v3", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "go.etcd.io/etcd/v3", + sum = "h1:M147e9UGqf9kZK19ptQOTt3Nue0inAOoUCbk9S+VMZk=", + version = "v3.5.10", + ) + go_repository( + name = "io_filippo_edwards25519", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "filippo.io/edwards25519", + sum = "h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=", + version = "v1.1.0", + ) + go_repository( + name = "io_k8s_api", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "k8s.io/api", + sum = "h1:NiCdQMY1QOp1H8lfRyeEf8eOwV6+0xA6XEE44ohDX2A=", + version = "v0.29.0", + ) + go_repository( + name = "io_k8s_apiextensions_apiserver", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "k8s.io/apiextensions-apiserver", + sum = "h1:0VuspFG7Hj+SxyF/Z/2T0uFbI5gb5LRgEyUVE3Q4lV0=", + version = "v0.29.0", + ) + go_repository( + name = "io_k8s_apimachinery", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "k8s.io/apimachinery", + sum = "h1:+ACVktwyicPz0oc6MTMLwa2Pw3ouLAfAon1wPLtG48o=", + version = "v0.29.0", + ) + go_repository( + name = "io_k8s_apiserver", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "k8s.io/apiserver", + sum = "h1:Y1xEMjJkP+BIi0GSEv1BBrf1jLU9UPfAnnGGbbDdp7o=", + version = "v0.29.0", + ) + go_repository( + name = "io_k8s_cli_runtime", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "k8s.io/cli-runtime", + sum = "h1:q2kC3cex4rOBLfPOnMSzV2BIrrQlx97gxHJs21KxKS4=", + version = "v0.29.0", + ) + go_repository( + name = "io_k8s_client_go", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "k8s.io/client-go", + sum = "h1:KmlDtFcrdUzOYrBhXHgKw5ycWzc3ryPX5mQe0SkG3y8=", + version = "v0.29.0", + ) + go_repository( + name = "io_k8s_cloud_provider", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "k8s.io/cloud-provider", + replace = "k8s.io/cloud-provider", + sum = "h1:Qgk/jHsSKGRk/ltTlN6e7eaNuuamLROOzVBd0RPp94M=", + version = "v0.29.0", + ) + go_repository( + name = "io_k8s_cluster_bootstrap", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "k8s.io/cluster-bootstrap", + sum = "h1:zCYdZ+LWDj4O86FB5tDKckIEsf2qBHjcp78xtjOzD3A=", + version = "v0.29.0", + ) + go_repository( + name = "io_k8s_code_generator", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "k8s.io/code-generator", + sum = "h1:2LQfayGDhaIlaamXjIjEQlCMy4JNCH9lrzas4DNW1GQ=", + version = "v0.29.0", + ) + go_repository( + name = "io_k8s_component_base", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "k8s.io/component-base", + sum = "h1:T7rjd5wvLnPBV1vC4zWd/iWRbV8Mdxs+nGaoaFzGw3s=", + version = "v0.29.0", + ) + go_repository( + name = "io_k8s_component_helpers", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "k8s.io/component-helpers", + sum = "h1:Y8W70NGeitKxWwhsPo/vEQbQx5VqJV+3xfLpP3V1VxU=", + version = "v0.29.0", + ) + go_repository( + name = "io_k8s_controller_manager", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "k8s.io/controller-manager", + replace = "k8s.io/controller-manager", + sum = "h1:kEv9sKLnjDkoSqeouWp2lZ8P33an5wrDJpOMqoyD7pc=", + 
version = "v0.29.0", + ) + go_repository( + name = "io_k8s_cri_api", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "k8s.io/cri-api", + sum = "h1:atenAqOltRsFqcCQlFFpDnl/R4aGfOELoNLTDJfd7t8=", + version = "v0.29.0", + ) + go_repository( + name = "io_k8s_csi_translation_lib", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "k8s.io/csi-translation-lib", + replace = "k8s.io/csi-translation-lib", + sum = "h1:we4X1yUlDikvm5Rv0dwMuPHNw6KwjwsQiAuOPWXha8M=", + version = "v0.29.0", + ) + go_repository( + name = "io_k8s_dynamic_resource_allocation", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "k8s.io/dynamic-resource-allocation", + replace = "k8s.io/dynamic-resource-allocation", + sum = "h1:JQW5erdoOsvhst7DxMfEpnXhrfm9SmNTnvyaXdqTLAE=", + version = "v0.29.0", + ) + go_repository( + name = "io_k8s_endpointslice", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "k8s.io/endpointslice", + replace = "k8s.io/endpointslice", + sum = "h1:HM+zsyqSALW7FzOVCWYsF+eFabiTGDrZpLEZZX2065U=", + version = "v0.29.0", + ) + go_repository( + name = "io_k8s_gengo", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "k8s.io/gengo", + sum = "h1:pWEwq4Asjm4vjW7vcsmijwBhOr1/shsbSYiWXmNGlks=", + version = "v0.0.0-20230829151522-9cce18d56c01", + ) + go_repository( + name = "io_k8s_klog_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "k8s.io/klog/v2", + sum = "h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=", + version = "v2.120.1", + ) + go_repository( + name = "io_k8s_kms", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "k8s.io/kms", + sum = "h1:KJ1zaZt74CgvgV3NR7tnURJ/mJOKC5X3nwon/WdwgxI=", + version = "v0.29.0", + ) + go_repository( + name = "io_k8s_kube_aggregator", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "k8s.io/kube-aggregator", + replace = "k8s.io/kube-aggregator", + sum = "h1:N4fmtePxOZ+bwiK1RhVEztOU+gkoVkvterHgpwAuiTw=", + version = "v0.29.0", + ) + go_repository( + name = "io_k8s_kube_controller_manager", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "k8s.io/kube-controller-manager", + replace = "k8s.io/kube-controller-manager", + sum = "h1:25nmyTOdjOLM1QLe4nbu5jvlLSv1ZIPFDvmUUWvbuSw=", + version = "v0.29.0", + ) + go_repository( + name = "io_k8s_kube_openapi", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "k8s.io/kube-openapi", + sum = "h1:QSpdNrZ9uRlV0VkqLvVO0Rqg8ioKi3oSw7O5P7pJV8M=", + version = "v0.0.0-20240220201932-37d671a357a5", + ) + go_repository( + name = "io_k8s_kube_proxy", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "k8s.io/kube-proxy", + replace = "k8s.io/kube-proxy", + sum = "h1:nZJdLzHTIJ2okftUMsBvEidtH57GAOMMPFKBcA0V+Bg=", + version = "v0.29.0", + ) + go_repository( + name = "io_k8s_kube_scheduler", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "k8s.io/kube-scheduler", + replace = "k8s.io/kube-scheduler", + sum = "h1:n4v68EvxYhy7o5Q/LFPgqBEGi7lKoiAxwQ0gQyMoj9M=", + version = "v0.29.0", + ) + go_repository( + name = "io_k8s_kubectl", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "k8s.io/kubectl", + sum = 
"h1:Oqi48gXjikDhrBF67AYuZRTcJV4lg2l42GmvsP7FmYI=", + version = "v0.29.0", + ) + go_repository( + name = "io_k8s_kubelet", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "k8s.io/kubelet", + sum = "h1:SX5hlznTBcGIrS1scaf8r8p6m3e475KMifwt9i12iOk=", + version = "v0.29.0", + ) + go_repository( + name = "io_k8s_kubernetes", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "k8s.io/kubernetes", + sum = "h1:DOLN7g8+nnAYBi8JHoW0+/MCrZKDPIqAxzLCXDXd0cg=", + version = "v1.29.0", + ) + go_repository( + name = "io_k8s_legacy_cloud_providers", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "k8s.io/legacy-cloud-providers", + replace = "k8s.io/legacy-cloud-providers", + sum = "h1:fjGV9OhqseUTp3R8xOm2TBoAxyuRTOS6B2zFTSJ80RE=", + version = "v0.29.0", + ) + go_repository( + name = "io_k8s_metrics", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "k8s.io/metrics", + sum = "h1:a6dWcNM+EEowMzMZ8trka6wZtSRIfEA/9oLjuhBksGc=", + version = "v0.29.0", + ) + go_repository( + name = "io_k8s_mount_utils", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "k8s.io/mount-utils", + sum = "h1:KcUE0bFHONQC10V3SuLWQ6+l8nmJggw9lKLpDftIshI=", + version = "v0.29.0", + ) + go_repository( + name = "io_k8s_pod_security_admission", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "k8s.io/pod-security-admission", + replace = "k8s.io/pod-security-admission", + sum = "h1:tY/ldtkbBCulMYVSWg6ZDLlgDYDWy6rLj8e/AgmwSj4=", + version = "v0.29.0", + ) + go_repository( + name = "io_k8s_sample_apiserver", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "k8s.io/sample-apiserver", + replace = "k8s.io/sample-apiserver", + sum = "h1:bUEz09ehjQE/xpgMVkutbBfZhcLvg1BvCMLvJnbLZbc=", + version = "v0.29.0", + ) + go_repository( + name = "io_k8s_sigs_apiserver_network_proxy_konnectivity_client", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "sigs.k8s.io/apiserver-network-proxy/konnectivity-client", + sum = "h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I=", + version = "v0.28.0", + ) + go_repository( + name = "io_k8s_sigs_controller_runtime", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "sigs.k8s.io/controller-runtime", + sum = "h1:FwHwD1CTUemg0pW2otk7/U5/i5m2ymzvOXdbeGOUvw0=", + version = "v0.17.2", + ) + go_repository( + name = "io_k8s_sigs_json", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "sigs.k8s.io/json", + sum = "h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=", + version = "v0.0.0-20221116044647-bc3834ca7abd", + ) + go_repository( + name = "io_k8s_sigs_kustomize_api", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "sigs.k8s.io/kustomize/api", + sum = "h1:/zAR4FOQDCkgSDmVzV2uiFbuy9bhu3jEzthrHCuvm1g=", + version = "v0.16.0", + ) + go_repository( + name = "io_k8s_sigs_kustomize_kustomize_v5", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "sigs.k8s.io/kustomize/kustomize/v5", + sum = "h1:vq2TtoDcQomhy7OxXLUOzSbHMuMYq0Bjn93cDtJEdKw=", + version = "v5.0.4-0.20230601165947-6ce0bf390ce3", + ) + go_repository( + name = "io_k8s_sigs_kustomize_kyaml", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = 
"sigs.k8s.io/kustomize/kyaml", + sum = "h1:6J33uKSoATlKZH16unr2XOhDI+otoe2sR3M8PDzW3K0=", + version = "v0.16.0", + ) + go_repository( + name = "io_k8s_sigs_release_utils", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "sigs.k8s.io/release-utils", + sum = "h1:JKDOvhCk6zW8ipEOkpTGDH/mW3TI+XqtPp16aaQ79FU=", + version = "v0.7.7", + ) + go_repository( + name = "io_k8s_sigs_structured_merge_diff_v4", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "sigs.k8s.io/structured-merge-diff/v4", + sum = "h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=", + version = "v4.4.1", + ) + go_repository( + name = "io_k8s_sigs_yaml", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "sigs.k8s.io/yaml", + sum = "h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=", + version = "v1.4.0", + ) + go_repository( + name = "io_k8s_system_validators", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "k8s.io/system-validators", + sum = "h1:tq05tdO9zdJZnNF3SXrq6LE7Knc/KfJm5wk68467JDg=", + version = "v1.8.0", + ) + go_repository( + name = "io_k8s_utils", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "k8s.io/utils", + sum = "h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ=", + version = "v0.0.0-20240102154912-e7106e64919e", + ) + go_repository( + name = "io_opencensus_go", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "go.opencensus.io", + sum = "h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=", + version = "v0.24.0", + ) + go_repository( + name = "io_opencensus_go_contrib_exporter_stackdriver", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "contrib.go.opencensus.io/exporter/stackdriver", + sum = "h1:zBakwHardp9Jcb8sQHcHpXy/0+JIb1M8KjigCJzx7+4=", + version = "v0.13.14", + ) + go_repository( + name = "io_opentelemetry_go_contrib_instrumentation_github_com_emicklei_go_restful_otelrestful", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful", + sum = "h1:Z6SbqeRZAl2OczfkFOqLx1BeYBDYehNjEnqluD7581Y=", + version = "v0.42.0", + ) + go_repository( + name = "io_opentelemetry_go_contrib_instrumentation_google_golang_org_grpc_otelgrpc", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc", + sum = "h1:P+/g8GpuJGYbOp2tAdKrIPUX9JO02q8Q0YNlHolpibA=", + version = "v0.48.0", + ) + go_repository( + name = "io_opentelemetry_go_contrib_instrumentation_net_http_otelhttp", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp", + sum = "h1:doUP+ExOpH3spVTLS0FcWGLnQrPct/hD/bCPbDRUEAU=", + version = "v0.48.0", + ) + go_repository( + name = "io_opentelemetry_go_otel", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "go.opentelemetry.io/otel", + sum = "h1:Za4UzOqJYS+MUczKI320AtqZHZb7EqxO00jAHE0jmQY=", + version = "v1.23.1", + ) + go_repository( + name = "io_opentelemetry_go_otel_exporters_otlp_otlptrace", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "go.opentelemetry.io/otel/exporters/otlp/otlptrace", + sum = 
"h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw=", + version = "v1.21.0", + ) + go_repository( + name = "io_opentelemetry_go_otel_exporters_otlp_otlptrace_otlptracegrpc", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc", + sum = "h1:tIqheXEFWAZ7O8A7m+J0aPTmpJN3YQ7qetUAdkkkKpk=", + version = "v1.21.0", + ) + go_repository( + name = "io_opentelemetry_go_otel_exporters_otlp_otlptrace_otlptracehttp", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp", + sum = "h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg=", + version = "v1.19.0", + ) + go_repository( + name = "io_opentelemetry_go_otel_metric", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "go.opentelemetry.io/otel/metric", + sum = "h1:PQJmqJ9u2QaJLBOELl1cxIdPcpbwzbkjfEyelTl2rlo=", + version = "v1.23.1", + ) + go_repository( + name = "io_opentelemetry_go_otel_sdk", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "go.opentelemetry.io/otel/sdk", + sum = "h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8=", + version = "v1.21.0", + ) + go_repository( + name = "io_opentelemetry_go_otel_trace", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "go.opentelemetry.io/otel/trace", + sum = "h1:4LrmmEd8AU2rFvU1zegmvqW7+kWarxtNOPyeL6HmYY8=", + version = "v1.23.1", + ) + go_repository( + name = "io_opentelemetry_go_proto_otlp", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "go.opentelemetry.io/proto/otlp", + sum = "h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=", + version = "v1.0.0", + ) + go_repository( + name = "io_rsc_binaryregexp", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "rsc.io/binaryregexp", + sum = "h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE=", + version = "v0.2.0", + ) + go_repository( + name = "io_rsc_quote_v3", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "rsc.io/quote/v3", + sum = "h1:9JKUTTIUgS6kzR9mK1YuGKv6Nl+DijDNIc0ghT58FaY=", + version = "v3.1.0", + ) + go_repository( + name = "io_rsc_sampler", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "rsc.io/sampler", + sum = "h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4=", + version = "v1.3.0", + ) + go_repository( + name = "land_oras_oras_go", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "oras.land/oras-go", + sum = "h1:XpYuAwAb0DfQsunIyMfeET92emK8km3W4yEzZvUbsTo=", + version = "v1.2.5", + ) + go_repository( + name = "net_starlark_go", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "go.starlark.net", + sum = "h1:LmbG8Pq7KDGkglKVn8VpZOZj6vb9b8nKEGcg9l03epM=", + version = "v0.0.0-20240123142251-f86470692795", + ) + go_repository( + name = "org_bitbucket_bertimus9_systemstat", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "bitbucket.org/bertimus9/systemstat", + sum = "h1:n0aLnh2Jo4nBUBym9cE5PJDG8GT6g+4VuS2Ya2jYYpA=", + version = "v0.5.0", + ) + go_repository( + name = "org_bitbucket_creachadair_shell", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "bitbucket.org/creachadair/shell", + sum = 
"h1:Z96pB6DkSb7F3Y3BBnJeOZH2gazyMTWlvecSD4vDqfk=", + version = "v0.0.7", + ) + go_repository( + name = "org_golang_google_api", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "google.golang.org/api", + sum = "h1:zd5d4JIIIaYYsfVy1HzoXYZ9rWCSBxxAglbczzo7Bgc=", + version = "v0.165.0", + ) + go_repository( + name = "org_golang_google_appengine", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "google.golang.org/appengine", + sum = "h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=", + version = "v1.6.8", + ) + go_repository( + name = "org_golang_google_genproto", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "google.golang.org/genproto", + sum = "h1:Zmyn5CV/jxzKnF+3d+xzbomACPwLQqVpLTpyXN5uTaQ=", + version = "v0.0.0-20240221002015-b0ce06bbee7c", + ) + go_repository( + name = "org_golang_google_genproto_googleapis_api", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "google.golang.org/genproto/googleapis/api", + sum = "h1:9g7erC9qu44ks7UK4gDNlnk4kOxZG707xKm4jVniy6o=", + version = "v0.0.0-20240221002015-b0ce06bbee7c", + ) + go_repository( + name = "org_golang_google_genproto_googleapis_bytestream", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "google.golang.org/genproto/googleapis/bytestream", + sum = "h1:d9MrRjDhOUCudL8eHNVgZA1ESnBiE7ZKwlZS9foLoRU=", + version = "v0.0.0-20240205150955-31a09d347014", + ) + go_repository( + name = "org_golang_google_genproto_googleapis_rpc", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "google.golang.org/genproto/googleapis/rpc", + sum = "h1:NUsgEN92SQQqzfA+YtqYNqYmB3DMMYLlIwUZAQFVFbo=", + version = "v0.0.0-20240221002015-b0ce06bbee7c", + ) + go_repository( + name = "org_golang_google_grpc", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "google.golang.org/grpc", + sum = "h1:kLAiWrZs7YeDM6MumDe7m3y4aM6wacLzM1Y/wiLP9XY=", + version = "v1.61.1", + ) + go_repository( + name = "org_golang_google_grpc_cmd_protoc_gen_go_grpc", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "google.golang.org/grpc/cmd/protoc-gen-go-grpc", + sum = "h1:M1YKkFIboKNieVO5DLUEVzQfGwJD30Nv2jfUgzb5UcE=", + version = "v1.1.0", + ) + go_repository( + name = "org_golang_google_protobuf", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "google.golang.org/protobuf", + sum = "h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=", + version = "v1.33.0", + ) + go_repository( + name = "org_golang_x_crypto", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "golang.org/x/crypto", + sum = "h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=", + version = "v0.19.0", + ) + go_repository( + name = "org_golang_x_exp", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "golang.org/x/exp", + sum = "h1:HinSgX1tJRX3KsL//Gxynpw5CTOAIPhgL4W8PNiIpVE=", + version = "v0.0.0-20240213143201-ec583247a57a", + ) + go_repository( + name = "org_golang_x_image", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "golang.org/x/image", + sum = "h1:+qEpEAPhDZ1o0x3tHzZTQDArnOixOzGD9HUJfcg0mb4=", + version = "v0.0.0-20190802002840-cff245a6509b", + ) + go_repository( + name = "org_golang_x_lint", + build_file_generation = "on", + 
build_file_proto_mode = "disable_global", + importpath = "golang.org/x/lint", + sum = "h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug=", + version = "v0.0.0-20210508222113-6edffad5e616", + ) + go_repository( + name = "org_golang_x_mobile", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "golang.org/x/mobile", + sum = "h1:4+4C/Iv2U4fMZBiMCc98MG1In4gJY5YRhtpDNeDeHWs=", + version = "v0.0.0-20190719004257-d2bd2a29d028", + ) + go_repository( + name = "org_golang_x_mod", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "golang.org/x/mod", + sum = "h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8=", + version = "v0.15.0", + ) + go_repository( + name = "org_golang_x_net", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "golang.org/x/net", + sum = "h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=", + version = "v0.21.0", + ) + go_repository( + name = "org_golang_x_oauth2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "golang.org/x/oauth2", + sum = "h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ=", + version = "v0.17.0", + ) + go_repository( + name = "org_golang_x_sync", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "golang.org/x/sync", + sum = "h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=", + version = "v0.6.0", + ) + go_repository( + name = "org_golang_x_sys", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "golang.org/x/sys", + sum = "h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=", + version = "v0.17.0", + ) + go_repository( + name = "org_golang_x_telemetry", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "golang.org/x/telemetry", + sum = "h1:+Kc94D8UVEVxJnLXp/+FMfqQARZtWHfVrcRtcG8aT3g=", + version = "v0.0.0-20240208230135-b75ee8823808", + ) + go_repository( + name = "org_golang_x_term", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "golang.org/x/term", + sum = "h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U=", + version = "v0.17.0", + ) + go_repository( + name = "org_golang_x_text", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "golang.org/x/text", + sum = "h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=", + version = "v0.14.0", + ) + go_repository( + name = "org_golang_x_time", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "golang.org/x/time", + sum = "h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=", + version = "v0.5.0", + ) + go_repository( + name = "org_golang_x_tools", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "golang.org/x/tools", + sum = "h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ=", + version = "v0.18.0", + ) + go_repository( + name = "org_golang_x_vuln", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "golang.org/x/vuln", + sum = "h1:KUas02EjQK5LTuIx1OylBQdKKZ9jeugs+HiqO5HormU=", + version = "v1.0.1", + ) + go_repository( + name = "org_golang_x_xerrors", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "golang.org/x/xerrors", + sum = "h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU=", + version = "v0.0.0-20231012003039-104605ab7028", + ) + go_repository( + name = "org_libvirt_go_libvirt", + build_file_generation = 
"on", + build_file_proto_mode = "disable_global", + importpath = "libvirt.org/go/libvirt", + # keep + patches = [ + "//3rdparty/bazel/org_libvirt_go_libvirt:go_libvirt.patch", + ], + sum = "h1:fPVWdvZz8TSmMrTnsStih9ETsHlrzIgSEEiFzOLbhO8=", + version = "v1.10000.0", + ) + go_repository( + name = "org_mongodb_go_mongo_driver", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "go.mongodb.org/mongo-driver", + sum = "h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80=", + version = "v1.14.0", + ) + go_repository( + name = "org_mozilla_go_pkcs7", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "go.mozilla.org/pkcs7", + sum = "h1:A/5uWzF44DlIgdm/PQFwfMkW0JX+cIcQi/SwLAmZP5M=", + version = "v0.0.0-20200128120323-432b2356ecb1", + ) + go_repository( + name = "org_uber_go_atomic", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "go.uber.org/atomic", + sum = "h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=", + version = "v1.10.0", + ) + go_repository( + name = "org_uber_go_goleak", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "go.uber.org/goleak", + sum = "h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=", + version = "v1.3.0", + ) + go_repository( + name = "org_uber_go_multierr", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "go.uber.org/multierr", + sum = "h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=", + version = "v1.11.0", + ) + go_repository( + name = "org_uber_go_zap", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "go.uber.org/zap", + sum = "h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=", + version = "v1.27.0", + ) + go_repository( + name = "sh_elv_src", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "src.elv.sh", + sum = "h1:pjVeIo9Ba6K1Wy+rlwX91zT7A+xGEmxiNRBdN04gDTQ=", + version = "v0.16.0-rc1.0.20220116211855-fda62502ad7f", + ) + go_repository( + name = "sh_helm_helm", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "helm.sh/helm", + sum = "h1:cSe3FaQOpRWLDXvTObQNj0P7WI98IG5yloU6tQVls2k=", + version = "v2.17.0+incompatible", + ) + go_repository( + name = "sh_helm_helm_v3", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "helm.sh/helm/v3", + sum = "h1:V71fv+NGZv0icBlr+in1MJXuUIHCiPG1hW9gEBISTIA=", + version = "v3.14.2", + ) + go_repository( + name = "sm_step_go_crypto", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "go.step.sm/crypto", + sum = "h1:OmwHm3GJO8S4VGWL3k4+I+Q4P/F2s+j8msvTyGnh1Vg=", + version = "v0.42.1", + ) + go_repository( + name = "tools_gotest_v3", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "gotest.tools/v3", + sum = "h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o=", + version = "v3.4.0", + ) + go_repository( + name = "xyz_gomodules_jsonpatch_v2", + build_file_generation = "on", + build_file_proto_mode = "disable_global", + importpath = "gomodules.xyz/jsonpatch/v2", + sum = "h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=", + version = "v2.4.0", + ) diff --git a/bazel/toolchains/go_rules_deps.bzl b/bazel/toolchains/go_rules_deps.bzl new file mode 100644 index 000000000..ab81aa5d4 --- /dev/null +++ b/bazel/toolchains/go_rules_deps.bzl @@ -0,0 +1,29 @@ +"""Go toolchain dependencies for Bazel. 
+ +Defines hermetic go toolchains and rules to build and test go code. +Gazelle is a build file generator for Bazel projects written in Go. +""" + +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") + +def go_deps(): + http_archive( + name = "io_bazel_rules_go", + sha256 = "80a98277ad1311dacd837f9b16db62887702e9f1d1c4c9f796d0121a46c8e184", + urls = [ + "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.46.0/rules_go-v0.46.0.zip", + "https://cdn.confidential.cloud/constellation/cas/sha256/80a98277ad1311dacd837f9b16db62887702e9f1d1c4c9f796d0121a46c8e184", + "https://github.com/bazelbuild/rules_go/releases/download/v0.46.0/rules_go-v0.46.0.zip", + ], + type = "zip", + ) + http_archive( + name = "bazel_gazelle", + sha256 = "32938bda16e6700063035479063d9d24c60eda8d79fd4739563f50d331cb3209", + urls = [ + "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.35.0/bazel-gazelle-v0.35.0.tar.gz", + "https://cdn.confidential.cloud/constellation/cas/sha256/32938bda16e6700063035479063d9d24c60eda8d79fd4739563f50d331cb3209", + "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.35.0/bazel-gazelle-v0.35.0.tar.gz", + ], + type = "tar.gz", + ) diff --git a/bazel/toolchains/hermetic_cc_deps.bzl b/bazel/toolchains/hermetic_cc_deps.bzl new file mode 100644 index 000000000..1c498bf31 --- /dev/null +++ b/bazel/toolchains/hermetic_cc_deps.bzl @@ -0,0 +1,16 @@ +"""hermetic cc toolchain (bazel-zig-cc) build rules""" + +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") + +def hermetic_cc_deps(): + """Loads the dependencies for hermetic_cc_toolchain.""" + + http_archive( + name = "hermetic_cc_toolchain", + urls = [ + "https://cdn.confidential.cloud/constellation/cas/sha256/3b8107de0d017fe32e6434086a9568f97c60a111b49dc34fc7001e139c30fdea", + "https://github.com/uber/hermetic_cc_toolchain/releases/download/v2.2.1/hermetic_cc_toolchain-v2.2.1.tar.gz", + ], + type = "tar.gz", + sha256 = "3b8107de0d017fe32e6434086a9568f97c60a111b49dc34fc7001e139c30fdea", + ) diff --git a/bazel/toolchains/linux_kernel.bzl b/bazel/toolchains/linux_kernel.bzl index d34d15324..325e542ff 100644 --- a/bazel/toolchains/linux_kernel.bzl +++ b/bazel/toolchains/linux_kernel.bzl @@ -9,74 +9,74 @@ def kernel_rpms(): http_file( name = "kernel_lts", urls = [ - "https://cdn.confidential.cloud/constellation/cas/sha256/7834bc4bc7e088c98505956382884bdc670ab9a9283288b7fef04a43df31356e", - "https://cdn.confidential.cloud/constellation/kernel/6.6.87-100.constellation/kernel-6.6.87-100.constellation.fc40.x86_64.rpm", + "https://cdn.confidential.cloud/constellation/cas/sha256/b4a8e5217cb62241631d1a7357979face1ad455a08cd4ca8f59c2252a90047f6", + "https://cdn.confidential.cloud/constellation/kernel/6.1.79-100.constellation/kernel-6.1.79-100.constellation.fc38.x86_64.rpm", ], downloaded_file_path = "kernel-lts.rpm", - sha256 = "7834bc4bc7e088c98505956382884bdc670ab9a9283288b7fef04a43df31356e", + sha256 = "b4a8e5217cb62241631d1a7357979face1ad455a08cd4ca8f59c2252a90047f6", ) http_file( name = "kernel_core_lts", urls = [ - "https://cdn.confidential.cloud/constellation/cas/sha256/2763c699d1e2f9810421ac7af2e9c94c6f98533e83f2938c26f1d824e3559b97", - "https://cdn.confidential.cloud/constellation/kernel/6.6.87-100.constellation/kernel-core-6.6.87-100.constellation.fc40.x86_64.rpm", + "https://cdn.confidential.cloud/constellation/cas/sha256/03e9ae8508cf1cd964216eed858c69d55629d8d27a44965857e408defcfe4785", + 
"https://cdn.confidential.cloud/constellation/kernel/6.1.79-100.constellation/kernel-core-6.1.79-100.constellation.fc38.x86_64.rpm", ], downloaded_file_path = "kernel-core-lts.rpm", - sha256 = "2763c699d1e2f9810421ac7af2e9c94c6f98533e83f2938c26f1d824e3559b97", + sha256 = "03e9ae8508cf1cd964216eed858c69d55629d8d27a44965857e408defcfe4785", ) http_file( name = "kernel_modules_lts", urls = [ - "https://cdn.confidential.cloud/constellation/cas/sha256/a7604eec263f190db573d809d20336bbf75e46c51f5977f5db95bb88bfec56d3", - "https://cdn.confidential.cloud/constellation/kernel/6.6.87-100.constellation/kernel-modules-6.6.87-100.constellation.fc40.x86_64.rpm", + "https://cdn.confidential.cloud/constellation/cas/sha256/c306c13f17024915682304e4f05ca21dd9533a34921684520135bc7e69e3d327", + "https://cdn.confidential.cloud/constellation/kernel/6.1.79-100.constellation/kernel-modules-6.1.79-100.constellation.fc38.x86_64.rpm", ], downloaded_file_path = "kernel-modules-lts.rpm", - sha256 = "a7604eec263f190db573d809d20336bbf75e46c51f5977f5db95bb88bfec56d3", + sha256 = "c306c13f17024915682304e4f05ca21dd9533a34921684520135bc7e69e3d327", ) http_file( name = "kernel_modules_core_lts", urls = [ - "https://cdn.confidential.cloud/constellation/cas/sha256/648fd503d7d54608fbd62ace87c4da098f72abbaac1ab7e343327fc24ccef7f8", - "https://cdn.confidential.cloud/constellation/kernel/6.6.87-100.constellation/kernel-modules-core-6.6.87-100.constellation.fc40.x86_64.rpm", + "https://cdn.confidential.cloud/constellation/cas/sha256/2183a23562e1d69a64e799baf1cf64ae34c4058f24993e3fe7645e3e363e899a", + "https://cdn.confidential.cloud/constellation/kernel/6.1.79-100.constellation/kernel-modules-core-6.1.79-100.constellation.fc38.x86_64.rpm", ], downloaded_file_path = "kernel-modules-core-lts.rpm", - sha256 = "648fd503d7d54608fbd62ace87c4da098f72abbaac1ab7e343327fc24ccef7f8", + sha256 = "2183a23562e1d69a64e799baf1cf64ae34c4058f24993e3fe7645e3e363e899a", ) # mainline kernel http_file( name = "kernel_mainline", urls = [ - "https://cdn.confidential.cloud/constellation/cas/sha256/6eaec29870e6549d95a93b72ea10715507db84b851c68c0d75e44e4c20f895f2", - "https://kojipkgs.fedoraproject.org/packages/kernel/6.8.9/300.fc40/x86_64/kernel-6.8.9-300.fc40.x86_64.rpm", + "https://cdn.confidential.cloud/constellation/cas/sha256/303506771939b324c32c2d7df4ce2a8ca08af4fe0fec77712084bdd3c1481bc9", + "https://kojipkgs.fedoraproject.org/packages/kernel/6.7.6/100.fc38/x86_64/kernel-6.7.6-100.fc38.x86_64.rpm", ], downloaded_file_path = "kernel-mainline.rpm", - sha256 = "6eaec29870e6549d95a93b72ea10715507db84b851c68c0d75e44e4c20f895f2", + sha256 = "303506771939b324c32c2d7df4ce2a8ca08af4fe0fec77712084bdd3c1481bc9", ) http_file( name = "kernel_core_mainline", urls = [ - "https://cdn.confidential.cloud/constellation/cas/sha256/910fd35209f7dc8185e88dddeaccf6158dd63ad9fd469ef3dc81b96840ef28eb", - "https://kojipkgs.fedoraproject.org/packages/kernel/6.8.9/300.fc40/x86_64/kernel-core-6.8.9-300.fc40.x86_64.rpm", + "https://cdn.confidential.cloud/constellation/cas/sha256/f51347ddeca348494fe20a898b455f84e1e7c4cda6832fb5dc2d092b94ddc039", + "https://kojipkgs.fedoraproject.org/packages/kernel/6.7.6/100.fc38/x86_64/kernel-core-6.7.6-100.fc38.x86_64.rpm", ], downloaded_file_path = "kernel-core-mainline.rpm", - sha256 = "910fd35209f7dc8185e88dddeaccf6158dd63ad9fd469ef3dc81b96840ef28eb", + sha256 = "f51347ddeca348494fe20a898b455f84e1e7c4cda6832fb5dc2d092b94ddc039", ) http_file( name = "kernel_modules_mainline", urls = [ - 
"https://cdn.confidential.cloud/constellation/cas/sha256/b8de20433c68d2fe0ca6625e25f314aba36a9327592db8b1478b97bb50521149", - "https://kojipkgs.fedoraproject.org/packages/kernel/6.8.9/300.fc40/x86_64/kernel-modules-6.8.9-300.fc40.x86_64.rpm", + "https://cdn.confidential.cloud/constellation/cas/sha256/4a0c65aac2628fc24e460f68eb2b1a9b8d749f319d10962257dcfeee7cadb09c", + "https://kojipkgs.fedoraproject.org/packages/kernel/6.7.6/100.fc38/x86_64/kernel-modules-6.7.6-100.fc38.x86_64.rpm", ], downloaded_file_path = "kernel-modules-mainline.rpm", - sha256 = "b8de20433c68d2fe0ca6625e25f314aba36a9327592db8b1478b97bb50521149", + sha256 = "4a0c65aac2628fc24e460f68eb2b1a9b8d749f319d10962257dcfeee7cadb09c", ) http_file( name = "kernel_modules_core_mainline", urls = [ - "https://cdn.confidential.cloud/constellation/cas/sha256/8ecd8e96483810d18e04a20cd8ecef46f27bff0fbb54f23e67adb813828b3cec", - "https://kojipkgs.fedoraproject.org/packages/kernel/6.8.9/300.fc40/x86_64/kernel-modules-core-6.8.9-300.fc40.x86_64.rpm", + "https://cdn.confidential.cloud/constellation/cas/sha256/127a1b895ba6a7842e8503770ccc3b412fca195a9f750bb3f94788c2384ab577", + "https://kojipkgs.fedoraproject.org/packages/kernel/6.7.6/100.fc38/x86_64/kernel-modules-core-6.7.6-100.fc38.x86_64.rpm", ], downloaded_file_path = "kernel-modules-core-mainline.rpm", - sha256 = "8ecd8e96483810d18e04a20cd8ecef46f27bff0fbb54f23e67adb813828b3cec", + sha256 = "127a1b895ba6a7842e8503770ccc3b412fca195a9f750bb3f94788c2384ab577", ) diff --git a/bazel/toolchains/multirun_deps.bzl b/bazel/toolchains/multirun_deps.bzl index ee1f3a584..b34da1cec 100644 --- a/bazel/toolchains/multirun_deps.bzl +++ b/bazel/toolchains/multirun_deps.bzl @@ -5,11 +5,11 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") def multirun_deps(): http_archive( name = "com_github_ash2k_bazel_tools", - sha256 = "dc32a65c69c843f1ba2a328b79974163896e5b8ed283cd711abe12bf7cd12ffc", - strip_prefix = "bazel-tools-415483a9e13342a6603a710b0296f6d85b8d26bf", + sha256 = "a911dab6711bc12a00f02cc94b66ced7dc57650e382ebd4f17c9cdb8ec2cbd56", + strip_prefix = "bazel-tools-2add5bb84c2837a82a44b57e83c7414247aed43a", urls = [ - "https://cdn.confidential.cloud/constellation/cas/sha256/dc32a65c69c843f1ba2a328b79974163896e5b8ed283cd711abe12bf7cd12ffc", - "https://github.com/ash2k/bazel-tools/archive/415483a9e13342a6603a710b0296f6d85b8d26bf.tar.gz", + "https://cdn.confidential.cloud/constellation/cas/sha256/a911dab6711bc12a00f02cc94b66ced7dc57650e382ebd4f17c9cdb8ec2cbd56", + "https://github.com/ash2k/bazel-tools/archive/2add5bb84c2837a82a44b57e83c7414247aed43a.tar.gz", ], type = "tar.gz", ) diff --git a/bazel/toolchains/nixpkgs_deps.bzl b/bazel/toolchains/nixpkgs_deps.bzl index bdf5f0f1c..8ce9949d7 100644 --- a/bazel/toolchains/nixpkgs_deps.bzl +++ b/bazel/toolchains/nixpkgs_deps.bzl @@ -5,11 +5,11 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") def nixpkgs_deps(): http_archive( name = "io_tweag_rules_nixpkgs", - sha256 = "30271f7bd380e4e20e4d7132c324946c4fdbc31ebe0bbb6638a0f61a37e74397", - strip_prefix = "rules_nixpkgs-0.13.0", + sha256 = "d4a8c10121ec7494402a0ae8c1a896ced20d4bef4485b107e37f5331716c3626", + strip_prefix = "rules_nixpkgs-244ae504d3f25534f6d3877ede4ee50e744a5234", urls = [ - "https://cdn.confidential.cloud/constellation/cas/sha256/30271f7bd380e4e20e4d7132c324946c4fdbc31ebe0bbb6638a0f61a37e74397", - "https://github.com/tweag/rules_nixpkgs/releases/download/v0.13.0/rules_nixpkgs-0.13.0.tar.gz", + 
"https://cdn.confidential.cloud/constellation/cas/sha256/d4a8c10121ec7494402a0ae8c1a896ced20d4bef4485b107e37f5331716c3626", + "https://github.com/tweag/rules_nixpkgs/archive/244ae504d3f25534f6d3877ede4ee50e744a5234.tar.gz", ], type = "tar.gz", ) diff --git a/bazel/toolchains/oci_deps.bzl b/bazel/toolchains/oci_deps.bzl index f425e9066..22f5cb148 100644 --- a/bazel/toolchains/oci_deps.bzl +++ b/bazel/toolchains/oci_deps.bzl @@ -7,13 +7,11 @@ def oci_deps(): # Remove this override once https://github.com/bazel-contrib/rules_oci/issues/420 is fixed. http_archive( name = "rules_oci", - strip_prefix = "rules_oci-2.2.5", + strip_prefix = "rules_oci-c622bf79d269473d3d9bc33510e16cfd9a1142bc", type = "tar.gz", urls = [ - "https://cdn.confidential.cloud/constellation/cas/sha256/361c417e8c95cd7c3d8b5cf4b202e76bac8d41532131534ff8e6fa43aa161142", - "https://github.com/bazel-contrib/rules_oci/releases/download/v2.2.5/rules_oci-v2.2.5.tar.gz", + "https://cdn.confidential.cloud/constellation/cas/sha256/dca0cfa2a8eb4ab79c231617964fc821f6d1a3bb9d996358975a5ceee5b8d25f", + "https://github.com/bazel-contrib/rules_oci/archive/c622bf79d269473d3d9bc33510e16cfd9a1142bc.tar.gz", ], - sha256 = "361c417e8c95cd7c3d8b5cf4b202e76bac8d41532131534ff8e6fa43aa161142", - patches = ["//bazel/toolchains:0001-disable-Windows-support.patch"], - patch_args = ["-p1"], + sha256 = "dca0cfa2a8eb4ab79c231617964fc821f6d1a3bb9d996358975a5ceee5b8d25f", ) diff --git a/bazel/toolchains/pkg_deps.bzl b/bazel/toolchains/pkg_deps.bzl new file mode 100644 index 000000000..5a4928141 --- /dev/null +++ b/bazel/toolchains/pkg_deps.bzl @@ -0,0 +1,15 @@ +"""rules_pkg dependencies""" + +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") + +def pkg_deps(): + http_archive( + name = "rules_pkg", + urls = [ + "https://mirror.bazel.build/github.com/bazelbuild/rules_pkg/releases/download/0.10.1/rules_pkg-0.10.1.tar.gz", + "https://cdn.confidential.cloud/constellation/cas/sha256/d250924a2ecc5176808fc4c25d5cf5e9e79e6346d79d5ab1c493e289e722d1d0", + "https://github.com/bazelbuild/rules_pkg/releases/download/0.10.1/rules_pkg-0.10.1.tar.gz", + ], + sha256 = "d250924a2ecc5176808fc4c25d5cf5e9e79e6346d79d5ab1c493e289e722d1d0", + type = "tar.gz", + ) diff --git a/bazel/toolchains/proto_deps.bzl b/bazel/toolchains/proto_deps.bzl new file mode 100644 index 000000000..72b02ff09 --- /dev/null +++ b/bazel/toolchains/proto_deps.bzl @@ -0,0 +1,15 @@ +"""proto toolchain rules""" + +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") + +def proto_deps(): + http_archive( + name = "rules_proto", + sha256 = "17fa03f509b0d1df05c70c174a266ab211d04b9969e41924fd07a81ea171f117", + strip_prefix = "rules_proto-cda0effe6b5af095a6886c67f90c760b83f08c48", + urls = [ + "https://cdn.confidential.cloud/constellation/cas/sha256/17fa03f509b0d1df05c70c174a266ab211d04b9969e41924fd07a81ea171f117", + "https://github.com/bazelbuild/rules_proto/archive/cda0effe6b5af095a6886c67f90c760b83f08c48.tar.gz", + ], + type = "tar.gz", + ) diff --git a/bazel/toolchains/python_deps.bzl b/bazel/toolchains/python_deps.bzl new file mode 100644 index 000000000..329a0a6f6 --- /dev/null +++ b/bazel/toolchains/python_deps.bzl @@ -0,0 +1,15 @@ +"""python toolchain rules""" + +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") + +def python_deps(): + http_archive( + name = "rules_python", + strip_prefix = "rules_python-0.31.0", + urls = [ + "https://cdn.confidential.cloud/constellation/cas/sha256/c68bdc4fbec25de5b5493b8819cfc877c4ea299c0dcb15c244c5a00208cde311", + 
"https://github.com/bazelbuild/rules_python/releases/download/0.31.0/rules_python-0.31.0.tar.gz", + ], + type = "tar.gz", + sha256 = "c68bdc4fbec25de5b5493b8819cfc877c4ea299c0dcb15c244c5a00208cde311", + ) diff --git a/bazel/toolchains/skylib_deps.bzl b/bazel/toolchains/skylib_deps.bzl new file mode 100644 index 000000000..c86bfe9e1 --- /dev/null +++ b/bazel/toolchains/skylib_deps.bzl @@ -0,0 +1,15 @@ +"""bazel skylib""" + +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") + +def skylib_deps(): + http_archive( + name = "bazel_skylib", + sha256 = "cd55a062e763b9349921f0f5db8c3933288dc8ba4f76dd9416aac68acee3cb94", + urls = [ + "https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.5.0/bazel-skylib-1.5.0.tar.gz", + "https://cdn.confidential.cloud/constellation/cas/sha256/cd55a062e763b9349921f0f5db8c3933288dc8ba4f76dd9416aac68acee3cb94", + "https://github.com/bazelbuild/bazel-skylib/releases/download/1.5.0/bazel-skylib-1.5.0.tar.gz", + ], + type = "tar.gz", + ) diff --git a/bootstrapper/cmd/bootstrapper/BUILD.bazel b/bootstrapper/cmd/bootstrapper/BUILD.bazel index 77896efe7..6a8c61c50 100644 --- a/bootstrapper/cmd/bootstrapper/BUILD.bazel +++ b/bootstrapper/cmd/bootstrapper/BUILD.bazel @@ -21,7 +21,6 @@ go_library( "//bootstrapper/internal/kubernetes/k8sapi", "//bootstrapper/internal/kubernetes/kubewaiter", "//bootstrapper/internal/nodelock", - "//bootstrapper/internal/reboot", "//internal/atls", "//internal/attestation/choose", "//internal/attestation/initialize", diff --git a/bootstrapper/cmd/bootstrapper/main.go b/bootstrapper/cmd/bootstrapper/main.go index f1a533245..ff99b231f 100644 --- a/bootstrapper/cmd/bootstrapper/main.go +++ b/bootstrapper/cmd/bootstrapper/main.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package main @@ -43,10 +43,16 @@ const ( ) func main() { + gRPCDebug := flag.Bool("debug", false, "Enable gRPC debug logging") verbosity := flag.Int("v", 0, logger.CmdLineVerbosityDescription) flag.Parse() log := logger.NewJSONLogger(logger.VerbosityFromInt(*verbosity)).WithGroup("bootstrapper") - logger.ReplaceGRPCLogger(logger.GRPCLogger(log)) + + if *gRPCDebug { + logger.ReplaceGRPCLogger(log.WithGroup("gRPC")) + } else { + logger.ReplaceGRPCLogger(slog.New(logger.NewLevelHandler(slog.LevelWarn, log.Handler())).WithGroup("gRPC")) + } ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -80,7 +86,7 @@ func main() { clusterInitJoiner = kubernetes.New( "aws", k8sapi.NewKubernetesUtil(), &k8sapi.KubdeadmConfiguration{}, kubectl.NewUninitialized(), - metadata, &kubewaiter.CloudKubeAPIWaiter{}, log, + metadata, &kubewaiter.CloudKubeAPIWaiter{}, ) openDevice = vtpm.OpenVTPM fs = afero.NewOsFs() @@ -96,7 +102,7 @@ func main() { metadataAPI = metadata clusterInitJoiner = kubernetes.New( "gcp", k8sapi.NewKubernetesUtil(), &k8sapi.KubdeadmConfiguration{}, kubectl.NewUninitialized(), - metadata, &kubewaiter.CloudKubeAPIWaiter{}, log, + metadata, &kubewaiter.CloudKubeAPIWaiter{}, ) openDevice = vtpm.OpenVTPM fs = afero.NewOsFs() @@ -116,7 +122,7 @@ func main() { metadataAPI = metadata clusterInitJoiner = kubernetes.New( "azure", k8sapi.NewKubernetesUtil(), &k8sapi.KubdeadmConfiguration{}, kubectl.NewUninitialized(), - metadata, &kubewaiter.CloudKubeAPIWaiter{}, log, + metadata, &kubewaiter.CloudKubeAPIWaiter{}, ) openDevice = vtpm.OpenVTPM @@ -126,7 +132,7 @@ func main() { metadata := qemucloud.New() clusterInitJoiner = kubernetes.New( 
"qemu", k8sapi.NewKubernetesUtil(), &k8sapi.KubdeadmConfiguration{}, kubectl.NewUninitialized(), - metadata, &kubewaiter.CloudKubeAPIWaiter{}, log, + metadata, &kubewaiter.CloudKubeAPIWaiter{}, ) metadataAPI = metadata @@ -149,7 +155,7 @@ func main() { } clusterInitJoiner = kubernetes.New( "openstack", k8sapi.NewKubernetesUtil(), &k8sapi.KubdeadmConfiguration{}, kubectl.NewUninitialized(), - metadata, &kubewaiter.CloudKubeAPIWaiter{}, log, + metadata, &kubewaiter.CloudKubeAPIWaiter{}, ) metadataAPI = metadata openDevice = vtpm.OpenVTPM diff --git a/bootstrapper/cmd/bootstrapper/run.go b/bootstrapper/cmd/bootstrapper/run.go index fdb5891a6..733444bee 100644 --- a/bootstrapper/cmd/bootstrapper/run.go +++ b/bootstrapper/cmd/bootstrapper/run.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package main @@ -11,14 +11,13 @@ import ( "fmt" "log/slog" "net" - "sync" + "os" "github.com/edgelesssys/constellation/v2/bootstrapper/internal/clean" "github.com/edgelesssys/constellation/v2/bootstrapper/internal/diskencryption" "github.com/edgelesssys/constellation/v2/bootstrapper/internal/initserver" "github.com/edgelesssys/constellation/v2/bootstrapper/internal/joinclient" "github.com/edgelesssys/constellation/v2/bootstrapper/internal/nodelock" - "github.com/edgelesssys/constellation/v2/bootstrapper/internal/reboot" "github.com/edgelesssys/constellation/v2/internal/atls" "github.com/edgelesssys/constellation/v2/internal/attestation/initialize" "github.com/edgelesssys/constellation/v2/internal/attestation/vtpm" @@ -33,8 +32,7 @@ func run(issuer atls.Issuer, openDevice vtpm.TPMOpenFunc, fileHandler file.Handl ) { log.With(slog.String("version", constants.BinaryVersion().String())).Info("Starting bootstrapper") - disk := diskencryption.New() - uuid, err := getDiskUUID(disk) + uuid, err := getDiskUUID() if err != nil { log.With(slog.Any("error", err)).Error("Failed to get disk UUID") } else { @@ -44,58 +42,43 @@ func run(issuer atls.Issuer, openDevice vtpm.TPMOpenFunc, fileHandler file.Handl nodeBootstrapped, err := initialize.IsNodeBootstrapped(openDevice) if err != nil { log.With(slog.Any("error", err)).Error("Failed to check if node was previously bootstrapped") - reboot.Reboot(fmt.Errorf("checking if node was previously bootstrapped: %w", err)) + os.Exit(1) } if nodeBootstrapped { if err := kube.StartKubelet(); err != nil { log.With(slog.Any("error", err)).Error("Failed to restart kubelet") - reboot.Reboot(fmt.Errorf("restarting kubelet: %w", err)) + os.Exit(1) } return } nodeLock := nodelock.New(openDevice) - initServer, err := initserver.New(context.Background(), nodeLock, kube, issuer, disk, fileHandler, metadata, log) + initServer, err := initserver.New(context.Background(), nodeLock, kube, issuer, fileHandler, metadata, log) if err != nil { log.With(slog.Any("error", err)).Error("Failed to create init server") - reboot.Reboot(fmt.Errorf("creating init server: %w", err)) + os.Exit(1) } dialer := dialer.New(issuer, nil, &net.Dialer{}) - joinClient := joinclient.New(nodeLock, dialer, kube, metadata, disk, log) + joinClient := joinclient.New(nodeLock, dialer, kube, metadata, log) cleaner := clean.New().With(initServer).With(joinClient) go cleaner.Start() defer cleaner.Done() - var wg sync.WaitGroup + joinClient.Start(cleaner) - wg.Add(1) - go func() { - defer wg.Done() - if err := joinClient.Start(cleaner); err != nil { - log.With(slog.Any("error", err)).Error("Failed to join cluster") - markDiskForReset(disk) - 
reboot.Reboot(fmt.Errorf("joining cluster: %w", err)) - } - }() - - wg.Add(1) - go func() { - defer wg.Done() - if err := initServer.Serve(bindIP, bindPort, cleaner); err != nil { - log.With(slog.Any("error", err)).Error("Failed to serve init server") - markDiskForReset(disk) - reboot.Reboot(fmt.Errorf("serving init server: %w", err)) - } - }() - wg.Wait() + if err := initServer.Serve(bindIP, bindPort, cleaner); err != nil { + log.With(slog.Any("error", err)).Error("Failed to serve init server") + os.Exit(1) + } log.Info("bootstrapper done") } -func getDiskUUID(disk *diskencryption.DiskEncryption) (string, error) { +func getDiskUUID() (string, error) { + disk := diskencryption.New() free, err := disk.Open() if err != nil { return "", err @@ -104,22 +87,6 @@ func getDiskUUID(disk *diskencryption.DiskEncryption) (string, error) { return disk.UUID() } -// markDiskForReset sets a token in the cryptsetup header of the disk to indicate the disk should be reset on next boot. -// This is used to reset all state of a node in case the bootstrapper encountered a non recoverable error -// after the node successfully retrieved a join ticket from the JoinService. -// As setting this token is safe as long as we are certain we don't need the data on the disk anymore, we call this -// unconditionally when either the JoinClient or the InitServer encounter an error. -// We don't call it before that, as the node may be restarting after a previous, successful bootstrapping, -// and now encountered a transient error on rejoining the cluster. Wiping the disk now would delete existing data. -func markDiskForReset(disk *diskencryption.DiskEncryption) { - free, err := disk.Open() - if err != nil { - return - } - defer free() - _ = disk.MarkDiskForReset() -} - type clusterInitJoiner interface { joinclient.ClusterJoiner initserver.ClusterInitializer diff --git a/bootstrapper/cmd/bootstrapper/test.go b/bootstrapper/cmd/bootstrapper/test.go index 0f6707bcd..05840de33 100644 --- a/bootstrapper/cmd/bootstrapper/test.go +++ b/bootstrapper/cmd/bootstrapper/test.go @@ -1,13 +1,14 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package main import ( "context" + "log/slog" "github.com/edgelesssys/constellation/v2/internal/cloud/metadata" "github.com/edgelesssys/constellation/v2/internal/role" @@ -21,13 +22,13 @@ type clusterFake struct{} // InitCluster fakes bootstrapping a new cluster with the current node being the master, returning the arguments required to join the cluster. func (c *clusterFake) InitCluster( context.Context, string, string, - bool, components.Components, []string, string, + bool, components.Components, []string, string, *slog.Logger, ) ([]byte, error) { return []byte{}, nil } // JoinCluster will fake joining the current node to an existing cluster. -func (c *clusterFake) JoinCluster(context.Context, *kubeadm.BootstrapTokenDiscovery, role.Role, components.Components) error { +func (c *clusterFake) JoinCluster(context.Context, *kubeadm.BootstrapTokenDiscovery, role.Role, components.Components, *slog.Logger) error { return nil } diff --git a/bootstrapper/initproto/init.pb.go b/bootstrapper/initproto/init.pb.go index 5ce2213ae..e2d1e2cf6 100644 --- a/bootstrapper/initproto/init.pb.go +++ b/bootstrapper/initproto/init.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.6 -// protoc v5.29.1 +// protoc-gen-go v1.33.0 +// protoc v4.22.1 // source: bootstrapper/initproto/init.proto package initproto @@ -16,7 +16,6 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" - unsafe "unsafe" ) const ( @@ -27,7 +26,10 @@ const ( ) type InitRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + KmsUri string `protobuf:"bytes,1,opt,name=kms_uri,json=kmsUri,proto3" json:"kms_uri,omitempty"` StorageUri string `protobuf:"bytes,2,opt,name=storage_uri,json=storageUri,proto3" json:"storage_uri,omitempty"` MeasurementSalt []byte `protobuf:"bytes,3,opt,name=measurement_salt,json=measurementSalt,proto3" json:"measurement_salt,omitempty"` @@ -38,15 +40,15 @@ type InitRequest struct { ClusterName string `protobuf:"bytes,9,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` ApiserverCertSans []string `protobuf:"bytes,10,rep,name=apiserver_cert_sans,json=apiserverCertSans,proto3" json:"apiserver_cert_sans,omitempty"` ServiceCidr string `protobuf:"bytes,11,opt,name=service_cidr,json=serviceCidr,proto3" json:"service_cidr,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache } func (x *InitRequest) Reset() { *x = InitRequest{} - mi := &file_bootstrapper_initproto_init_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_bootstrapper_initproto_init_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *InitRequest) String() string { @@ -57,7 +59,7 @@ func (*InitRequest) ProtoMessage() {} func (x *InitRequest) ProtoReflect() protoreflect.Message { mi := &file_bootstrapper_initproto_init_proto_msgTypes[0] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -143,22 +145,25 @@ func (x *InitRequest) GetServiceCidr() string { } type InitResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Types that are valid to be assigned to Kind: + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Kind: // // *InitResponse_InitSuccess // *InitResponse_InitFailure // *InitResponse_Log - Kind isInitResponse_Kind `protobuf_oneof:"kind"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Kind isInitResponse_Kind `protobuf_oneof:"kind"` } func (x *InitResponse) Reset() { *x = InitResponse{} - mi := &file_bootstrapper_initproto_init_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_bootstrapper_initproto_init_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *InitResponse) String() string { @@ -169,7 +174,7 @@ func (*InitResponse) ProtoMessage() {} func (x *InitResponse) ProtoReflect() protoreflect.Message { mi := &file_bootstrapper_initproto_init_proto_msgTypes[1] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -184,36 +189,30 @@ func (*InitResponse) Descriptor() ([]byte, 
[]int) { return file_bootstrapper_initproto_init_proto_rawDescGZIP(), []int{1} } -func (x *InitResponse) GetKind() isInitResponse_Kind { - if x != nil { - return x.Kind +func (m *InitResponse) GetKind() isInitResponse_Kind { + if m != nil { + return m.Kind } return nil } func (x *InitResponse) GetInitSuccess() *InitSuccessResponse { - if x != nil { - if x, ok := x.Kind.(*InitResponse_InitSuccess); ok { - return x.InitSuccess - } + if x, ok := x.GetKind().(*InitResponse_InitSuccess); ok { + return x.InitSuccess } return nil } func (x *InitResponse) GetInitFailure() *InitFailureResponse { - if x != nil { - if x, ok := x.Kind.(*InitResponse_InitFailure); ok { - return x.InitFailure - } + if x, ok := x.GetKind().(*InitResponse_InitFailure); ok { + return x.InitFailure } return nil } func (x *InitResponse) GetLog() *LogResponseType { - if x != nil { - if x, ok := x.Kind.(*InitResponse_Log); ok { - return x.Log - } + if x, ok := x.GetKind().(*InitResponse_Log); ok { + return x.Log } return nil } @@ -241,19 +240,22 @@ func (*InitResponse_InitFailure) isInitResponse_Kind() {} func (*InitResponse_Log) isInitResponse_Kind() {} type InitSuccessResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Kubeconfig []byte `protobuf:"bytes,1,opt,name=kubeconfig,proto3" json:"kubeconfig,omitempty"` - OwnerId []byte `protobuf:"bytes,2,opt,name=owner_id,json=ownerId,proto3" json:"owner_id,omitempty"` - ClusterId []byte `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Kubeconfig []byte `protobuf:"bytes,1,opt,name=kubeconfig,proto3" json:"kubeconfig,omitempty"` + OwnerId []byte `protobuf:"bytes,2,opt,name=owner_id,json=ownerId,proto3" json:"owner_id,omitempty"` + ClusterId []byte `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` } func (x *InitSuccessResponse) Reset() { *x = InitSuccessResponse{} - mi := &file_bootstrapper_initproto_init_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_bootstrapper_initproto_init_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *InitSuccessResponse) String() string { @@ -264,7 +266,7 @@ func (*InitSuccessResponse) ProtoMessage() {} func (x *InitSuccessResponse) ProtoReflect() protoreflect.Message { mi := &file_bootstrapper_initproto_init_proto_msgTypes[2] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -301,17 +303,20 @@ func (x *InitSuccessResponse) GetClusterId() []byte { } type InitFailureResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` } func (x *InitFailureResponse) Reset() { *x = InitFailureResponse{} - mi := &file_bootstrapper_initproto_init_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_bootstrapper_initproto_init_proto_msgTypes[3] 
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *InitFailureResponse) String() string { @@ -322,7 +327,7 @@ func (*InitFailureResponse) ProtoMessage() {} func (x *InitFailureResponse) ProtoReflect() protoreflect.Message { mi := &file_bootstrapper_initproto_init_proto_msgTypes[3] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -345,17 +350,20 @@ func (x *InitFailureResponse) GetError() string { } type LogResponseType struct { - state protoimpl.MessageState `protogen:"open.v1"` - Log []byte `protobuf:"bytes,1,opt,name=log,proto3" json:"log,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Log []byte `protobuf:"bytes,1,opt,name=log,proto3" json:"log,omitempty"` } func (x *LogResponseType) Reset() { *x = LogResponseType{} - mi := &file_bootstrapper_initproto_init_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_bootstrapper_initproto_init_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *LogResponseType) String() string { @@ -366,7 +374,7 @@ func (*LogResponseType) ProtoMessage() {} func (x *LogResponseType) ProtoReflect() protoreflect.Message { mi := &file_bootstrapper_initproto_init_proto_msgTypes[4] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -389,20 +397,23 @@ func (x *LogResponseType) GetLog() []byte { } type KubernetesComponent struct { - state protoimpl.MessageState `protogen:"open.v1"` - Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` - Hash string `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` - InstallPath string `protobuf:"bytes,3,opt,name=install_path,json=installPath,proto3" json:"install_path,omitempty"` - Extract bool `protobuf:"varint,4,opt,name=extract,proto3" json:"extract,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` + Hash string `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` + InstallPath string `protobuf:"bytes,3,opt,name=install_path,json=installPath,proto3" json:"install_path,omitempty"` + Extract bool `protobuf:"varint,4,opt,name=extract,proto3" json:"extract,omitempty"` } func (x *KubernetesComponent) Reset() { *x = KubernetesComponent{} - mi := &file_bootstrapper_initproto_init_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_bootstrapper_initproto_init_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *KubernetesComponent) String() string { @@ -413,7 +424,7 @@ func (*KubernetesComponent) ProtoMessage() {} func (x *KubernetesComponent) ProtoReflect() protoreflect.Message { mi := &file_bootstrapper_initproto_init_proto_msgTypes[5] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ 
-458,61 +469,98 @@ func (x *KubernetesComponent) GetExtract() bool { var File_bootstrapper_initproto_init_proto protoreflect.FileDescriptor -const file_bootstrapper_initproto_init_proto_rawDesc = "" + - "\n" + - "!bootstrapper/initproto/init.proto\x12\x04init\x1a-internal/versions/components/components.proto\"\xd0\x03\n" + - "\vInitRequest\x12\x17\n" + - "\akms_uri\x18\x01 \x01(\tR\x06kmsUri\x12\x1f\n" + - "\vstorage_uri\x18\x02 \x01(\tR\n" + - "storageUri\x12)\n" + - "\x10measurement_salt\x18\x03 \x01(\fR\x0fmeasurementSalt\x12-\n" + - "\x12kubernetes_version\x18\x05 \x01(\tR\x11kubernetesVersion\x12)\n" + - "\x10conformance_mode\x18\x06 \x01(\bR\x0fconformanceMode\x12J\n" + - "\x15kubernetes_components\x18\a \x03(\v2\x15.components.ComponentR\x14kubernetesComponents\x12\x1f\n" + - "\vinit_secret\x18\b \x01(\fR\n" + - "initSecret\x12!\n" + - "\fcluster_name\x18\t \x01(\tR\vclusterName\x12.\n" + - "\x13apiserver_cert_sans\x18\n" + - " \x03(\tR\x11apiserverCertSans\x12!\n" + - "\fservice_cidr\x18\v \x01(\tR\vserviceCidrJ\x04\b\x04\x10\x05R\x19cloud_service_account_uri\"\xc1\x01\n" + - "\fInitResponse\x12>\n" + - "\finit_success\x18\x01 \x01(\v2\x19.init.InitSuccessResponseH\x00R\vinitSuccess\x12>\n" + - "\finit_failure\x18\x02 \x01(\v2\x19.init.InitFailureResponseH\x00R\vinitFailure\x12)\n" + - "\x03log\x18\x03 \x01(\v2\x15.init.LogResponseTypeH\x00R\x03logB\x06\n" + - "\x04kind\"o\n" + - "\x13InitSuccessResponse\x12\x1e\n" + - "\n" + - "kubeconfig\x18\x01 \x01(\fR\n" + - "kubeconfig\x12\x19\n" + - "\bowner_id\x18\x02 \x01(\fR\aownerId\x12\x1d\n" + - "\n" + - "cluster_id\x18\x03 \x01(\fR\tclusterId\"+\n" + - "\x13InitFailureResponse\x12\x14\n" + - "\x05error\x18\x01 \x01(\tR\x05error\"#\n" + - "\x0fLogResponseType\x12\x10\n" + - "\x03log\x18\x01 \x01(\fR\x03log\"x\n" + - "\x13KubernetesComponent\x12\x10\n" + - "\x03url\x18\x01 \x01(\tR\x03url\x12\x12\n" + - "\x04hash\x18\x02 \x01(\tR\x04hash\x12!\n" + - "\finstall_path\x18\x03 \x01(\tR\vinstallPath\x12\x18\n" + - "\aextract\x18\x04 \x01(\bR\aextract26\n" + - "\x03API\x12/\n" + - "\x04Init\x12\x11.init.InitRequest\x1a\x12.init.InitResponse0\x01B@Z>github.com/edgelesssys/constellation/v2/bootstrapper/initprotob\x06proto3" +var file_bootstrapper_initproto_init_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x2f, 0x69, + 0x6e, 0x69, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x69, 0x6e, 0x69, 0x74, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x04, 0x69, 0x6e, 0x69, 0x74, 0x1a, 0x2d, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x63, 0x6f, 0x6d, + 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x2f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, + 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd0, 0x03, 0x0a, 0x0b, 0x49, 0x6e, 0x69, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f, + 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x55, 0x72, + 0x69, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x75, 0x72, 0x69, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x55, + 0x72, 0x69, 0x12, 0x29, 0x0a, 0x10, 0x6d, 0x65, 0x61, 0x73, 0x75, 0x72, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x5f, 0x73, 0x61, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x6d, 0x65, + 0x61, 0x73, 0x75, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x61, 0x6c, 0x74, 0x12, 0x2d, 0x0a, + 0x12, 
0x6b, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x6b, 0x75, 0x62, 0x65, 0x72, + 0x6e, 0x65, 0x74, 0x65, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x0a, 0x10, + 0x63, 0x6f, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6d, 0x6f, 0x64, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x6e, 0x63, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x4a, 0x0a, 0x15, 0x6b, 0x75, 0x62, 0x65, 0x72, + 0x6e, 0x65, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, + 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, + 0x6e, 0x74, 0x73, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x52, 0x14, 0x6b, + 0x75, 0x62, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, + 0x6e, 0x74, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x6e, 0x69, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x69, 0x6e, 0x69, 0x74, 0x53, 0x65, + 0x63, 0x72, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x13, 0x61, 0x70, 0x69, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x73, 0x61, 0x6e, 0x73, 0x18, 0x0a, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x11, 0x61, 0x70, 0x69, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, + 0x65, 0x72, 0x74, 0x53, 0x61, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5f, 0x63, 0x69, 0x64, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x69, 0x64, 0x72, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, + 0x52, 0x19, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x75, 0x72, 0x69, 0x22, 0xc1, 0x01, 0x0a, 0x0c, + 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x0c, + 0x69, 0x6e, 0x69, 0x74, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x69, 0x6e, 0x69, 0x74, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x53, 0x75, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, + 0x0b, 0x69, 0x6e, 0x69, 0x74, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x3e, 0x0a, 0x0c, + 0x69, 0x6e, 0x69, 0x74, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x69, 0x6e, 0x69, 0x74, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x46, 0x61, + 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, + 0x0b, 0x69, 0x6e, 0x69, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x29, 0x0a, 0x03, + 0x6c, 0x6f, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x69, 0x6e, 0x69, 0x74, + 0x2e, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x54, 0x79, 0x70, 0x65, + 0x48, 0x00, 0x52, 0x03, 0x6c, 0x6f, 0x67, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, + 0x6f, 0x0a, 0x13, 0x49, 0x6e, 0x69, 0x74, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x6b, 0x75, 0x62, 0x65, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x18, 
0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x6b, 0x75, 0x62, 0x65, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x49, + 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, + 0x22, 0x2b, 0x0a, 0x13, 0x49, 0x6e, 0x69, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x23, 0x0a, + 0x0f, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x10, 0x0a, 0x03, 0x6c, 0x6f, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6c, + 0x6f, 0x67, 0x22, 0x78, 0x0a, 0x13, 0x4b, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x65, 0x73, + 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x68, + 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x12, + 0x21, 0x0a, 0x0c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6c, 0x6c, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6c, 0x6c, 0x50, 0x61, + 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x32, 0x36, 0x0a, 0x03, + 0x41, 0x50, 0x49, 0x12, 0x2f, 0x0a, 0x04, 0x49, 0x6e, 0x69, 0x74, 0x12, 0x11, 0x2e, 0x69, 0x6e, + 0x69, 0x74, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, + 0x2e, 0x69, 0x6e, 0x69, 0x74, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x30, 0x01, 0x42, 0x40, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x65, 0x64, 0x67, 0x65, 0x6c, 0x65, 0x73, 0x73, 0x73, 0x79, 0x73, 0x2f, 0x63, + 0x6f, 0x6e, 0x73, 0x74, 0x65, 0x6c, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x32, 0x2f, + 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x2f, 0x69, 0x6e, 0x69, + 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} var ( file_bootstrapper_initproto_init_proto_rawDescOnce sync.Once - file_bootstrapper_initproto_init_proto_rawDescData []byte + file_bootstrapper_initproto_init_proto_rawDescData = file_bootstrapper_initproto_init_proto_rawDesc ) func file_bootstrapper_initproto_init_proto_rawDescGZIP() []byte { file_bootstrapper_initproto_init_proto_rawDescOnce.Do(func() { - file_bootstrapper_initproto_init_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_bootstrapper_initproto_init_proto_rawDesc), len(file_bootstrapper_initproto_init_proto_rawDesc))) + file_bootstrapper_initproto_init_proto_rawDescData = protoimpl.X.CompressGZIP(file_bootstrapper_initproto_init_proto_rawDescData) }) return file_bootstrapper_initproto_init_proto_rawDescData } var file_bootstrapper_initproto_init_proto_msgTypes = make([]protoimpl.MessageInfo, 6) -var file_bootstrapper_initproto_init_proto_goTypes = []any{ +var file_bootstrapper_initproto_init_proto_goTypes = []interface{}{ (*InitRequest)(nil), // 0: init.InitRequest 
(*InitResponse)(nil), // 1: init.InitResponse (*InitSuccessResponse)(nil), // 2: init.InitSuccessResponse @@ -540,7 +588,81 @@ func file_bootstrapper_initproto_init_proto_init() { if File_bootstrapper_initproto_init_proto != nil { return } - file_bootstrapper_initproto_init_proto_msgTypes[1].OneofWrappers = []any{ + if !protoimpl.UnsafeEnabled { + file_bootstrapper_initproto_init_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InitRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bootstrapper_initproto_init_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InitResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bootstrapper_initproto_init_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InitSuccessResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bootstrapper_initproto_init_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InitFailureResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bootstrapper_initproto_init_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LogResponseType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_bootstrapper_initproto_init_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KubernetesComponent); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_bootstrapper_initproto_init_proto_msgTypes[1].OneofWrappers = []interface{}{ (*InitResponse_InitSuccess)(nil), (*InitResponse_InitFailure)(nil), (*InitResponse_Log)(nil), @@ -549,7 +671,7 @@ func file_bootstrapper_initproto_init_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_bootstrapper_initproto_init_proto_rawDesc), len(file_bootstrapper_initproto_init_proto_rawDesc)), + RawDescriptor: file_bootstrapper_initproto_init_proto_rawDesc, NumEnums: 0, NumMessages: 6, NumExtensions: 0, @@ -560,6 +682,7 @@ func file_bootstrapper_initproto_init_proto_init() { MessageInfos: file_bootstrapper_initproto_init_proto_msgTypes, }.Build() File_bootstrapper_initproto_init_proto = out.File + file_bootstrapper_initproto_init_proto_rawDesc = nil file_bootstrapper_initproto_init_proto_goTypes = nil file_bootstrapper_initproto_init_proto_depIdxs = nil } diff --git a/bootstrapper/internal/addresses/BUILD.bazel b/bootstrapper/internal/addresses/BUILD.bazel deleted file mode 100644 index 9311c4a61..000000000 --- a/bootstrapper/internal/addresses/BUILD.bazel +++ /dev/null @@ -1,26 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") -load("//bazel/go:go_test.bzl", "go_test") - -go_library( - name = "interfaces", - srcs = ["interfaces.go"], - importpath = "github.com/edgelesssys/constellation/v2/bootstrapper/internal/interfaces", - visibility = ["//bootstrapper:__subpackages__"], -) - -go_library( - name = "addresses", 
- srcs = ["addresses.go"], - importpath = "github.com/edgelesssys/constellation/v2/bootstrapper/internal/addresses", - visibility = ["//bootstrapper:__subpackages__"], -) - -go_test( - name = "addresses_test", - srcs = ["addresses_test.go"], - deps = [ - ":addresses", - "@com_github_stretchr_testify//assert", - "@com_github_stretchr_testify//require", - ], -) diff --git a/bootstrapper/internal/addresses/addresses.go b/bootstrapper/internal/addresses/addresses.go deleted file mode 100644 index d17563b4f..000000000 --- a/bootstrapper/internal/addresses/addresses.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright (c) Edgeless Systems GmbH - -SPDX-License-Identifier: BUSL-1.1 -*/ - -package addresses - -import ( - "net" -) - -// GetMachineNetworkAddresses retrieves all network interface addresses. -func GetMachineNetworkAddresses(interfaces []NetInterface) ([]string, error) { - var addresses []string - - for _, i := range interfaces { - addrs, err := i.Addrs() - if err != nil { - return nil, err - } - for _, addr := range addrs { - var ip net.IP - switch v := addr.(type) { - case *net.IPNet: - ip = v.IP - case *net.IPAddr: - ip = v.IP - default: - continue - } - if ip.IsLoopback() { - continue - } - addresses = append(addresses, ip.String()) - } - } - - return addresses, nil -} - -// NetInterface represents a network interface used to get network addresses. -type NetInterface interface { - Addrs() ([]net.Addr, error) -} diff --git a/bootstrapper/internal/addresses/addresses_test.go b/bootstrapper/internal/addresses/addresses_test.go deleted file mode 100644 index ceb881ddf..000000000 --- a/bootstrapper/internal/addresses/addresses_test.go +++ /dev/null @@ -1,67 +0,0 @@ -/* -Copyright (c) Edgeless Systems GmbH - -SPDX-License-Identifier: BUSL-1.1 -*/ - -package addresses_test - -import ( - "errors" - "net" - "testing" - - "github.com/edgelesssys/constellation/v2/bootstrapper/internal/addresses" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestGetMachineNetworkAddresses(t *testing.T) { - _, someAddr, err := net.ParseCIDR("10.9.0.1/24") - require.NoError(t, err) - - testCases := map[string]struct { - interfaces []addresses.NetInterface - wantErr bool - }{ - "successful": { - interfaces: []addresses.NetInterface{ - &mockNetInterface{ - addrs: []net.Addr{ - someAddr, - }, - }, - }, - }, - "unsuccessful": { - interfaces: []addresses.NetInterface{ - &mockNetInterface{addrs: nil, err: errors.New("someError")}, - }, - wantErr: true, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - assert := assert.New(t) - - addrs, err := addresses.GetMachineNetworkAddresses(tc.interfaces) - - if tc.wantErr { - assert.Error(err) - } else { - assert.Equal([]string{"10.9.0.0"}, addrs) - assert.NoError(err) - } - }) - } -} - -type mockNetInterface struct { - addrs []net.Addr - err error -} - -func (m *mockNetInterface) Addrs() ([]net.Addr, error) { - return m.addrs, m.err -} diff --git a/bootstrapper/internal/certificate/certificate.go b/bootstrapper/internal/certificate/certificate.go index f99ebae02..e27836bcd 100644 --- a/bootstrapper/internal/certificate/certificate.go +++ b/bootstrapper/internal/certificate/certificate.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package certificate provides functions to create a certificate request and matching private key. 
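One detail of the removed addresses_test.go above is easy to misread: the expected result "10.9.0.0" for the input CIDR "10.9.0.1/24" comes from the standard library, which masks off the host bits when parsing CIDR notation. A minimal, runnable illustration:

package main

import (
	"fmt"
	"net"
)

func main() {
	// net.ParseCIDR returns the network address (host bits cleared), and the removed
	// GetMachineNetworkAddresses stringified exactly that IP value.
	_, ipNet, err := net.ParseCIDR("10.9.0.1/24")
	if err != nil {
		panic(err)
	}
	fmt.Println(ipNet.IP.String()) // prints "10.9.0.0"
}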
diff --git a/bootstrapper/internal/clean/clean.go b/bootstrapper/internal/clean/clean.go index 28d9e795d..71d46b4ec 100644 --- a/bootstrapper/internal/clean/clean.go +++ b/bootstrapper/internal/clean/clean.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package clean provides functionality to stop a list of services gracefully and synchronously. diff --git a/bootstrapper/internal/clean/clean_test.go b/bootstrapper/internal/clean/clean_test.go index 5083265e0..f4dd2558c 100644 --- a/bootstrapper/internal/clean/clean_test.go +++ b/bootstrapper/internal/clean/clean_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package clean diff --git a/bootstrapper/internal/diskencryption/diskencryption.go b/bootstrapper/internal/diskencryption/diskencryption.go index 5cd3f543b..eaf97e7ab 100644 --- a/bootstrapper/internal/diskencryption/diskencryption.go +++ b/bootstrapper/internal/diskencryption/diskencryption.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package diskencryption handles interaction with a node's state disk. @@ -60,11 +60,6 @@ func (c *DiskEncryption) UpdatePassphrase(passphrase string) error { return c.device.SetConstellationStateDiskToken(cryptsetup.SetDiskInitialized) } -// MarkDiskForReset marks the state disk as not initialized so it may be wiped (reset) on reboot. -func (c *DiskEncryption) MarkDiskForReset() error { - return c.device.SetConstellationStateDiskToken(cryptsetup.SetDiskNotInitialized) -} - // getInitialPassphrase retrieves the initial passphrase used on first boot. func (c *DiskEncryption) getInitialPassphrase() (string, error) { passphrase, err := afero.ReadFile(c.fs, initialKeyPath) diff --git a/bootstrapper/internal/diskencryption/diskencryption_test.go b/bootstrapper/internal/diskencryption/diskencryption_test.go index 7cb5c6483..3de14842a 100644 --- a/bootstrapper/internal/diskencryption/diskencryption_test.go +++ b/bootstrapper/internal/diskencryption/diskencryption_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package diskencryption diff --git a/bootstrapper/internal/etcdio/BUILD.bazel b/bootstrapper/internal/etcdio/BUILD.bazel deleted file mode 100644 index 7f33bd901..000000000 --- a/bootstrapper/internal/etcdio/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "etcdio", - srcs = [ - "etcdio.go", - "setioprio_cross.go", - "setioprio_linux.go", - ], - importpath = "github.com/edgelesssys/constellation/v2/bootstrapper/internal/etcdio", - visibility = ["//bootstrapper:__subpackages__"], - deps = select({ - "@io_bazel_rules_go//go/platform:android": [ - "@org_golang_x_sys//unix", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "@org_golang_x_sys//unix", - ], - "//conditions:default": [], - }), -) diff --git a/bootstrapper/internal/etcdio/etcdio.go b/bootstrapper/internal/etcdio/etcdio.go deleted file mode 100644 index 0befb22ee..000000000 --- a/bootstrapper/internal/etcdio/etcdio.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright (c) Edgeless Systems GmbH - -SPDX-License-Identifier: BUSL-1.1 -*/ - -// The etcdio package provides utilities to manage etcd I/O. 
-package etcdio - -import ( - "context" - "errors" - "fmt" - "log/slog" - "os" - "path" - "strconv" - "time" -) - -var ( - // ErrNoEtcdProcess is returned when no etcd process is found on the node. - ErrNoEtcdProcess = errors.New("no etcd process found on node") - // ErrMultipleEtcdProcesses is returned when multiple etcd processes are found on the node. - ErrMultipleEtcdProcesses = errors.New("multiple etcd processes found on node") -) - -const ( - // Tells the syscall that a process' priority is going to be set. - // See https://elixir.bootlin.com/linux/v6.9.1/source/include/uapi/linux/ioprio.h#L54. - ioPrioWhoProcess = 1 - - // See https://elixir.bootlin.com/linux/v6.9.1/source/include/uapi/linux/ioprio.h#L11. - ioPrioClassShift = 13 - ioPrioNrClasses = 8 - ioPrioClassMask = ioPrioNrClasses - 1 - ioPrioPrioMask = (1 << ioPrioClassShift) - 1 - - targetClass = 1 // Realtime IO class for best scheduling prio - targetPrio = 0 // Highest priority within the class -) - -// Client is a client for managing etcd I/O. -type Client struct { - log *slog.Logger -} - -// NewClient creates a new etcd I/O management client. -func NewClient(log *slog.Logger) *Client { - return &Client{log: log} -} - -// PrioritizeIO tries to prioritize the I/O of the etcd process. -// Since it might be possible that the process just started (if this method is called -// right after the kubelet started), it retries to do its work each second -// until it succeeds or the timeout of 10 seconds is reached. -func (c *Client) PrioritizeIO() { - ticker := time.NewTicker(1 * time.Second) - defer ticker.Stop() - timeout, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - for { - c.log.Info("Prioritizing etcd I/O") - err := c.setIOPriority() - if err == nil { - // Success, return directly - return - } else if errors.Is(err, ErrNoEtcdProcess) { - c.log.Info("No etcd process found, retrying") - } else { - c.log.Warn("Prioritizing etcd I/O failed", "error", err) - return - } - - select { - case <-ticker.C: - case <-timeout.Done(): - c.log.Warn("Timed out waiting for etcd to start") - return - } - } -} - -// setIOPriority tries to find the etcd process on the node and prioritizes its I/O. -func (c *Client) setIOPriority() error { - // find etcd process(es) - pid, err := c.findEtcdProcess() - if err != nil { - return fmt.Errorf("finding etcd process: %w", err) - } - - // Highest realtime priority value for the etcd process, see https://elixir.bootlin.com/linux/v6.9.1/source/include/uapi/linux/ioprio.h - // for the calculation details. - prioVal := ((targetClass & ioPrioClassMask) << ioPrioClassShift) | (targetPrio & ioPrioPrioMask) - - // see https://man7.org/linux/man-pages/man2/ioprio_set.2.html - ret, _, errno := setioprio(ioPrioWhoProcess, uintptr(pid), uintptr(prioVal)) - if ret != 0 { - return fmt.Errorf("setting I/O priority for etcd: %w", errno) - } - - return nil -} - -// findEtcdProcess tries to find the etcd process on the node. 
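To make the priority value computed in setIOPriority above concrete, here is a self-contained sketch that reuses the same constants; the printed number is what setioprio hands to the ioprio_set(2) syscall (together with IOPRIO_WHO_PROCESS and the etcd PID):

package main

import "fmt"

func main() {
	const (
		ioPrioClassShift = 13
		ioPrioClassMask  = 8 - 1 // ioPrioNrClasses - 1
		ioPrioPrioMask   = (1 << ioPrioClassShift) - 1

		targetClass = 1 // realtime I/O class
		targetPrio  = 0 // highest priority within that class
	)
	// Same formula as setIOPriority: class in the upper bits, priority data in the lower 13.
	prioVal := ((targetClass & ioPrioClassMask) << ioPrioClassShift) | (targetPrio & ioPrioPrioMask)
	fmt.Println(prioVal) // 8192, i.e. IOPRIO_CLASS_RT (1) << 13 with data 0
}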
-func (c *Client) findEtcdProcess() (int, error) { - procDir, err := os.Open("/proc") - if err != nil { - return 0, fmt.Errorf("opening /proc: %w", err) - } - defer procDir.Close() - - procEntries, err := procDir.Readdirnames(0) - if err != nil { - return 0, fmt.Errorf("reading /proc: %w", err) - } - - // find etcd process(es) - etcdPIDs := []int{} - for _, f := range procEntries { - // exclude non-pid dirs - if f[0] < '0' || f[0] > '9' { - continue - } - - exe, err := os.Readlink(fmt.Sprintf("/proc/%s/exe", f)) - if err != nil { - continue - } - - if path.Base(exe) != "etcd" { - continue - } - - pid, err := strconv.Atoi(f) - if err != nil { - continue - } - - // add the PID to the list of etcd PIDs - etcdPIDs = append(etcdPIDs, pid) - } - - if len(etcdPIDs) == 0 { - return 0, ErrNoEtcdProcess - } - - if len(etcdPIDs) > 1 { - return 0, ErrMultipleEtcdProcesses - } - - return etcdPIDs[0], nil -} diff --git a/bootstrapper/internal/etcdio/setioprio_cross.go b/bootstrapper/internal/etcdio/setioprio_cross.go deleted file mode 100644 index 68e5f8e1e..000000000 --- a/bootstrapper/internal/etcdio/setioprio_cross.go +++ /dev/null @@ -1,17 +0,0 @@ -//go:build !linux - -/* -Copyright (c) Edgeless Systems GmbH - -SPDX-License-Identifier: BUSL-1.1 -*/ - -package etcdio - -import ( - "syscall" -) - -func setioprio(_, _, _ uintptr) (uintptr, uintptr, syscall.Errno) { - panic("setioprio not implemented on non-Linux platforms") -} diff --git a/bootstrapper/internal/etcdio/setioprio_linux.go b/bootstrapper/internal/etcdio/setioprio_linux.go deleted file mode 100644 index ecd1df82d..000000000 --- a/bootstrapper/internal/etcdio/setioprio_linux.go +++ /dev/null @@ -1,19 +0,0 @@ -//go:build linux - -/* -Copyright (c) Edgeless Systems GmbH - -SPDX-License-Identifier: BUSL-1.1 -*/ - -package etcdio - -import ( - "syscall" - - "golang.org/x/sys/unix" -) - -func setioprio(ioPrioWhoProcess, pid, prioVal uintptr) (uintptr, uintptr, syscall.Errno) { - return unix.Syscall(unix.SYS_IOPRIO_SET, ioPrioWhoProcess, pid, prioVal) -} diff --git a/bootstrapper/internal/initserver/BUILD.bazel b/bootstrapper/internal/initserver/BUILD.bazel index c25ec5c4a..009bb0594 100644 --- a/bootstrapper/internal/initserver/BUILD.bazel +++ b/bootstrapper/internal/initserver/BUILD.bazel @@ -8,11 +8,10 @@ go_library( visibility = ["//bootstrapper:__subpackages__"], deps = [ "//bootstrapper/initproto", - "//bootstrapper/internal/addresses", + "//bootstrapper/internal/diskencryption", "//bootstrapper/internal/journald", "//internal/atls", "//internal/attestation", - "//internal/constants", "//internal/crypto", "//internal/file", "//internal/grpc/atlscredentials", @@ -23,12 +22,11 @@ go_library( "//internal/nodestate", "//internal/role", "//internal/versions/components", - "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//:go_default_library", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//keepalive", "@org_golang_google_grpc//status", "@org_golang_x_crypto//bcrypt", - "@org_golang_x_crypto//ssh", ], ) @@ -44,7 +42,6 @@ go_test( "//bootstrapper/initproto", "//internal/atls", "//internal/attestation/variant", - "//internal/constants", "//internal/crypto/testvector", "//internal/file", "//internal/kms/setup", @@ -54,9 +51,8 @@ go_test( "@com_github_spf13_afero//:afero", "@com_github_stretchr_testify//assert", "@com_github_stretchr_testify//require", - "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//:go_default_library", "@org_golang_x_crypto//bcrypt", - "@org_golang_x_crypto//ssh", 
"@org_uber_go_goleak//:goleak", ], ) diff --git a/bootstrapper/internal/initserver/initserver.go b/bootstrapper/internal/initserver/initserver.go index 5f118d8c0..ff2e5e975 100644 --- a/bootstrapper/internal/initserver/initserver.go +++ b/bootstrapper/internal/initserver/initserver.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* @@ -20,23 +20,20 @@ package initserver import ( "bufio" "context" - "crypto/ed25519" "errors" "fmt" "io" "log/slog" "net" - "os" "strings" "sync" "time" "github.com/edgelesssys/constellation/v2/bootstrapper/initproto" - "github.com/edgelesssys/constellation/v2/bootstrapper/internal/addresses" + "github.com/edgelesssys/constellation/v2/bootstrapper/internal/diskencryption" "github.com/edgelesssys/constellation/v2/bootstrapper/internal/journald" "github.com/edgelesssys/constellation/v2/internal/atls" "github.com/edgelesssys/constellation/v2/internal/attestation" - "github.com/edgelesssys/constellation/v2/internal/constants" "github.com/edgelesssys/constellation/v2/internal/crypto" "github.com/edgelesssys/constellation/v2/internal/file" "github.com/edgelesssys/constellation/v2/internal/grpc/atlscredentials" @@ -48,7 +45,6 @@ import ( "github.com/edgelesssys/constellation/v2/internal/role" "github.com/edgelesssys/constellation/v2/internal/versions/components" "golang.org/x/crypto/bcrypt" - "golang.org/x/crypto/ssh" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/keepalive" @@ -69,7 +65,6 @@ type Server struct { shutdownLock sync.RWMutex initSecretHash []byte - initFailure error kmsURI string @@ -81,10 +76,7 @@ type Server struct { } // New creates a new initialization server. -func New( - ctx context.Context, lock locker, kube ClusterInitializer, issuer atls.Issuer, - disk encryptedDisk, fh file.Handler, metadata MetadataAPI, log *slog.Logger, -) (*Server, error) { +func New(ctx context.Context, lock locker, kube ClusterInitializer, issuer atls.Issuer, fh file.Handler, metadata MetadataAPI, log *slog.Logger) (*Server, error) { log = log.WithGroup("initServer") initSecretHash, err := metadata.InitSecretHash(ctx) @@ -102,7 +94,7 @@ func New( server := &Server{ nodeLock: lock, - disk: disk, + disk: diskencryption.New(), initializer: kube, fileHandler: fh, issuer: issuer, @@ -114,7 +106,7 @@ func New( grpcServer := grpc.NewServer( grpc.Creds(atlscredentials.New(issuer, nil)), grpc.KeepaliveParams(keepalive.ServerParameters{Time: 15 * time.Second}), - logger.GetServerUnaryInterceptor(logger.GRPCLogger(log)), + logger.GetServerUnaryInterceptor(log.WithGroup("gRPC")), ) initproto.RegisterAPIServer(grpcServer, server) @@ -131,20 +123,11 @@ func (s *Server) Serve(ip, port string, cleaner cleaner) error { } s.log.Info("Starting") - err = s.grpcServer.Serve(lis) - - // If Init failed, we mark the disk for reset, so the node can restart the process - // In this case we don't care about any potential errors from the grpc server - if s.initFailure != nil { - s.log.Error("Fatal error during Init request", "error", s.initFailure) - return err - } - - return err + return s.grpcServer.Serve(lis) } // Init initializes the cluster. 
-func (s *Server) Init(req *initproto.InitRequest, stream initproto.API_InitServer) (retErr error) { +func (s *Server) Init(req *initproto.InitRequest, stream initproto.API_InitServer) (err error) { // Acquire lock to prevent shutdown while Init is still running s.shutdownLock.RLock() defer s.shutdownLock.RUnlock() @@ -155,23 +138,35 @@ func (s *Server) Init(req *initproto.InitRequest, stream initproto.API_InitServe s.kmsURI = req.KmsUri if err := bcrypt.CompareHashAndPassword(s.initSecretHash, req.InitSecret); err != nil { - return errors.Join(err, s.sendLogsWithMessage(stream, status.Errorf(codes.Internal, "invalid init secret %s", err))) + if e := s.sendLogsWithMessage(stream, status.Errorf(codes.Internal, "invalid init secret %s", err)); e != nil { + err = errors.Join(err, e) + } + return err } cloudKms, err := kmssetup.KMS(stream.Context(), req.StorageUri, req.KmsUri) if err != nil { - return errors.Join(err, s.sendLogsWithMessage(stream, status.Errorf(codes.Internal, "creating kms client: %s", err))) + if e := s.sendLogsWithMessage(stream, status.Errorf(codes.Internal, "creating kms client: %s", err)); e != nil { + err = errors.Join(err, e) + } + return err } // generate values for cluster attestation clusterID, err := deriveMeasurementValues(stream.Context(), req.MeasurementSalt, cloudKms) if err != nil { - return errors.Join(err, s.sendLogsWithMessage(stream, status.Errorf(codes.Internal, "deriving measurement values: %s", err))) + if e := s.sendLogsWithMessage(stream, status.Errorf(codes.Internal, "deriving measurement values: %s", err)); e != nil { + err = errors.Join(err, e) + } + return err } nodeLockAcquired, err := s.nodeLock.TryLockOnce(clusterID) if err != nil { - return errors.Join(err, s.sendLogsWithMessage(stream, status.Errorf(codes.Internal, "locking node: %s", err))) + if e := s.sendLogsWithMessage(stream, status.Errorf(codes.Internal, "locking node: %s", err)); e != nil { + err = errors.Join(err, e) + } + return err } if !nodeLockAcquired { // The join client seems to already have a connection to an @@ -193,12 +188,12 @@ func (s *Server) Init(req *initproto.InitRequest, stream initproto.API_InitServe // since we are bootstrapping a new one. // Any errors following this call will result in a failed node that may not join any cluster. 
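A short aside on the error-handling pattern repeated throughout this hunk: errors.Join discards nil values, so when sendLogsWithMessage succeeds the caller still gets only the original error back, and when it fails both errors are reported. A runnable sketch of that behavior (the message strings are made up for illustration):

package main

import (
	"errors"
	"fmt"
)

func main() {
	base := errors.New("setting up disk: permission denied")

	// Joining with a nil send-error wraps only the base error.
	fmt.Println(errors.Join(base, nil))

	// Joining with a real send-error reports both, newline-separated.
	fmt.Println(errors.Join(base, errors.New("sending logs to CLI failed")))
}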
s.cleaner.Clean() - defer func() { - s.initFailure = retErr - }() if err := s.setupDisk(stream.Context(), cloudKms); err != nil { - return errors.Join(err, s.sendLogsWithMessage(stream, status.Errorf(codes.Internal, "setting up disk: %s", err))) + if e := s.sendLogsWithMessage(stream, status.Errorf(codes.Internal, "setting up disk: %s", err)); e != nil { + err = errors.Join(err, e) + } + return err } state := nodestate.NodeState{ @@ -206,63 +201,10 @@ func (s *Server) Init(req *initproto.InitRequest, stream initproto.API_InitServe MeasurementSalt: req.MeasurementSalt, } if err := state.ToFile(s.fileHandler); err != nil { - return errors.Join(err, s.sendLogsWithMessage(stream, status.Errorf(codes.Internal, "persisting node state: %s", err))) - } - - // Derive the emergency ssh CA key - key, err := cloudKms.GetDEK(stream.Context(), crypto.DEKPrefix+constants.SSHCAKeySuffix, ed25519.SeedSize) - if err != nil { - return errors.Join(err, s.sendLogsWithMessage(stream, status.Errorf(codes.Internal, "retrieving DEK for key derivation: %s", err))) - } - ca, err := crypto.GenerateEmergencySSHCAKey(key) - if err != nil { - return errors.Join(err, s.sendLogsWithMessage(stream, status.Errorf(codes.Internal, "generating emergency SSH CA key: %s", err))) - } - if err := s.fileHandler.Write(constants.SSHCAKeyPath, ssh.MarshalAuthorizedKey(ca.PublicKey()), file.OptMkdirAll); err != nil { - return errors.Join(err, s.sendLogsWithMessage(stream, status.Errorf(codes.Internal, "writing ssh CA pubkey: %s", err))) - } - - interfaces, err := net.Interfaces() - if err != nil { - return errors.Join(err, s.sendLogsWithMessage(stream, status.Errorf(codes.Internal, "getting network interfaces: %s", err))) - } - // Needed since go doesn't implicitly convert slices of structs to slices of interfaces - interfacesForFunc := make([]addresses.NetInterface, len(interfaces)) - for i := range interfaces { - interfacesForFunc[i] = &interfaces[i] - } - - principalList, err := addresses.GetMachineNetworkAddresses(interfacesForFunc) - if err != nil { - return errors.Join(err, s.sendLogsWithMessage(stream, status.Errorf(codes.Internal, "failed to get network addresses: %s", err))) - } - hostname, err := os.Hostname() - if err != nil { - return errors.Join(err, s.sendLogsWithMessage(stream, status.Errorf(codes.Internal, "failed to get hostname: %s", err))) - } - - principalList = append(principalList, hostname) - principalList = append(principalList, req.ApiserverCertSans...) 
- - hostKeyContent, err := s.fileHandler.Read(constants.SSHHostKeyPath) - if err != nil { - return errors.Join(err, s.sendLogsWithMessage(stream, status.Errorf(codes.Internal, "failed to read host SSH key: %s", err))) - } - - hostPrivateKey, err := ssh.ParsePrivateKey(hostKeyContent) - if err != nil { - return errors.Join(err, s.sendLogsWithMessage(stream, status.Errorf(codes.Internal, "failed to parse host SSH key: %s", err))) - } - - hostKeyPubSSH := hostPrivateKey.PublicKey() - - hostCertificate, err := crypto.GenerateSSHHostCertificate(principalList, hostKeyPubSSH, ca) - if err != nil { - return errors.Join(err, s.sendLogsWithMessage(stream, status.Errorf(codes.Internal, "generating SSH host certificate: %s", err))) - } - - if err := s.fileHandler.Write(constants.SSHHostCertificatePath, ssh.MarshalAuthorizedKey(hostCertificate), file.OptMkdirAll); err != nil { - return errors.Join(err, s.sendLogsWithMessage(stream, status.Errorf(codes.Internal, "writing ssh host certificate: %s", err))) + if e := s.sendLogsWithMessage(stream, status.Errorf(codes.Internal, "persisting node state: %s", err)); e != nil { + err = errors.Join(err, e) + } + return err } clusterName := req.ClusterName @@ -277,9 +219,13 @@ func (s *Server) Init(req *initproto.InitRequest, stream initproto.API_InitServe req.KubernetesComponents, req.ApiserverCertSans, req.ServiceCidr, + s.log, ) if err != nil { - return errors.Join(err, s.sendLogsWithMessage(stream, status.Errorf(codes.Internal, "initializing cluster: %s", err))) + if e := s.sendLogsWithMessage(stream, status.Errorf(codes.Internal, "initializing cluster: %s", err)); e != nil { + err = errors.Join(err, e) + } + return err } log.Info("Init succeeded") @@ -396,6 +342,7 @@ type ClusterInitializer interface { kubernetesComponents components.Components, apiServerCertSANs []string, serviceCIDR string, + log *slog.Logger, ) ([]byte, error) } diff --git a/bootstrapper/internal/initserver/initserver_test.go b/bootstrapper/internal/initserver/initserver_test.go index cfecdd018..77a0b0817 100644 --- a/bootstrapper/internal/initserver/initserver_test.go +++ b/bootstrapper/internal/initserver/initserver_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package initserver @@ -9,12 +9,10 @@ package initserver import ( "bytes" "context" - "crypto/ed25519" - "encoding/pem" "errors" "io" + "log/slog" "net" - "os" "strings" "sync" "testing" @@ -23,7 +21,6 @@ import ( "github.com/edgelesssys/constellation/v2/bootstrapper/initproto" "github.com/edgelesssys/constellation/v2/internal/atls" "github.com/edgelesssys/constellation/v2/internal/attestation/variant" - "github.com/edgelesssys/constellation/v2/internal/constants" "github.com/edgelesssys/constellation/v2/internal/crypto/testvector" "github.com/edgelesssys/constellation/v2/internal/file" kmssetup "github.com/edgelesssys/constellation/v2/internal/kms/setup" @@ -35,7 +32,6 @@ import ( "github.com/stretchr/testify/require" "go.uber.org/goleak" "golang.org/x/crypto/bcrypt" - "golang.org/x/crypto/ssh" "google.golang.org/grpc" ) @@ -71,10 +67,7 @@ func TestNew(t *testing.T) { t.Run(name, func(t *testing.T) { assert := assert.New(t) - server, err := New( - t.Context(), newFakeLock(), &stubClusterInitializer{}, atls.NewFakeIssuer(variant.Dummy{}), - &stubDisk{}, fh, &tc.metadata, logger.NewTest(t), - ) + server, err := New(context.TODO(), newFakeLock(), &stubClusterInitializer{}, atls.NewFakeIssuer(variant.Dummy{}), fh, &tc.metadata, 
logger.NewTest(t)) if tc.wantErr { assert.Error(err) return @@ -105,31 +98,17 @@ func TestInit(t *testing.T) { masterSecret := uri.MasterSecret{Key: []byte("secret"), Salt: []byte("salt")} - _, privkey, err := ed25519.GenerateKey(nil) - require.NoError(t, err) - pemHostKey, err := ssh.MarshalPrivateKey(privkey, "") - require.NoError(t, err) - - fsWithHostKey := afero.NewMemMapFs() - hostKeyFile, err := fsWithHostKey.Create(constants.SSHHostKeyPath) - require.NoError(t, err) - _, err = hostKeyFile.Write(pem.EncodeToMemory(pemHostKey)) - require.NoError(t, err) - require.NoError(t, hostKeyFile.Close()) - readOnlyFSWithHostKey := afero.NewReadOnlyFs(fsWithHostKey) - testCases := map[string]struct { - nodeLock *fakeLock - initializer ClusterInitializer - disk encryptedDisk - fileHandler file.Handler - req *initproto.InitRequest - stream stubStream - logCollector stubJournaldCollector - initSecretHash []byte - hostkeyDoesntExist bool - wantErr bool - wantShutdown bool + nodeLock *fakeLock + initializer ClusterInitializer + disk encryptedDisk + fileHandler file.Handler + req *initproto.InitRequest + stream stubStream + logCollector stubJournaldCollector + initSecretHash []byte + wantErr bool + wantShutdown bool }{ "successful init": { nodeLock: newFakeLock(), @@ -193,7 +172,7 @@ func TestInit(t *testing.T) { nodeLock: newFakeLock(), initializer: &stubClusterInitializer{}, disk: &stubDisk{}, - fileHandler: file.NewHandler(readOnlyFSWithHostKey), + fileHandler: file.NewHandler(afero.NewReadOnlyFs(afero.NewMemMapFs())), req: &initproto.InitRequest{InitSecret: initSecret, KmsUri: masterSecret.EncodeToURI(), StorageUri: uri.NoStoreURI}, stream: stubStream{}, logCollector: stubJournaldCollector{logPipe: &stubReadCloser{reader: bytes.NewReader([]byte{})}}, @@ -224,31 +203,11 @@ func TestInit(t *testing.T) { logCollector: stubJournaldCollector{logPipe: &stubReadCloser{reader: bytes.NewReader([]byte{})}}, wantErr: true, }, - "host key doesn't exist": { - nodeLock: newFakeLock(), - initializer: &stubClusterInitializer{}, - disk: &stubDisk{}, - fileHandler: file.NewHandler(afero.NewMemMapFs()), - initSecretHash: initSecretHash, - req: &initproto.InitRequest{InitSecret: initSecret, KmsUri: masterSecret.EncodeToURI(), StorageUri: uri.NoStoreURI}, - stream: stubStream{}, - logCollector: stubJournaldCollector{logPipe: &stubReadCloser{reader: bytes.NewReader([]byte{})}}, - hostkeyDoesntExist: true, - wantShutdown: true, - wantErr: true, - }, } for name, tc := range testCases { t.Run(name, func(t *testing.T) { assert := assert.New(t) - require := require.New(t) - - if _, err := tc.fileHandler.Stat(constants.SSHHostKeyPath); errors.Is(err, os.ErrNotExist) { - if !tc.hostkeyDoesntExist { - require.NoError(tc.fileHandler.Write(constants.SSHHostKeyPath, pem.EncodeToMemory(pemHostKey), file.OptMkdirAll)) - } - } serveStopper := newStubServeStopper() server := &Server{ @@ -391,9 +350,9 @@ func TestSetupDisk(t *testing.T) { masterSecret := uri.MasterSecret{Key: tc.masterKey, Salt: tc.salt} - cloudKms, err := kmssetup.KMS(t.Context(), uri.NoStoreURI, masterSecret.EncodeToURI()) + cloudKms, err := kmssetup.KMS(context.Background(), uri.NoStoreURI, masterSecret.EncodeToURI()) require.NoError(err) - assert.NoError(server.setupDisk(t.Context(), cloudKms)) + assert.NoError(server.setupDisk(context.Background(), cloudKms)) }) } } @@ -422,10 +381,6 @@ func (d *fakeDisk) UpdatePassphrase(passphrase string) error { return nil } -func (d *fakeDisk) MarkDiskForReset() error { - return nil -} - type stubDisk struct { openErr error 
uuid string @@ -447,10 +402,6 @@ func (d *stubDisk) UpdatePassphrase(string) error { return d.updatePassphraseErr } -func (d *stubDisk) MarkDiskForReset() error { - return nil -} - type stubClusterInitializer struct { initClusterKubeconfig []byte initClusterErr error @@ -458,7 +409,7 @@ type stubClusterInitializer struct { func (i *stubClusterInitializer) InitCluster( context.Context, string, string, - bool, components.Components, []string, string, + bool, components.Components, []string, string, *slog.Logger, ) ([]byte, error) { return i.initClusterKubeconfig, i.initClusterErr } diff --git a/bootstrapper/internal/joinclient/BUILD.bazel b/bootstrapper/internal/joinclient/BUILD.bazel index a5424ec2d..3b8bf70b7 100644 --- a/bootstrapper/internal/joinclient/BUILD.bazel +++ b/bootstrapper/internal/joinclient/BUILD.bazel @@ -7,8 +7,8 @@ go_library( importpath = "github.com/edgelesssys/constellation/v2/bootstrapper/internal/joinclient", visibility = ["//bootstrapper:__subpackages__"], deps = [ - "//bootstrapper/internal/addresses", "//bootstrapper/internal/certificate", + "//bootstrapper/internal/diskencryption", "//internal/attestation", "//internal/cloud/metadata", "//internal/constants", @@ -21,8 +21,7 @@ go_library( "@io_k8s_kubernetes//cmd/kubeadm/app/apis/kubeadm/v1beta3", "@io_k8s_kubernetes//cmd/kubeadm/app/constants", "@io_k8s_utils//clock", - "@org_golang_google_grpc//:grpc", - "@org_golang_x_crypto//ssh", + "@org_golang_google_grpc//:go_default_library", ], ) @@ -37,7 +36,6 @@ go_test( deps = [ "//internal/cloud/metadata", "//internal/constants", - "//internal/crypto", "//internal/file", "//internal/grpc/atlscredentials", "//internal/grpc/dialer", @@ -51,8 +49,7 @@ go_test( "@com_github_stretchr_testify//require", "@io_k8s_kubernetes//cmd/kubeadm/app/apis/kubeadm/v1beta3", "@io_k8s_utils//clock/testing", - "@org_golang_google_grpc//:grpc", - "@org_golang_x_crypto//ssh", + "@org_golang_google_grpc//:go_default_library", "@org_uber_go_goleak//:goleak", ], ) diff --git a/bootstrapper/internal/joinclient/joinclient.go b/bootstrapper/internal/joinclient/joinclient.go index 706efe376..8f44fa115 100644 --- a/bootstrapper/internal/joinclient/joinclient.go +++ b/bootstrapper/internal/joinclient/joinclient.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* @@ -23,13 +23,13 @@ import ( "fmt" "log/slog" "net" - "os" "path/filepath" "strconv" + "sync" "time" - "github.com/edgelesssys/constellation/v2/bootstrapper/internal/addresses" "github.com/edgelesssys/constellation/v2/bootstrapper/internal/certificate" + "github.com/edgelesssys/constellation/v2/bootstrapper/internal/diskencryption" "github.com/edgelesssys/constellation/v2/internal/attestation" "github.com/edgelesssys/constellation/v2/internal/cloud/metadata" "github.com/edgelesssys/constellation/v2/internal/constants" @@ -39,7 +39,6 @@ import ( "github.com/edgelesssys/constellation/v2/internal/versions/components" "github.com/edgelesssys/constellation/v2/joinservice/joinproto" "github.com/spf13/afero" - "golang.org/x/crypto/ssh" "google.golang.org/grpc" kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3" kubeconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" @@ -70,19 +69,21 @@ type JoinClient struct { dialer grpcDialer joiner ClusterJoiner + cleaner cleaner metadataAPI MetadataAPI log *slog.Logger + mux sync.Mutex stopC chan struct{} stopDone chan struct{} } // New creates a new JoinClient. 
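The constructor declared next drops its explicit disk parameter, since the JoinClient now creates its own diskencryption handle internally. A caller-side sketch of the resulting lifecycle (a fragment, not a full program; names such as nodeLock, dialer, kubeJoiner and metadataAPI stand in for whatever main.go actually wires up):

joinClient := joinclient.New(nodeLock, dialer, kubeJoiner, metadataAPI, log)
joinClient.Start(cleaner) // returns immediately: the retry loop below runs in its own goroutine
defer joinClient.Stop()   // blocks until that goroutine has stopped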
-func New(lock locker, dial grpcDialer, joiner ClusterJoiner, meta MetadataAPI, disk encryptedDisk, log *slog.Logger) *JoinClient { +func New(lock locker, dial grpcDialer, joiner ClusterJoiner, meta MetadataAPI, log *slog.Logger) *JoinClient { return &JoinClient{ nodeLock: lock, - disk: disk, + disk: diskencryption.New(), fileHandler: file.NewHandler(afero.NewOsFs()), timeout: timeout, joinTimeout: joinTimeout, @@ -92,83 +93,99 @@ func New(lock locker, dial grpcDialer, joiner ClusterJoiner, meta MetadataAPI, d joiner: joiner, metadataAPI: meta, log: log.WithGroup("join-client"), - - stopC: make(chan struct{}, 1), - stopDone: make(chan struct{}, 1), } } // Start starts the client routine. The client will make the needed API calls to join // the cluster with the role it receives from the metadata API. // After receiving the needed information, the node will join the cluster. -func (c *JoinClient) Start(cleaner cleaner) error { +// Multiple calls of start on the same client won't start a second routine if there is +// already a routine running. +func (c *JoinClient) Start(cleaner cleaner) { + c.mux.Lock() + defer c.mux.Unlock() + + if c.stopC != nil { // daemon already running + return + } + c.log.Info("Starting") + c.stopC = make(chan struct{}, 1) + c.stopDone = make(chan struct{}, 1) + c.cleaner = cleaner + ticker := c.clock.NewTicker(c.interval) - defer ticker.Stop() - defer func() { c.stopDone <- struct{}{} }() - defer c.log.Info("Client stopped") + go func() { + defer ticker.Stop() + defer func() { c.stopDone <- struct{}{} }() + defer c.log.Info("Client stopped") - diskUUID, err := c.getDiskUUID() - if err != nil { - c.log.With(slog.Any("error", err)).Error("Failed to get disk UUID") - return err - } - c.diskUUID = diskUUID - - for { - err := c.getNodeMetadata() - if err == nil { - c.log.With(slog.String("role", c.role.String()), slog.String("name", c.nodeName)).Info("Received own instance metadata") - break + diskUUID, err := c.getDiskUUID() + if err != nil { + c.log.With(slog.Any("error", err)).Error("Failed to get disk UUID") + return } - c.log.With(slog.Any("error", err)).Error("Failed to retrieve instance metadata") + c.diskUUID = diskUUID - c.log.With(slog.Duration("interval", c.interval)).Info("Sleeping") - select { - case <-c.stopC: - return nil - case <-ticker.C(): + for { + err := c.getNodeMetadata() + if err == nil { + c.log.With(slog.String("role", c.role.String()), slog.String("name", c.nodeName)).Info("Received own instance metadata") + break + } + c.log.With(slog.Any("error", err)).Error("Failed to retrieve instance metadata") + + c.log.With(slog.Duration("interval", c.interval)).Info("Sleeping") + select { + case <-c.stopC: + return + case <-ticker.C(): + } } - } - var ticket *joinproto.IssueJoinTicketResponse - var kubeletKey []byte + for { + err := c.tryJoinWithAvailableServices() + if err == nil { + c.log.Info("Joined successfully. 
Client is shutting down") + return + } else if isUnrecoverable(err) { + c.log.With(slog.Any("error", err)).Error("Unrecoverable error occurred") + // TODO(burgerdev): this should eventually lead to a full node reset + return + } + c.log.With(slog.Any("error", err)).Warn("Join failed for all available endpoints") - for { - ticket, kubeletKey, err = c.tryJoinWithAvailableServices() - if err == nil { - c.log.Info("Successfully retrieved join ticket, starting Kubernetes node") - break + c.log.With(slog.Duration("interval", c.interval)).Info("Sleeping") + select { + case <-c.stopC: + return + case <-ticker.C(): + } } - c.log.With(slog.Any("error", err)).Warn("Join failed for all available endpoints") - - c.log.With(slog.Duration("interval", c.interval)).Info("Sleeping") - select { - case <-c.stopC: - return nil - case <-ticker.C(): - } - } - - if err := c.startNodeAndJoin(ticket, kubeletKey, cleaner); err != nil { - c.log.With(slog.Any("error", err)).Error("Failed to start node and join cluster") - return err - } - - return nil + }() } // Stop stops the client and blocks until the client's routine is stopped. func (c *JoinClient) Stop() { + c.mux.Lock() + defer c.mux.Unlock() + + if c.stopC == nil { // daemon not running + return + } + c.log.Info("Stopping") c.stopC <- struct{}{} <-c.stopDone + c.stopC = nil + c.stopDone = nil + c.log.Info("Stopped") } -func (c *JoinClient) tryJoinWithAvailableServices() (ticket *joinproto.IssueJoinTicketResponse, kubeletKey []byte, err error) { +func (c *JoinClient) tryJoinWithAvailableServices() error { ctx, cancel := c.timeoutCtx() defer cancel() @@ -176,106 +193,75 @@ func (c *JoinClient) tryJoinWithAvailableServices() (ticket *joinproto.IssueJoin endpoint, _, err := c.metadataAPI.GetLoadBalancerEndpoint(ctx) if err != nil { - c.log.Warn("Failed to get load balancer endpoint", "err", err) + return fmt.Errorf("failed to get load balancer endpoint: %w", err) } endpoints = append(endpoints, endpoint) ips, err := c.getControlPlaneIPs(ctx) if err != nil { - c.log.Warn("Failed to get control plane IPs", "err", err) + return fmt.Errorf("failed to get control plane IPs: %w", err) } endpoints = append(endpoints, ips...) 
if len(endpoints) == 0 { - return nil, nil, errors.New("no control plane IPs found") + return errors.New("no control plane IPs found") } - var joinErrs error for _, endpoint := range endpoints { - ticket, kubeletKey, err := c.requestJoinTicket(net.JoinHostPort(endpoint, strconv.Itoa(constants.JoinServiceNodePort))) + err = c.join(net.JoinHostPort(endpoint, strconv.Itoa(constants.JoinServiceNodePort))) if err == nil { - return ticket, kubeletKey, nil + return nil + } + if isUnrecoverable(err) { + return err } - - joinErrs = errors.Join(joinErrs, err) } - return nil, nil, fmt.Errorf("trying to join on all endpoints %v: %w", endpoints, joinErrs) + return err } -func (c *JoinClient) requestJoinTicket(serviceEndpoint string) (ticket *joinproto.IssueJoinTicketResponse, kubeletKey []byte, err error) { +func (c *JoinClient) join(serviceEndpoint string) error { ctx, cancel := c.timeoutCtx() defer cancel() certificateRequest, kubeletKey, err := certificate.GetKubeletCertificateRequest(c.nodeName, c.validIPs) if err != nil { - return nil, nil, err + return err } - interfaces, err := net.Interfaces() - if err != nil { - c.log.With(slog.Any("error", err)).Error("Failed to get network interfaces") - return nil, nil, err - } - // Needed since go doesn't implicitly convert slices of structs to slices of interfaces - interfacesForFunc := make([]addresses.NetInterface, len(interfaces)) - for i := range interfaces { - interfacesForFunc[i] = &interfaces[i] - } - - principalList, err := addresses.GetMachineNetworkAddresses(interfacesForFunc) - if err != nil { - c.log.With(slog.Any("error", err)).Error("Failed to get network addresses") - return nil, nil, err - } - hostname, err := os.Hostname() - if err != nil { - c.log.With(slog.Any("error", err)).Error("Failed to get hostname") - return nil, nil, err - } - principalList = append(principalList, hostname) - - hostKeyData, err := c.fileHandler.Read(constants.SSHHostKeyPath) - if err != nil { - c.log.With(slog.Any("error", err)).Error("Failed to read SSH host key file") - return nil, nil, err - } - - hostKey, err := ssh.ParsePrivateKey(hostKeyData) - if err != nil { - c.log.With(slog.Any("error", err)).Error("Failed to parse SSH host key file") - return nil, nil, err - } - hostKeyPubSSH := hostKey.PublicKey() - - conn, err := c.dialer.Dial(serviceEndpoint) + conn, err := c.dialer.Dial(ctx, serviceEndpoint) if err != nil { c.log.With(slog.String("endpoint", serviceEndpoint), slog.Any("error", err)).Error("Join service unreachable") - return nil, nil, fmt.Errorf("dialing join service endpoint: %w", err) + return fmt.Errorf("dialing join service endpoint: %w", err) } defer conn.Close() protoClient := joinproto.NewAPIClient(conn) req := &joinproto.IssueJoinTicketRequest{ - DiskUuid: c.diskUUID, - CertificateRequest: certificateRequest, - IsControlPlane: c.role == role.ControlPlane, - HostPublicKey: hostKeyPubSSH.Marshal(), - HostCertificatePrincipals: principalList, + DiskUuid: c.diskUUID, + CertificateRequest: certificateRequest, + IsControlPlane: c.role == role.ControlPlane, } - ticket, err = protoClient.IssueJoinTicket(ctx, req) + ticket, err := protoClient.IssueJoinTicket(ctx, req) if err != nil { c.log.With(slog.String("endpoint", serviceEndpoint), slog.Any("error", err)).Error("Issuing join ticket failed") - return nil, nil, fmt.Errorf("issuing join ticket: %w", err) + return fmt.Errorf("issuing join ticket: %w", err) } - return ticket, kubeletKey, err + return c.startNodeAndJoin(ticket, kubeletKey) } -func (c *JoinClient) startNodeAndJoin(ticket 
*joinproto.IssueJoinTicketResponse, kubeletKey []byte, cleaner cleaner) error { +func (c *JoinClient) startNodeAndJoin(ticket *joinproto.IssueJoinTicketResponse, kubeletKey []byte) (retErr error) { ctx, cancel := context.WithTimeout(context.Background(), c.joinTimeout) defer cancel() + // If an error occurs in this func, the client cannot continue. + defer func() { + if retErr != nil { + retErr = unrecoverableError{retErr} + } + }() + clusterID, err := attestation.DeriveClusterID(ticket.MeasurementSecret, ticket.MeasurementSalt) if err != nil { return err @@ -290,11 +276,10 @@ func (c *JoinClient) startNodeAndJoin(ticket *joinproto.IssueJoinTicketResponse, // There is already a cluster initialization in progress on // this node, so there is no need to also join the cluster, // as the initializing node is automatically part of the cluster. - c.log.Info("Node is already being initialized. Aborting join process.") - return nil + return errors.New("node is already being initialized") } - cleaner.Clean() + c.cleaner.Clean() if err := c.updateDiskPassphrase(string(ticket.StateDiskKey)); err != nil { return fmt.Errorf("updating disk passphrase: %w", err) @@ -312,14 +297,6 @@ func (c *JoinClient) startNodeAndJoin(ticket *joinproto.IssueJoinTicketResponse, return fmt.Errorf("writing kubelet key: %w", err) } - if err := c.fileHandler.Write(constants.SSHCAKeyPath, ticket.AuthorizedCaPublicKey, file.OptMkdirAll); err != nil { - return fmt.Errorf("writing ssh ca key: %w", err) - } - - if err := c.fileHandler.Write(constants.SSHHostCertificatePath, ticket.HostCertificate, file.OptMkdirAll); err != nil { - return fmt.Errorf("writing ssh host certificate: %w", err) - } - state := nodestate.NodeState{ Role: c.role, MeasurementSalt: ticket.MeasurementSalt, @@ -336,12 +313,11 @@ func (c *JoinClient) startNodeAndJoin(ticket *joinproto.IssueJoinTicketResponse, // We currently cannot recover from any failure in this function. Joining the k8s cluster // sometimes fails transiently, and we don't want to brick the node because of that. - for i := range 3 { - err = c.joiner.JoinCluster(ctx, btd, c.role, ticket.KubernetesComponents) - if err == nil { - break + for i := 0; i < 3; i++ { + err = c.joiner.JoinCluster(ctx, btd, c.role, ticket.KubernetesComponents, c.log) + if err != nil { + c.log.Error("failed to join k8s cluster", "role", c.role, "attempt", i, "error", err) } - c.log.Error("failed to join k8s cluster", "role", c.role, "attempt", i, "error", err) } if err != nil { return fmt.Errorf("joining Kubernetes cluster: %w", err) @@ -436,8 +412,15 @@ func (c *JoinClient) timeoutCtx() (context.Context, context.CancelFunc) { return context.WithTimeout(context.Background(), c.timeout) } +type unrecoverableError struct{ error } + +func isUnrecoverable(err error) bool { + _, ok := err.(unrecoverableError) + return ok +} + type grpcDialer interface { - Dial(target string) (*grpc.ClientConn, error) + Dial(ctx context.Context, target string) (*grpc.ClientConn, error) } // ClusterJoiner has the ability to join a new node to an existing cluster. 
@@ -448,6 +431,7 @@ type ClusterJoiner interface { args *kubeadm.BootstrapTokenDiscovery, peerRole role.Role, k8sComponents components.Components, + log *slog.Logger, ) error } diff --git a/bootstrapper/internal/joinclient/joinclient_test.go b/bootstrapper/internal/joinclient/joinclient_test.go index 0f96edaba..d22ed4fb9 100644 --- a/bootstrapper/internal/joinclient/joinclient_test.go +++ b/bootstrapper/internal/joinclient/joinclient_test.go @@ -1,18 +1,16 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package joinclient import ( "context" - "crypto/ed25519" - "encoding/pem" "errors" + "log/slog" "net" - "os" "strconv" "sync" "testing" @@ -20,7 +18,6 @@ import ( "github.com/edgelesssys/constellation/v2/internal/cloud/metadata" "github.com/edgelesssys/constellation/v2/internal/constants" - "github.com/edgelesssys/constellation/v2/internal/crypto" "github.com/edgelesssys/constellation/v2/internal/file" "github.com/edgelesssys/constellation/v2/internal/grpc/atlscredentials" "github.com/edgelesssys/constellation/v2/internal/grpc/dialer" @@ -33,7 +30,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.uber.org/goleak" - "golang.org/x/crypto/ssh" "google.golang.org/grpc" kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3" testclock "k8s.io/utils/clock/testing" @@ -44,6 +40,7 @@ func TestMain(m *testing.M) { } func TestClient(t *testing.T) { + someErr := errors.New("failed") lockedLock := newFakeLock() aqcuiredLock, lockErr := lockedLock.TryLockOnce(nil) require.True(t, aqcuiredLock) @@ -56,83 +53,26 @@ func TestClient(t *testing.T) { {Role: role.ControlPlane, Name: "node-4", VPCIP: "192.0.2.2"}, {Role: role.ControlPlane, Name: "node-5", VPCIP: "192.0.2.3"}, } - caDerivationKey := make([]byte, 256) - respCaKey := &joinproto.IssueJoinTicketResponse{AuthorizedCaPublicKey: caDerivationKey} - - // TODO: fix test since keys are generated with systemd service - makeIssueJoinTicketAnswerWithValidCert := func(t *testing.T, originalAnswer issueJoinTicketAnswer, fh file.Handler) issueJoinTicketAnswer { - require := require.New(t) - - sshKeyBytes, err := fh.Read(constants.SSHHostKeyPath) - require.NoError(err) - sshKey, err := ssh.ParsePrivateKey(sshKeyBytes) - require.NoError(err) - _, randomCAKey, err := ed25519.GenerateKey(nil) - require.NoError(err) - randomCA, err := ssh.NewSignerFromSigner(randomCAKey) - require.NoError(err) - - cert, err := crypto.GenerateSSHHostCertificate([]string{"asdf"}, sshKey.PublicKey(), randomCA) - require.NoError(err) - - certBytes := ssh.MarshalAuthorizedKey(cert) - - if originalAnswer.resp == nil { - originalAnswer.resp = &joinproto.IssueJoinTicketResponse{HostCertificate: certBytes} - } else { - originalAnswer.resp.HostCertificate = certBytes - } - - return originalAnswer - } - - makeIssueJoinTicketAnswerWithInvalidCert := func(t *testing.T, originalAnswer issueJoinTicketAnswer) issueJoinTicketAnswer { - require := require.New(t) - _, randomCAKey, err := ed25519.GenerateKey(nil) - require.NoError(err) - randomCA, err := ssh.NewSignerFromSigner(randomCAKey) - require.NoError(err) - - randomKey, _, err := ed25519.GenerateKey(nil) - require.NoError(err) - randomSSHKey, err := ssh.NewPublicKey(randomKey) - require.NoError(err) - - cert, err := crypto.GenerateSSHHostCertificate([]string{"asdf"}, randomSSHKey, randomCA) - require.NoError(err) - - certBytes := ssh.MarshalAuthorizedKey(cert) - - if originalAnswer.resp == nil { - originalAnswer.resp = 
&joinproto.IssueJoinTicketResponse{HostCertificate: certBytes} - } else { - originalAnswer.resp.HostCertificate = certBytes - } - - return originalAnswer - } testCases := map[string]struct { - role role.Role - clusterJoiner *stubClusterJoiner - disk encryptedDisk - nodeLock *fakeLock - apiAnswers []any - wantLock bool - wantJoin bool - wantNumJoins int - wantNotMatchingCert bool - wantCertNotExisting bool + role role.Role + clusterJoiner *stubClusterJoiner + disk encryptedDisk + nodeLock *fakeLock + apiAnswers []any + wantLock bool + wantJoin bool + wantNumJoins int }{ "on worker: metadata self: errors occur": { role: role.Worker, apiAnswers: []any{ - selfAnswer{err: assert.AnError}, - selfAnswer{err: assert.AnError}, - selfAnswer{err: assert.AnError}, + selfAnswer{err: someErr}, + selfAnswer{err: someErr}, + selfAnswer{err: someErr}, selfAnswer{instance: workerSelf}, listAnswer{instances: peers}, - issueJoinTicketAnswer{resp: respCaKey}, + issueJoinTicketAnswer{}, }, clusterJoiner: &stubClusterJoiner{}, nodeLock: newFakeLock(), @@ -140,23 +80,6 @@ func TestClient(t *testing.T) { wantJoin: true, wantLock: true, }, - "on worker: SSH host cert not matching": { - role: role.Worker, - apiAnswers: []any{ - selfAnswer{err: assert.AnError}, - selfAnswer{err: assert.AnError}, - selfAnswer{err: assert.AnError}, - selfAnswer{instance: workerSelf}, - listAnswer{instances: peers}, - issueJoinTicketAnswer{resp: respCaKey}, - }, - clusterJoiner: &stubClusterJoiner{}, - nodeLock: newFakeLock(), - disk: &stubDisk{}, - wantJoin: true, - wantLock: true, - wantNotMatchingCert: true, - }, "on worker: metadata self: invalid answer": { role: role.Worker, apiAnswers: []any{ @@ -165,7 +88,7 @@ func TestClient(t *testing.T) { selfAnswer{instance: metadata.InstanceMetadata{Name: "node-1"}}, selfAnswer{instance: workerSelf}, listAnswer{instances: peers}, - issueJoinTicketAnswer{resp: respCaKey}, + issueJoinTicketAnswer{}, }, clusterJoiner: &stubClusterJoiner{}, nodeLock: newFakeLock(), @@ -177,11 +100,11 @@ func TestClient(t *testing.T) { role: role.Worker, apiAnswers: []any{ selfAnswer{instance: workerSelf}, - listAnswer{err: assert.AnError}, - listAnswer{err: assert.AnError}, - listAnswer{err: assert.AnError}, + listAnswer{err: someErr}, + listAnswer{err: someErr}, + listAnswer{err: someErr}, listAnswer{instances: peers}, - issueJoinTicketAnswer{resp: respCaKey}, + issueJoinTicketAnswer{}, }, clusterJoiner: &stubClusterJoiner{}, nodeLock: newFakeLock(), @@ -197,7 +120,7 @@ func TestClient(t *testing.T) { listAnswer{}, listAnswer{}, listAnswer{instances: peers}, - issueJoinTicketAnswer{resp: respCaKey}, + issueJoinTicketAnswer{}, }, clusterJoiner: &stubClusterJoiner{}, nodeLock: newFakeLock(), @@ -210,11 +133,11 @@ func TestClient(t *testing.T) { apiAnswers: []any{ selfAnswer{instance: workerSelf}, listAnswer{instances: peers}, - issueJoinTicketAnswer{err: assert.AnError}, + issueJoinTicketAnswer{err: someErr}, listAnswer{instances: peers}, - issueJoinTicketAnswer{err: assert.AnError}, + issueJoinTicketAnswer{err: someErr}, listAnswer{instances: peers}, - issueJoinTicketAnswer{resp: respCaKey}, + issueJoinTicketAnswer{}, }, clusterJoiner: &stubClusterJoiner{}, nodeLock: newFakeLock(), @@ -227,11 +150,11 @@ func TestClient(t *testing.T) { apiAnswers: []any{ selfAnswer{instance: controlSelf}, listAnswer{instances: peers}, - issueJoinTicketAnswer{err: assert.AnError}, + issueJoinTicketAnswer{err: someErr}, listAnswer{instances: peers}, - issueJoinTicketAnswer{err: assert.AnError}, + issueJoinTicketAnswer{err: 
someErr}, listAnswer{instances: peers}, - issueJoinTicketAnswer{resp: respCaKey}, + issueJoinTicketAnswer{}, }, clusterJoiner: &stubClusterJoiner{}, nodeLock: newFakeLock(), @@ -244,9 +167,9 @@ func TestClient(t *testing.T) { apiAnswers: []any{ selfAnswer{instance: controlSelf}, listAnswer{instances: peers}, - issueJoinTicketAnswer{resp: respCaKey}, + issueJoinTicketAnswer{}, }, - clusterJoiner: &stubClusterJoiner{numBadCalls: -1, joinClusterErr: assert.AnError}, + clusterJoiner: &stubClusterJoiner{numBadCalls: -1, joinClusterErr: someErr}, nodeLock: newFakeLock(), disk: &stubDisk{}, wantJoin: true, @@ -257,9 +180,9 @@ func TestClient(t *testing.T) { apiAnswers: []any{ selfAnswer{instance: controlSelf}, listAnswer{instances: peers}, - issueJoinTicketAnswer{resp: respCaKey}, + issueJoinTicketAnswer{}, }, - clusterJoiner: &stubClusterJoiner{numBadCalls: 1, joinClusterErr: assert.AnError}, + clusterJoiner: &stubClusterJoiner{numBadCalls: 1, joinClusterErr: someErr}, nodeLock: newFakeLock(), disk: &stubDisk{}, wantJoin: true, @@ -271,45 +194,35 @@ func TestClient(t *testing.T) { apiAnswers: []any{ selfAnswer{instance: controlSelf}, listAnswer{instances: peers}, - issueJoinTicketAnswer{resp: respCaKey}, + issueJoinTicketAnswer{}, }, - clusterJoiner: &stubClusterJoiner{}, - nodeLock: lockedLock, - disk: &stubDisk{}, - wantLock: true, - wantCertNotExisting: true, + clusterJoiner: &stubClusterJoiner{}, + nodeLock: lockedLock, + disk: &stubDisk{}, + wantLock: true, }, "on control plane: disk open fails": { - role: role.ControlPlane, - clusterJoiner: &stubClusterJoiner{}, - nodeLock: newFakeLock(), - disk: &stubDisk{openErr: assert.AnError}, - wantCertNotExisting: true, + role: role.ControlPlane, + clusterJoiner: &stubClusterJoiner{}, + nodeLock: newFakeLock(), + disk: &stubDisk{openErr: someErr}, }, "on control plane: disk uuid fails": { - role: role.ControlPlane, - clusterJoiner: &stubClusterJoiner{}, - nodeLock: newFakeLock(), - disk: &stubDisk{uuidErr: assert.AnError}, - wantCertNotExisting: true, + role: role.ControlPlane, + clusterJoiner: &stubClusterJoiner{}, + nodeLock: newFakeLock(), + disk: &stubDisk{uuidErr: someErr}, }, } for name, tc := range testCases { t.Run(name, func(t *testing.T) { assert := assert.New(t) - require := require.New(t) clock := testclock.NewFakeClock(time.Now()) metadataAPI := newStubMetadataAPI() fileHandler := file.NewHandler(afero.NewMemMapFs()) - _, hostKey, err := ed25519.GenerateKey(nil) - require.NoError(err) - hostKeyPEM, err := ssh.MarshalPrivateKey(hostKey, "hostkey") - require.NoError(err) - require.NoError(fileHandler.Write(constants.SSHHostKeyPath, pem.EncodeToMemory(hostKeyPEM), file.OptMkdirAll)) - netDialer := testdialer.NewBufconnDialer() dialer := dialer.New(nil, nil, netDialer) @@ -324,9 +237,6 @@ func TestClient(t *testing.T) { metadataAPI: metadataAPI, clock: clock, log: logger.NewTest(t), - - stopC: make(chan struct{}, 1), - stopDone: make(chan struct{}, 1), } serverCreds := atlscredentials.New(nil, nil) @@ -338,7 +248,7 @@ func TestClient(t *testing.T) { go joinServer.Serve(listener) defer joinServer.GracefulStop() - go func() { _ = client.Start(stubCleaner{}) }() + client.Start(stubCleaner{}) for _, a := range tc.apiAnswers { switch a := a.(type) { @@ -347,43 +257,13 @@ func TestClient(t *testing.T) { case listAnswer: metadataAPI.listAnswerC <- a case issueJoinTicketAnswer: - var answer issueJoinTicketAnswer - if tc.wantNotMatchingCert { - answer = makeIssueJoinTicketAnswerWithInvalidCert(t, a) - } else { - answer = 
makeIssueJoinTicketAnswerWithValidCert(t, a, fileHandler) - } - joinserviceAPI.issueJoinTicketAnswerC <- answer + joinserviceAPI.issueJoinTicketAnswerC <- a } clock.Step(time.Second) } client.Stop() - if !tc.wantCertNotExisting { - hostCertBytes, err := fileHandler.Read(constants.SSHHostCertificatePath) - require.NoError(err) - hostKeyBytes, err := fileHandler.Read(constants.SSHHostKeyPath) - require.NoError(err) - - hostCertKey, _, _, _, err := ssh.ParseAuthorizedKey(hostCertBytes) - require.NoError(err) - hostCert, ok := hostCertKey.(*ssh.Certificate) - require.True(ok) - - hostKey, err := ssh.ParsePrivateKey(hostKeyBytes) - require.NoError(err) - - if !tc.wantNotMatchingCert { - assert.Equal(hostKey.PublicKey().Marshal(), hostCert.Key.Marshal()) - } else { - assert.NotEqual(hostKey.PublicKey().Marshal(), hostCert.Key.Marshal()) - } - } else { - _, err := fileHandler.Stat(constants.SSHHostCertificatePath) - require.True(errors.Is(err, os.ErrNotExist)) - } - if tc.wantJoin { assert.Greater(tc.clusterJoiner.joinClusterCalled, 0) } else { @@ -401,6 +281,78 @@ func TestClient(t *testing.T) { } } +func TestClientConcurrentStartStop(t *testing.T) { + netDialer := testdialer.NewBufconnDialer() + dialer := dialer.New(nil, nil, netDialer) + client := &JoinClient{ + nodeLock: newFakeLock(), + timeout: 30 * time.Second, + interval: 30 * time.Second, + dialer: dialer, + disk: &stubDisk{}, + joiner: &stubClusterJoiner{}, + fileHandler: file.NewHandler(afero.NewMemMapFs()), + metadataAPI: &stubRepeaterMetadataAPI{}, + clock: testclock.NewFakeClock(time.Now()), + log: logger.NewTest(t), + } + + wg := sync.WaitGroup{} + + start := func() { + defer wg.Done() + client.Start(stubCleaner{}) + } + + stop := func() { + defer wg.Done() + client.Stop() + } + + wg.Add(10) + go stop() + go start() + go start() + go stop() + go stop() + go start() + go start() + go stop() + go stop() + go start() + wg.Wait() + + client.Stop() +} + +func TestIsUnrecoverable(t *testing.T) { + assert := assert.New(t) + + some := errors.New("failed") + unrec := unrecoverableError{some} + assert.True(isUnrecoverable(unrec)) + assert.False(isUnrecoverable(some)) +} + +type stubRepeaterMetadataAPI struct { + selfInstance metadata.InstanceMetadata + selfErr error + listInstances []metadata.InstanceMetadata + listErr error +} + +func (s *stubRepeaterMetadataAPI) Self(_ context.Context) (metadata.InstanceMetadata, error) { + return s.selfInstance, s.selfErr +} + +func (s *stubRepeaterMetadataAPI) List(_ context.Context) ([]metadata.InstanceMetadata, error) { + return s.listInstances, s.listErr +} + +func (s *stubRepeaterMetadataAPI) GetLoadBalancerEndpoint(_ context.Context) (string, string, error) { + return "", "", nil +} + type stubMetadataAPI struct { selfAnswerC chan selfAnswer listAnswerC chan listAnswer @@ -469,7 +421,7 @@ type stubClusterJoiner struct { joinClusterErr error } -func (j *stubClusterJoiner) JoinCluster(context.Context, *kubeadm.BootstrapTokenDiscovery, role.Role, components.Components) error { +func (j *stubClusterJoiner) JoinCluster(context.Context, *kubeadm.BootstrapTokenDiscovery, role.Role, components.Components, *slog.Logger) error { j.joinClusterCalled++ if j.numBadCalls == 0 { return nil @@ -499,10 +451,6 @@ func (d *stubDisk) UpdatePassphrase(string) error { return d.updatePassphraseErr } -func (d *stubDisk) MarkDiskForReset() error { - return nil -} - type stubCleaner struct{} func (c stubCleaner) Clean() {} diff --git a/bootstrapper/internal/journald/journald.go b/bootstrapper/internal/journald/journald.go 
index 5e06370b2..bf040a1a0 100644 --- a/bootstrapper/internal/journald/journald.go +++ b/bootstrapper/internal/journald/journald.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* diff --git a/bootstrapper/internal/journald/journald_test.go b/bootstrapper/internal/journald/journald_test.go index b617d10ac..8bf022aec 100644 --- a/bootstrapper/internal/journald/journald_test.go +++ b/bootstrapper/internal/journald/journald_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package journald diff --git a/bootstrapper/internal/kubernetes/BUILD.bazel b/bootstrapper/internal/kubernetes/BUILD.bazel index 935c3fefd..d6ba14a49 100644 --- a/bootstrapper/internal/kubernetes/BUILD.bazel +++ b/bootstrapper/internal/kubernetes/BUILD.bazel @@ -11,7 +11,6 @@ go_library( importpath = "github.com/edgelesssys/constellation/v2/bootstrapper/internal/kubernetes", visibility = ["//bootstrapper:__subpackages__"], deps = [ - "//bootstrapper/internal/etcdio", "//bootstrapper/internal/kubernetes/k8sapi", "//bootstrapper/internal/kubernetes/kubewaiter", "//internal/cloud/cloudprovider", diff --git a/bootstrapper/internal/kubernetes/cloud_provider.go b/bootstrapper/internal/kubernetes/cloud_provider.go index 8b92826c3..39023ea82 100644 --- a/bootstrapper/internal/kubernetes/cloud_provider.go +++ b/bootstrapper/internal/kubernetes/cloud_provider.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package kubernetes diff --git a/bootstrapper/internal/kubernetes/k8sapi/BUILD.bazel b/bootstrapper/internal/kubernetes/k8sapi/BUILD.bazel index 968a2b011..ef87085bd 100644 --- a/bootstrapper/internal/kubernetes/k8sapi/BUILD.bazel +++ b/bootstrapper/internal/kubernetes/k8sapi/BUILD.bazel @@ -28,7 +28,6 @@ go_library( "@io_k8s_kubelet//config/v1beta1", "@io_k8s_kubernetes//cmd/kubeadm/app/apis/kubeadm/v1beta3", "@io_k8s_kubernetes//cmd/kubeadm/app/constants", - "@org_golang_x_mod//semver", ], ) diff --git a/bootstrapper/internal/kubernetes/k8sapi/k8sapi.go b/bootstrapper/internal/kubernetes/k8sapi/k8sapi.go index d5c1c1f35..72565b30a 100644 --- a/bootstrapper/internal/kubernetes/k8sapi/k8sapi.go +++ b/bootstrapper/internal/kubernetes/k8sapi/k8sapi.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package k8sapi is used to interact with the Kubernetes API to create or update required resources. 
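The joinclient changes above distinguish transient join failures (retried on the next tick) from unrecoverable ones by wrapping the latter in unrecoverableError and testing for the wrapper with a plain type assertion. Below is a minimal, self-contained sketch of that pattern, not part of the patch: the type and function names mirror the diff, while the demo program and the errors.As variant are illustrative additions. Note that the type assertion only matches the outermost error, so a result wrapped again with fmt.Errorf("...: %w", err) would no longer be detected; errors.As covers that case.

package main

import (
	"errors"
	"fmt"
)

// unrecoverableError marks an error that should stop the retry loop,
// mirroring the wrapper type introduced in the joinclient diff above.
type unrecoverableError struct{ error }

// isUnrecoverable matches only the outermost error, as in the diff.
func isUnrecoverable(err error) bool {
	_, ok := err.(unrecoverableError)
	return ok
}

// isUnrecoverableDeep (illustrative addition) also finds the marker
// when it sits somewhere inside a wrapped error chain.
func isUnrecoverableDeep(err error) bool {
	var u unrecoverableError
	return errors.As(err, &u)
}

func main() {
	transient := errors.New("join service unreachable")
	fatal := unrecoverableError{errors.New("node is already being initialized")}
	wrapped := fmt.Errorf("joining cluster: %w", fatal)

	fmt.Println(isUnrecoverable(transient))   // false: keep retrying
	fmt.Println(isUnrecoverable(fatal))       // true: stop the loop
	fmt.Println(isUnrecoverable(wrapped))     // false: assertion sees only the outer wrapper
	fmt.Println(isUnrecoverableDeep(wrapped)) // true: errors.As unwraps the chain
}

The shallow assertion appears sufficient for the code in the diff because startNodeAndJoin wraps its result exactly once (via the deferred assignment) and the value is returned to the retry loop without further wrapping before isUnrecoverable is called.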
diff --git a/bootstrapper/internal/kubernetes/k8sapi/k8sutil.go b/bootstrapper/internal/kubernetes/k8sapi/k8sutil.go index 1cbf88a9b..53f681b49 100644 --- a/bootstrapper/internal/kubernetes/k8sapi/k8sutil.go +++ b/bootstrapper/internal/kubernetes/k8sapi/k8sutil.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package k8sapi @@ -48,6 +48,7 @@ type Client interface { AddNodeSelectorsToDeployment(ctx context.Context, selectors map[string]string, name string, namespace string) error ListAllNamespaces(ctx context.Context) (*corev1.NamespaceList, error) AnnotateNode(ctx context.Context, nodeName, annotationKey, annotationValue string) error + EnforceCoreDNSSpread(ctx context.Context) error PatchFirstNodePodCIDR(ctx context.Context, firstNodePodCIDR string) error } @@ -87,6 +88,7 @@ func (k *KubernetesUtil) InstallComponents(ctx context.Context, kubernetesCompon func (k *KubernetesUtil) InitCluster( ctx context.Context, initConfig []byte, nodeName, clusterName string, ips []net.IP, conformanceMode bool, log *slog.Logger, ) ([]byte, error) { + // TODO(3u13r): audit policy should be user input auditPolicy, err := resources.NewDefaultAuditPolicy().Marshal() if err != nil { return nil, fmt.Errorf("generating default audit policy: %w", err) @@ -135,12 +137,6 @@ func (k *KubernetesUtil) InitCluster( } // Create static pods directory for all nodes (the Kubelets on the worker nodes also expect the path to exist) - // If the node rebooted after the static pod directory was created, - // the existing directory needs to be removed before we can - // try to init the cluster again. - if err := os.RemoveAll("/etc/kubernetes/manifests"); err != nil { - return nil, fmt.Errorf("removing static pods directory: %w", err) - } log.Info("Creating static Pod directory /etc/kubernetes/manifests") if err := os.MkdirAll("/etc/kubernetes/manifests", os.ModePerm); err != nil { return nil, fmt.Errorf("creating static pods directory: %w", err) @@ -148,7 +144,7 @@ func (k *KubernetesUtil) InitCluster( // initialize the cluster log.Info("Initializing the cluster using kubeadm init") - skipPhases := "--skip-phases=preflight,certs,addon/coredns" + skipPhases := "--skip-phases=preflight,certs" if !conformanceMode { skipPhases += ",addon/kube-proxy" } @@ -185,6 +181,7 @@ func (k *KubernetesUtil) InitCluster( // JoinCluster joins existing Kubernetes cluster using kubeadm join. func (k *KubernetesUtil) JoinCluster(ctx context.Context, joinConfig []byte, log *slog.Logger) error { + // TODO(3u13r): audit policy should be user input auditPolicy, err := resources.NewDefaultAuditPolicy().Marshal() if err != nil { return fmt.Errorf("generating default audit policy: %w", err) @@ -203,12 +200,6 @@ func (k *KubernetesUtil) JoinCluster(ctx context.Context, joinConfig []byte, log } // Create static pods directory for all nodes (the Kubelets on the worker nodes also expect the path to exist) - // If the node rebooted after the static pod directory was created, for example - // if a failure during an upgrade occurred, the existing directory needs to be - // removed before we can try to join the cluster again. 
- if err := os.RemoveAll("/etc/kubernetes/manifests"); err != nil { - return fmt.Errorf("removing static pods directory: %w", err) - } log.Info("Creating static Pod directory /etc/kubernetes/manifests") if err := os.MkdirAll("/etc/kubernetes/manifests", os.ModePerm); err != nil { return fmt.Errorf("creating static pods directory: %w", err) diff --git a/bootstrapper/internal/kubernetes/k8sapi/kubeadm_config.go b/bootstrapper/internal/kubernetes/k8sapi/kubeadm_config.go index ca431441e..430839a46 100644 --- a/bootstrapper/internal/kubernetes/k8sapi/kubeadm_config.go +++ b/bootstrapper/internal/kubernetes/k8sapi/kubeadm_config.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package k8sapi @@ -12,7 +12,6 @@ import ( "github.com/edgelesssys/constellation/v2/bootstrapper/internal/certificate" "github.com/edgelesssys/constellation/v2/internal/constants" "github.com/edgelesssys/constellation/v2/internal/kubernetes" - "golang.org/x/mod/semver" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kubeletconf "k8s.io/kubelet/config/v1beta1" @@ -39,7 +38,7 @@ func (c *KubdeadmConfiguration) InitConfiguration(externalCloudProvider bool, cl cloudProvider = "external" } - initConfig := KubeadmInitYAML{ + return KubeadmInitYAML{ InitConfiguration: kubeadm.InitConfiguration{ TypeMeta: metav1.TypeMeta{ APIVersion: kubeadm.SchemeGroupVersion.String(), @@ -158,11 +157,6 @@ func (c *KubdeadmConfiguration) InitConfiguration(externalCloudProvider bool, cl TLSPrivateKeyFile: certificate.KeyFilename, }, } - - if semver.Compare(clusterVersion, "v1.31.0") >= 0 { - initConfig.ClusterConfiguration.FeatureGates = map[string]bool{"ControlPlaneKubeletLocalMode": true} - } - return initConfig } // JoinConfiguration returns a new kubeadm join configuration. 
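For context, the block removed from InitConfiguration above gated the ControlPlaneKubeletLocalMode feature on the cluster version using golang.org/x/mod/semver, which is why the org_golang_x_mod//semver Bazel dependency is dropped as well. A standalone sketch of that version check, outside the kubeadm types, could look like the following; featureGatesFor is a hypothetical helper, and only the semver.Compare call and the gate name come from the removed code.

package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

// featureGatesFor returns the feature gates that would be enabled for a
// given cluster version, mirroring the semver check removed above.
func featureGatesFor(clusterVersion string) map[string]bool {
	if semver.Compare(clusterVersion, "v1.31.0") >= 0 {
		return map[string]bool{"ControlPlaneKubeletLocalMode": true}
	}
	return nil
}

func main() {
	fmt.Println(featureGatesFor("v1.30.5")) // map[] (gate disabled)
	fmt.Println(featureGatesFor("v1.31.2")) // map[ControlPlaneKubeletLocalMode:true]
}

semver.Compare expects versions with a leading "v" and treats an invalid string as smaller than any valid one, so a malformed clusterVersion would simply leave the gate disabled rather than cause an error.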
diff --git a/bootstrapper/internal/kubernetes/k8sapi/kubeadm_config_test.go b/bootstrapper/internal/kubernetes/k8sapi/kubeadm_config_test.go index bf9b68a64..6fef72a88 100644 --- a/bootstrapper/internal/kubernetes/k8sapi/kubeadm_config_test.go +++ b/bootstrapper/internal/kubernetes/k8sapi/kubeadm_config_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package k8sapi diff --git a/bootstrapper/internal/kubernetes/k8sapi/resources/auditpolicy.go b/bootstrapper/internal/kubernetes/k8sapi/resources/auditpolicy.go index 9968c982f..8d3679d0a 100644 --- a/bootstrapper/internal/kubernetes/k8sapi/resources/auditpolicy.go +++ b/bootstrapper/internal/kubernetes/k8sapi/resources/auditpolicy.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package resources diff --git a/bootstrapper/internal/kubernetes/k8sapi/resources/auditpolicy_test.go b/bootstrapper/internal/kubernetes/k8sapi/resources/auditpolicy_test.go index 3f9d08914..6889cec05 100644 --- a/bootstrapper/internal/kubernetes/k8sapi/resources/auditpolicy_test.go +++ b/bootstrapper/internal/kubernetes/k8sapi/resources/auditpolicy_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package resources diff --git a/bootstrapper/internal/kubernetes/k8sapi/resources/resources.go b/bootstrapper/internal/kubernetes/k8sapi/resources/resources.go index b7d4bd86f..d7dc58c49 100644 --- a/bootstrapper/internal/kubernetes/k8sapi/resources/resources.go +++ b/bootstrapper/internal/kubernetes/k8sapi/resources/resources.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package resources contains Kubernetes configs and policies for Constellation. diff --git a/bootstrapper/internal/kubernetes/k8sapi/systemd.go b/bootstrapper/internal/kubernetes/k8sapi/systemd.go index c12484737..7ac800f3a 100644 --- a/bootstrapper/internal/kubernetes/k8sapi/systemd.go +++ b/bootstrapper/internal/kubernetes/k8sapi/systemd.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package k8sapi diff --git a/bootstrapper/internal/kubernetes/k8sutil.go b/bootstrapper/internal/kubernetes/k8sutil.go index 3752d8087..1faf6c3cf 100644 --- a/bootstrapper/internal/kubernetes/k8sutil.go +++ b/bootstrapper/internal/kubernetes/k8sutil.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package kubernetes diff --git a/bootstrapper/internal/kubernetes/kubernetes.go b/bootstrapper/internal/kubernetes/kubernetes.go index 4c09ed783..5ef1f4637 100644 --- a/bootstrapper/internal/kubernetes/kubernetes.go +++ b/bootstrapper/internal/kubernetes/kubernetes.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package kubernetes provides functionality to bootstrap a Kubernetes cluster, or join an exiting one. 
@@ -16,7 +16,6 @@ import ( "strings" "time" - "github.com/edgelesssys/constellation/v2/bootstrapper/internal/etcdio" "github.com/edgelesssys/constellation/v2/bootstrapper/internal/kubernetes/k8sapi" "github.com/edgelesssys/constellation/v2/bootstrapper/internal/kubernetes/kubewaiter" "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider" @@ -41,48 +40,37 @@ type kubeAPIWaiter interface { Wait(ctx context.Context, kubernetesClient kubewaiter.KubernetesClient) error } -type etcdIOPrioritizer interface { - PrioritizeIO() -} - // KubeWrapper implements Cluster interface. type KubeWrapper struct { - cloudProvider string - clusterUtil clusterUtil - kubeAPIWaiter kubeAPIWaiter - configProvider configurationProvider - client k8sapi.Client - providerMetadata ProviderMetadata - etcdIOPrioritizer etcdIOPrioritizer - getIPAddr func() (string, error) - - log *slog.Logger + cloudProvider string + clusterUtil clusterUtil + kubeAPIWaiter kubeAPIWaiter + configProvider configurationProvider + client k8sapi.Client + providerMetadata ProviderMetadata + getIPAddr func() (string, error) } // New creates a new KubeWrapper with real values. func New(cloudProvider string, clusterUtil clusterUtil, configProvider configurationProvider, client k8sapi.Client, - providerMetadata ProviderMetadata, kubeAPIWaiter kubeAPIWaiter, log *slog.Logger, + providerMetadata ProviderMetadata, kubeAPIWaiter kubeAPIWaiter, ) *KubeWrapper { - etcdIOPrioritizer := etcdio.NewClient(log) - return &KubeWrapper{ - cloudProvider: cloudProvider, - clusterUtil: clusterUtil, - kubeAPIWaiter: kubeAPIWaiter, - configProvider: configProvider, - client: client, - providerMetadata: providerMetadata, - getIPAddr: getIPAddr, - log: log, - etcdIOPrioritizer: etcdIOPrioritizer, + cloudProvider: cloudProvider, + clusterUtil: clusterUtil, + kubeAPIWaiter: kubeAPIWaiter, + configProvider: configProvider, + client: client, + providerMetadata: providerMetadata, + getIPAddr: getIPAddr, } } // InitCluster initializes a new Kubernetes cluster and applies pod network provider. func (k *KubeWrapper) InitCluster( - ctx context.Context, versionString, clusterName string, conformanceMode bool, kubernetesComponents components.Components, apiServerCertSANs []string, serviceCIDR string, + ctx context.Context, versionString, clusterName string, conformanceMode bool, kubernetesComponents components.Components, apiServerCertSANs []string, serviceCIDR string, log *slog.Logger, ) ([]byte, error) { - k.log.With(slog.String("version", versionString)).Info("Installing Kubernetes components") + log.With(slog.String("version", versionString)).Info("Installing Kubernetes components") if err := k.clusterUtil.InstallComponents(ctx, kubernetesComponents); err != nil { return nil, err } @@ -90,7 +78,7 @@ func (k *KubeWrapper) InitCluster( var validIPs []net.IP // Step 1: retrieve cloud metadata for Kubernetes configuration - k.log.Info("Retrieving node metadata") + log.Info("Retrieving node metadata") instance, err := k.providerMetadata.Self(ctx) if err != nil { return nil, fmt.Errorf("retrieving own instance metadata: %w", err) @@ -118,7 +106,7 @@ func (k *KubeWrapper) InitCluster( certSANs := []string{nodeIP} certSANs = append(certSANs, apiServerCertSANs...) 
- k.log.With( + log.With( slog.String("nodeName", nodeName), slog.String("providerID", instance.ProviderID), slog.String("nodeIP", nodeIP), @@ -144,16 +132,12 @@ func (k *KubeWrapper) InitCluster( if err != nil { return nil, fmt.Errorf("encoding kubeadm init configuration as YAML: %w", err) } - - k.log.Info("Initializing Kubernetes cluster") - kubeConfig, err := k.clusterUtil.InitCluster(ctx, initConfigYAML, nodeName, clusterName, validIPs, conformanceMode, k.log) + log.Info("Initializing Kubernetes cluster") + kubeConfig, err := k.clusterUtil.InitCluster(ctx, initConfigYAML, nodeName, clusterName, validIPs, conformanceMode, log) if err != nil { return nil, fmt.Errorf("kubeadm init: %w", err) } - k.log.Info("Prioritizing etcd I/O") - k.etcdIOPrioritizer.PrioritizeIO() - err = k.client.Initialize(kubeConfig) if err != nil { return nil, fmt.Errorf("initializing kubectl client: %w", err) @@ -165,6 +149,10 @@ func (k *KubeWrapper) InitCluster( return nil, fmt.Errorf("waiting for Kubernetes API to be available: %w", err) } + if err := k.client.EnforceCoreDNSSpread(ctx); err != nil { + return nil, fmt.Errorf("configuring CoreDNS deployment: %w", err) + } + // Setup the K8s components ConfigMap. k8sComponentsConfigMap, err := k.setupK8sComponentsConfigMap(ctx, kubernetesComponents, versionString) if err != nil { @@ -189,23 +177,22 @@ func (k *KubeWrapper) InitCluster( return nil, fmt.Errorf("annotating node with Kubernetes components hash: %w", err) } - k.log.Info("Setting up internal-config ConfigMap") + log.Info("Setting up internal-config ConfigMap") if err := k.setupInternalConfigMap(ctx); err != nil { return nil, fmt.Errorf("failed to setup internal ConfigMap: %w", err) } - return kubeConfig, nil } // JoinCluster joins existing Kubernetes cluster. -func (k *KubeWrapper) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTokenDiscovery, peerRole role.Role, k8sComponents components.Components) error { - k.log.With("k8sComponents", k8sComponents).Info("Installing provided kubernetes components") +func (k *KubeWrapper) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTokenDiscovery, peerRole role.Role, k8sComponents components.Components, log *slog.Logger) error { + log.With("k8sComponents", k8sComponents).Info("Installing provided kubernetes components") if err := k.clusterUtil.InstallComponents(ctx, k8sComponents); err != nil { return fmt.Errorf("installing kubernetes components: %w", err) } // Step 1: retrieve cloud metadata for Kubernetes configuration - k.log.Info("Retrieving node metadata") + log.Info("Retrieving node metadata") instance, err := k.providerMetadata.Self(ctx) if err != nil { return fmt.Errorf("retrieving own instance metadata: %w", err) @@ -225,7 +212,7 @@ func (k *KubeWrapper) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTo // override join endpoint to go over lb args.APIServerEndpoint = net.JoinHostPort(loadBalancerHost, loadBalancerPort) - k.log.With( + log.With( slog.String("nodeName", nodeName), slog.String("providerID", providerID), slog.String("nodeIP", nodeInternalIP), @@ -250,18 +237,11 @@ func (k *KubeWrapper) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTo if err != nil { return fmt.Errorf("encoding kubeadm join configuration as YAML: %w", err) } - - k.log.With(slog.String("apiServerEndpoint", args.APIServerEndpoint)).Info("Joining Kubernetes cluster") - if err := k.clusterUtil.JoinCluster(ctx, joinConfigYAML, k.log); err != nil { + log.With(slog.String("apiServerEndpoint", args.APIServerEndpoint)).Info("Joining Kubernetes 
cluster") + if err := k.clusterUtil.JoinCluster(ctx, joinConfigYAML, log); err != nil { return fmt.Errorf("joining cluster: %v; %w ", string(joinConfigYAML), err) } - // If on control plane (and thus with etcd), try to prioritize etcd I/O. - if peerRole == role.ControlPlane { - k.log.Info("Prioritizing etcd I/O") - k.etcdIOPrioritizer.PrioritizeIO() - } - return nil } @@ -321,8 +301,6 @@ func (k *KubeWrapper) StartKubelet() error { return fmt.Errorf("starting kubelet: %w", err) } - k.etcdIOPrioritizer.PrioritizeIO() - return nil } diff --git a/bootstrapper/internal/kubernetes/kubernetes_test.go b/bootstrapper/internal/kubernetes/kubernetes_test.go index 02051bd5d..ccc3a107c 100644 --- a/bootstrapper/internal/kubernetes/kubernetes_test.go +++ b/bootstrapper/internal/kubernetes/kubernetes_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package kubernetes @@ -42,19 +42,17 @@ func TestInitCluster(t *testing.T) { aliasIPRange := "192.0.2.0/24" testCases := map[string]struct { - clusterUtil stubClusterUtil - kubectl stubKubectl - kubeAPIWaiter stubKubeAPIWaiter - providerMetadata ProviderMetadata - wantConfig k8sapi.KubeadmInitYAML - etcdIOPrioritizer stubEtcdIOPrioritizer - wantErr bool - k8sVersion versions.ValidK8sVersion + clusterUtil stubClusterUtil + kubectl stubKubectl + kubeAPIWaiter stubKubeAPIWaiter + providerMetadata ProviderMetadata + wantConfig k8sapi.KubeadmInitYAML + wantErr bool + k8sVersion versions.ValidK8sVersion }{ "kubeadm init works with metadata and loadbalancer": { - clusterUtil: stubClusterUtil{kubeconfig: []byte("someKubeconfig")}, - kubeAPIWaiter: stubKubeAPIWaiter{}, - etcdIOPrioritizer: stubEtcdIOPrioritizer{}, + clusterUtil: stubClusterUtil{kubeconfig: []byte("someKubeconfig")}, + kubeAPIWaiter: stubKubeAPIWaiter{}, providerMetadata: &stubProviderMetadata{ selfResp: metadata.InstanceMetadata{ Name: nodeName, @@ -87,9 +85,8 @@ func TestInitCluster(t *testing.T) { k8sVersion: versions.Default, }, "kubeadm init fails when annotating itself": { - clusterUtil: stubClusterUtil{kubeconfig: []byte("someKubeconfig")}, - kubeAPIWaiter: stubKubeAPIWaiter{}, - etcdIOPrioritizer: stubEtcdIOPrioritizer{}, + clusterUtil: stubClusterUtil{kubeconfig: []byte("someKubeconfig")}, + kubeAPIWaiter: stubKubeAPIWaiter{}, providerMetadata: &stubProviderMetadata{ selfResp: metadata.InstanceMetadata{ Name: nodeName, @@ -105,9 +102,8 @@ func TestInitCluster(t *testing.T) { k8sVersion: versions.Default, }, "kubeadm init fails when retrieving metadata self": { - clusterUtil: stubClusterUtil{kubeconfig: []byte("someKubeconfig")}, - kubeAPIWaiter: stubKubeAPIWaiter{}, - etcdIOPrioritizer: stubEtcdIOPrioritizer{}, + clusterUtil: stubClusterUtil{kubeconfig: []byte("someKubeconfig")}, + kubeAPIWaiter: stubKubeAPIWaiter{}, providerMetadata: &stubProviderMetadata{ selfErr: assert.AnError, }, @@ -115,8 +111,7 @@ func TestInitCluster(t *testing.T) { k8sVersion: versions.Default, }, "kubeadm init fails when retrieving metadata loadbalancer ip": { - clusterUtil: stubClusterUtil{kubeconfig: []byte("someKubeconfig")}, - etcdIOPrioritizer: stubEtcdIOPrioritizer{}, + clusterUtil: stubClusterUtil{kubeconfig: []byte("someKubeconfig")}, providerMetadata: &stubProviderMetadata{ getLoadBalancerEndpointErr: assert.AnError, }, @@ -128,58 +123,51 @@ func TestInitCluster(t *testing.T) { initClusterErr: assert.AnError, kubeconfig: []byte("someKubeconfig"), }, - kubeAPIWaiter: stubKubeAPIWaiter{}, - etcdIOPrioritizer: 
stubEtcdIOPrioritizer{}, - providerMetadata: &stubProviderMetadata{}, - wantErr: true, - k8sVersion: versions.Default, + kubeAPIWaiter: stubKubeAPIWaiter{}, + providerMetadata: &stubProviderMetadata{}, + wantErr: true, + k8sVersion: versions.Default, }, "kubeadm init fails when deploying cilium": { - clusterUtil: stubClusterUtil{kubeconfig: []byte("someKubeconfig")}, - etcdIOPrioritizer: stubEtcdIOPrioritizer{}, - providerMetadata: &stubProviderMetadata{}, - wantErr: true, - k8sVersion: versions.Default, + clusterUtil: stubClusterUtil{kubeconfig: []byte("someKubeconfig")}, + providerMetadata: &stubProviderMetadata{}, + wantErr: true, + k8sVersion: versions.Default, }, "kubeadm init fails when setting up constellation-services chart": { - clusterUtil: stubClusterUtil{kubeconfig: []byte("someKubeconfig")}, - kubeAPIWaiter: stubKubeAPIWaiter{}, - etcdIOPrioritizer: stubEtcdIOPrioritizer{}, - providerMetadata: &stubProviderMetadata{}, - wantErr: true, - k8sVersion: versions.Default, + clusterUtil: stubClusterUtil{kubeconfig: []byte("someKubeconfig")}, + kubeAPIWaiter: stubKubeAPIWaiter{}, + providerMetadata: &stubProviderMetadata{}, + wantErr: true, + k8sVersion: versions.Default, }, "kubeadm init fails when reading kubeconfig": { - clusterUtil: stubClusterUtil{kubeconfig: []byte("someKubeconfig")}, - kubeAPIWaiter: stubKubeAPIWaiter{}, - etcdIOPrioritizer: stubEtcdIOPrioritizer{}, - providerMetadata: &stubProviderMetadata{}, - wantErr: true, - k8sVersion: versions.Default, + clusterUtil: stubClusterUtil{kubeconfig: []byte("someKubeconfig")}, + kubeAPIWaiter: stubKubeAPIWaiter{}, + providerMetadata: &stubProviderMetadata{}, + wantErr: true, + k8sVersion: versions.Default, }, "kubeadm init fails when setting up verification service": { - clusterUtil: stubClusterUtil{kubeconfig: []byte("someKubeconfig")}, - kubeAPIWaiter: stubKubeAPIWaiter{}, - etcdIOPrioritizer: stubEtcdIOPrioritizer{}, - providerMetadata: &stubProviderMetadata{}, - wantErr: true, - k8sVersion: versions.Default, + clusterUtil: stubClusterUtil{kubeconfig: []byte("someKubeconfig")}, + kubeAPIWaiter: stubKubeAPIWaiter{}, + providerMetadata: &stubProviderMetadata{}, + wantErr: true, + k8sVersion: versions.Default, }, "kubeadm init fails when waiting for kubeAPI server": { - clusterUtil: stubClusterUtil{kubeconfig: []byte("someKubeconfig")}, - kubeAPIWaiter: stubKubeAPIWaiter{waitErr: assert.AnError}, - etcdIOPrioritizer: stubEtcdIOPrioritizer{}, - providerMetadata: &stubProviderMetadata{}, - k8sVersion: versions.Default, - wantErr: true, + clusterUtil: stubClusterUtil{kubeconfig: []byte("someKubeconfig")}, + kubeAPIWaiter: stubKubeAPIWaiter{waitErr: assert.AnError}, + providerMetadata: &stubProviderMetadata{}, + k8sVersion: versions.Default, + wantErr: true, }, "unsupported k8sVersion fails cluster creation": { - clusterUtil: stubClusterUtil{kubeconfig: []byte("someKubeconfig")}, - kubeAPIWaiter: stubKubeAPIWaiter{}, - etcdIOPrioritizer: stubEtcdIOPrioritizer{}, - providerMetadata: &stubProviderMetadata{}, - k8sVersion: "1.19", - wantErr: true, + clusterUtil: stubClusterUtil{kubeconfig: []byte("someKubeconfig")}, + kubeAPIWaiter: stubKubeAPIWaiter{}, + providerMetadata: &stubProviderMetadata{}, + k8sVersion: "1.19", + wantErr: true, }, } @@ -189,20 +177,18 @@ func TestInitCluster(t *testing.T) { require := require.New(t) kube := KubeWrapper{ - cloudProvider: "aws", // provide a valid cloud provider for cilium installation - clusterUtil: &tc.clusterUtil, - providerMetadata: tc.providerMetadata, - kubeAPIWaiter: &tc.kubeAPIWaiter, - 
configProvider: &stubConfigProvider{initConfig: k8sapi.KubeadmInitYAML{}}, - client: &tc.kubectl, - getIPAddr: func() (string, error) { return privateIP, nil }, - etcdIOPrioritizer: &tc.etcdIOPrioritizer, - log: logger.NewTest(t), + cloudProvider: "aws", // provide a valid cloud provider for cilium installation + clusterUtil: &tc.clusterUtil, + providerMetadata: tc.providerMetadata, + kubeAPIWaiter: &tc.kubeAPIWaiter, + configProvider: &stubConfigProvider{initConfig: k8sapi.KubeadmInitYAML{}}, + client: &tc.kubectl, + getIPAddr: func() (string, error) { return privateIP, nil }, } _, err := kube.InitCluster( - t.Context(), string(tc.k8sVersion), "kubernetes", - false, nil, nil, "", + context.Background(), string(tc.k8sVersion), "kubernetes", + false, nil, nil, "", logger.NewTest(t), ) if tc.wantErr { @@ -238,17 +224,15 @@ func TestJoinCluster(t *testing.T) { } testCases := map[string]struct { - clusterUtil stubClusterUtil - providerMetadata ProviderMetadata - wantConfig kubeadm.JoinConfiguration - role role.Role - k8sComponents components.Components - etcdIOPrioritizer stubEtcdIOPrioritizer - wantErr bool + clusterUtil stubClusterUtil + providerMetadata ProviderMetadata + wantConfig kubeadm.JoinConfiguration + role role.Role + k8sComponents components.Components + wantErr bool }{ "kubeadm join worker works with metadata and remote Kubernetes Components": { - clusterUtil: stubClusterUtil{}, - etcdIOPrioritizer: stubEtcdIOPrioritizer{}, + clusterUtil: stubClusterUtil{}, providerMetadata: &stubProviderMetadata{ selfResp: metadata.InstanceMetadata{ ProviderID: "provider-id", @@ -269,8 +253,7 @@ func TestJoinCluster(t *testing.T) { }, }, "kubeadm join worker works with metadata and local Kubernetes components": { - clusterUtil: stubClusterUtil{}, - etcdIOPrioritizer: stubEtcdIOPrioritizer{}, + clusterUtil: stubClusterUtil{}, providerMetadata: &stubProviderMetadata{ selfResp: metadata.InstanceMetadata{ ProviderID: "provider-id", @@ -290,8 +273,7 @@ func TestJoinCluster(t *testing.T) { }, }, "kubeadm join worker works with metadata and cloud controller manager": { - clusterUtil: stubClusterUtil{}, - etcdIOPrioritizer: stubEtcdIOPrioritizer{}, + clusterUtil: stubClusterUtil{}, providerMetadata: &stubProviderMetadata{ selfResp: metadata.InstanceMetadata{ ProviderID: "provider-id", @@ -311,8 +293,7 @@ func TestJoinCluster(t *testing.T) { }, }, "kubeadm join control-plane node works with metadata": { - clusterUtil: stubClusterUtil{}, - etcdIOPrioritizer: stubEtcdIOPrioritizer{}, + clusterUtil: stubClusterUtil{}, providerMetadata: &stubProviderMetadata{ selfResp: metadata.InstanceMetadata{ ProviderID: "provider-id", @@ -339,8 +320,7 @@ func TestJoinCluster(t *testing.T) { }, }, "kubeadm join worker fails when installing remote Kubernetes components": { - clusterUtil: stubClusterUtil{installComponentsErr: errors.New("error")}, - etcdIOPrioritizer: stubEtcdIOPrioritizer{}, + clusterUtil: stubClusterUtil{installComponentsErr: errors.New("error")}, providerMetadata: &stubProviderMetadata{ selfResp: metadata.InstanceMetadata{ ProviderID: "provider-id", @@ -353,8 +333,7 @@ func TestJoinCluster(t *testing.T) { wantErr: true, }, "kubeadm join worker fails when retrieving self metadata": { - clusterUtil: stubClusterUtil{}, - etcdIOPrioritizer: stubEtcdIOPrioritizer{}, + clusterUtil: stubClusterUtil{}, providerMetadata: &stubProviderMetadata{ selfErr: assert.AnError, }, @@ -362,11 +341,10 @@ func TestJoinCluster(t *testing.T) { wantErr: true, }, "kubeadm join worker fails when applying the join config": { - 
clusterUtil: stubClusterUtil{joinClusterErr: assert.AnError}, - etcdIOPrioritizer: stubEtcdIOPrioritizer{}, - providerMetadata: &stubProviderMetadata{}, - role: role.Worker, - wantErr: true, + clusterUtil: stubClusterUtil{joinClusterErr: assert.AnError}, + providerMetadata: &stubProviderMetadata{}, + role: role.Worker, + wantErr: true, }, } @@ -376,15 +354,13 @@ func TestJoinCluster(t *testing.T) { require := require.New(t) kube := KubeWrapper{ - clusterUtil: &tc.clusterUtil, - providerMetadata: tc.providerMetadata, - configProvider: &stubConfigProvider{}, - getIPAddr: func() (string, error) { return privateIP, nil }, - etcdIOPrioritizer: &tc.etcdIOPrioritizer, - log: logger.NewTest(t), + clusterUtil: &tc.clusterUtil, + providerMetadata: tc.providerMetadata, + configProvider: &stubConfigProvider{}, + getIPAddr: func() (string, error) { return privateIP, nil }, } - err := kube.JoinCluster(t.Context(), joinCommand, tc.role, tc.k8sComponents) + err := kube.JoinCluster(context.Background(), joinCommand, tc.role, tc.k8sComponents, logger.NewTest(t)) if tc.wantErr { assert.Error(err) return @@ -569,7 +545,3 @@ type stubKubeAPIWaiter struct { func (s *stubKubeAPIWaiter) Wait(_ context.Context, _ kubewaiter.KubernetesClient) error { return s.waitErr } - -type stubEtcdIOPrioritizer struct{} - -func (s *stubEtcdIOPrioritizer) PrioritizeIO() {} diff --git a/bootstrapper/internal/kubernetes/kubewaiter/kubewaiter.go b/bootstrapper/internal/kubernetes/kubewaiter/kubewaiter.go index 956f8dd0d..64ef2abfc 100644 --- a/bootstrapper/internal/kubernetes/kubewaiter/kubewaiter.go +++ b/bootstrapper/internal/kubernetes/kubewaiter/kubewaiter.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package kubewaiter is used to wait for the Kubernetes API to be available. diff --git a/bootstrapper/internal/kubernetes/kubewaiter/kubewaiter_test.go b/bootstrapper/internal/kubernetes/kubewaiter/kubewaiter_test.go index c284bf3aa..fe51e2dbb 100644 --- a/bootstrapper/internal/kubernetes/kubewaiter/kubewaiter_test.go +++ b/bootstrapper/internal/kubernetes/kubewaiter/kubewaiter_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package kubewaiter @@ -39,7 +39,7 @@ func TestCloudKubeAPIWaiter(t *testing.T) { require := require.New(t) waiter := &CloudKubeAPIWaiter{} - ctx, cancel := context.WithTimeout(t.Context(), 0) + ctx, cancel := context.WithTimeout(context.Background(), 0) defer cancel() err := waiter.Wait(ctx, tc.kubeClient) if tc.wantErr { diff --git a/bootstrapper/internal/logging/logger.go b/bootstrapper/internal/logging/logger.go index a24a5a24f..b14f5668f 100644 --- a/bootstrapper/internal/logging/logger.go +++ b/bootstrapper/internal/logging/logger.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package logging provides an interface for logging information to a non-confidential destination diff --git a/bootstrapper/internal/nodelock/nodelock.go b/bootstrapper/internal/nodelock/nodelock.go index 973877493..2a3865c8d 100644 --- a/bootstrapper/internal/nodelock/nodelock.go +++ b/bootstrapper/internal/nodelock/nodelock.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package nodelock handles locking operations on the node. 
diff --git a/bootstrapper/internal/nodelock/nodelock_test.go b/bootstrapper/internal/nodelock/nodelock_test.go index 967432d6a..c5738fec1 100644 --- a/bootstrapper/internal/nodelock/nodelock_test.go +++ b/bootstrapper/internal/nodelock/nodelock_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package nodelock diff --git a/bootstrapper/internal/reboot/BUILD.bazel b/bootstrapper/internal/reboot/BUILD.bazel deleted file mode 100644 index ce71293b3..000000000 --- a/bootstrapper/internal/reboot/BUILD.bazel +++ /dev/null @@ -1,11 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "reboot", - srcs = [ - "reboot_cross.go", - "reboot_linux.go", - ], - importpath = "github.com/edgelesssys/constellation/v2/bootstrapper/internal/reboot", - visibility = ["//bootstrapper:__subpackages__"], -) diff --git a/bootstrapper/internal/reboot/reboot_cross.go b/bootstrapper/internal/reboot/reboot_cross.go deleted file mode 100644 index 48c638c40..000000000 --- a/bootstrapper/internal/reboot/reboot_cross.go +++ /dev/null @@ -1,14 +0,0 @@ -//go:build !linux - -/* -Copyright (c) Edgeless Systems GmbH - -SPDX-License-Identifier: BUSL-1.1 -*/ - -package reboot - -// Reboot is not implemented on non-Linux platforms. -func Reboot(_ error) { - panic("reboot not implemented on non-Linux platforms") -} diff --git a/bootstrapper/internal/reboot/reboot_linux.go b/bootstrapper/internal/reboot/reboot_linux.go deleted file mode 100644 index 7b2b8847e..000000000 --- a/bootstrapper/internal/reboot/reboot_linux.go +++ /dev/null @@ -1,29 +0,0 @@ -//go:build linux - -/* -Copyright (c) Edgeless Systems GmbH - -SPDX-License-Identifier: BUSL-1.1 -*/ - -package reboot - -import ( - "log/syslog" - "syscall" - "time" -) - -// Reboot writes an error message to the system log and reboots the system. -// We call this instead of os.Exit() since failures in the bootstrapper usually require a node reset. -func Reboot(e error) { - syslogWriter, err := syslog.New(syslog.LOG_EMERG|syslog.LOG_KERN, "bootstrapper") - if err != nil { - _ = syscall.Reboot(syscall.LINUX_REBOOT_CMD_RESTART) - } - _ = syslogWriter.Err(e.Error()) - _ = syslogWriter.Emerg("bootstrapper has encountered a non recoverable error. 
Rebooting...") - time.Sleep(time.Minute) // sleep to allow the message to be written to syslog and seen by the user - - _ = syscall.Reboot(syscall.LINUX_REBOOT_CMD_RESTART) -} diff --git a/cli/cmd/root.go b/cli/cmd/root.go index 1826812fa..6baaf3f1f 100644 --- a/cli/cmd/root.go +++ b/cli/cmd/root.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* @@ -61,7 +61,6 @@ func NewRootCmd() *cobra.Command { rootCmd.AddCommand(cmd.NewIAMCmd()) rootCmd.AddCommand(cmd.NewVersionCmd()) rootCmd.AddCommand(cmd.NewInitCmd()) - rootCmd.AddCommand(cmd.NewSSHCmd()) rootCmd.AddCommand(cmd.NewMaaPatchCmd()) return rootCmd diff --git a/cli/internal/cloudcmd/apply.go b/cli/internal/cloudcmd/apply.go index c3b9210c1..59e07de51 100644 --- a/cli/internal/cloudcmd/apply.go +++ b/cli/internal/cloudcmd/apply.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cloudcmd diff --git a/cli/internal/cloudcmd/apply_test.go b/cli/internal/cloudcmd/apply_test.go index f64b6afb9..47217362f 100644 --- a/cli/internal/cloudcmd/apply_test.go +++ b/cli/internal/cloudcmd/apply_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cloudcmd @@ -185,14 +185,14 @@ func TestApplier(t *testing.T) { out: &bytes.Buffer{}, } - diff, err := applier.Plan(t.Context(), tc.config) + diff, err := applier.Plan(context.Background(), tc.config) if err != nil { assert.True(tc.wantErr, "unexpected error: %s", err) return } assert.False(diff) - idFile, err := applier.Apply(t.Context(), tc.provider, tc.config.GetAttestationConfig().GetVariant(), true) + idFile, err := applier.Apply(context.Background(), tc.provider, tc.config.GetAttestationConfig().GetVariant(), true) if tc.wantErr { assert.Error(err) @@ -303,7 +303,7 @@ func TestPlan(t *testing.T) { cfg := config.Default() cfg.RemoveProviderAndAttestationExcept(cloudprovider.Azure) - diff, err := u.Plan(t.Context(), cfg) + diff, err := u.Plan(context.Background(), cfg) if tc.wantErr { require.Error(err) } else { @@ -352,7 +352,7 @@ func TestApply(t *testing.T) { out: io.Discard, } - _, err := u.Apply(t.Context(), cloudprovider.QEMU, variant.QEMUVTPM{}, WithoutRollbackOnError) + _, err := u.Apply(context.Background(), cloudprovider.QEMU, variant.QEMUVTPM{}, WithoutRollbackOnError) if tc.wantErr { assert.Error(err) } else { diff --git a/cli/internal/cloudcmd/clients.go b/cli/internal/cloudcmd/clients.go index 897610f80..75858f19c 100644 --- a/cli/internal/cloudcmd/clients.go +++ b/cli/internal/cloudcmd/clients.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cloudcmd diff --git a/cli/internal/cloudcmd/clients_test.go b/cli/internal/cloudcmd/clients_test.go index fa6985ab1..770907a39 100644 --- a/cli/internal/cloudcmd/clients_test.go +++ b/cli/internal/cloudcmd/clients_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cloudcmd diff --git a/cli/internal/cloudcmd/cloudcmd.go b/cli/internal/cloudcmd/cloudcmd.go index 17ce6bc93..12c6e2879 100644 --- a/cli/internal/cloudcmd/cloudcmd.go +++ b/cli/internal/cloudcmd/cloudcmd.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only 
*/ /* diff --git a/cli/internal/cloudcmd/iam.go b/cli/internal/cloudcmd/iam.go index c02116775..e5902c842 100644 --- a/cli/internal/cloudcmd/iam.go +++ b/cli/internal/cloudcmd/iam.go @@ -1,6 +1,6 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cloudcmd @@ -91,12 +91,10 @@ type GCPIAMConfig struct { Zone string ProjectID string ServiceAccountID string - NamePrefix string } // AzureIAMConfig holds the necessary values for Azure IAM configuration. type AzureIAMConfig struct { - SubscriptionID string Location string ServicePrincipal string ResourceGroup string @@ -142,7 +140,6 @@ func (c *IAMCreator) createGCP(ctx context.Context, cl tfIAMClient, opts *IAMCon vars := terraform.GCPIAMVariables{ ServiceAccountID: opts.GCP.ServiceAccountID, - NamePrefix: opts.GCP.NamePrefix, Project: opts.GCP.ProjectID, Region: opts.GCP.Region, Zone: opts.GCP.Zone, @@ -160,8 +157,7 @@ func (c *IAMCreator) createGCP(ctx context.Context, cl tfIAMClient, opts *IAMCon return IAMOutput{ CloudProvider: cloudprovider.GCP, GCPOutput: GCPIAMOutput{ - ServiceAccountKey: iamOutput.GCP.SaKey, - IAMServiceAccountVM: iamOutput.GCP.ServiceAccountVMMailAddress, + ServiceAccountKey: iamOutput.GCP.SaKey, }, }, nil } @@ -171,7 +167,6 @@ func (c *IAMCreator) createAzure(ctx context.Context, cl tfIAMClient, opts *IAMC defer rollbackOnError(c.out, &retErr, &rollbackerTerraform{client: cl}, opts.TFLogLevel) vars := terraform.AzureIAMVariables{ - SubscriptionID: opts.Azure.SubscriptionID, Location: opts.Azure.Location, ResourceGroup: opts.Azure.ResourceGroup, ServicePrincipal: opts.Azure.ServicePrincipal, @@ -235,8 +230,7 @@ type IAMOutput struct { // GCPIAMOutput contains the output information of a GCP IAM configuration. type GCPIAMOutput struct { - ServiceAccountKey string `json:"serviceAccountID,omitempty"` - IAMServiceAccountVM string `json:"iamServiceAccountVM,omitempty"` + ServiceAccountKey string `json:"serviceAccountID,omitempty"` } // AzureIAMOutput contains the output information of a Microsoft Azure IAM configuration. 
diff --git a/cli/internal/cloudcmd/iam_test.go b/cli/internal/cloudcmd/iam_test.go index be865bbd2..ff198c51c 100644 --- a/cli/internal/cloudcmd/iam_test.go +++ b/cli/internal/cloudcmd/iam_test.go @@ -1,6 +1,6 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cloudcmd @@ -128,7 +128,7 @@ func TestIAMCreator(t *testing.T) { }, } - idFile, err := creator.Create(t.Context(), tc.provider, tc.config) + idFile, err := creator.Create(context.Background(), tc.provider, tc.config) if tc.wantErr { assert.Error(err) @@ -184,7 +184,7 @@ func TestDestroyIAMConfiguration(t *testing.T) { return tc.tfClient, nil }} - err := destroyer.DestroyIAMConfiguration(t.Context(), "", terraform.LogLevelNone) + err := destroyer.DestroyIAMConfiguration(context.Background(), "", terraform.LogLevelNone) if tc.wantErr { assert.Error(err) @@ -278,7 +278,7 @@ func TestGetTfstateServiceAccountKey(t *testing.T) { return tc.cl, nil }} - saKey, err := destroyer.GetTfStateServiceAccountKey(t.Context(), "") + saKey, err := destroyer.GetTfStateServiceAccountKey(context.Background(), "") if tc.wantErr { assert.Error(err) diff --git a/cli/internal/cloudcmd/iamupgrade.go b/cli/internal/cloudcmd/iamupgrade.go index 366b771ca..729af5d29 100644 --- a/cli/internal/cloudcmd/iamupgrade.go +++ b/cli/internal/cloudcmd/iamupgrade.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cloudcmd diff --git a/cli/internal/cloudcmd/rollback.go b/cli/internal/cloudcmd/rollback.go index bdf14e42c..7d894cd2f 100644 --- a/cli/internal/cloudcmd/rollback.go +++ b/cli/internal/cloudcmd/rollback.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cloudcmd diff --git a/cli/internal/cloudcmd/rollback_test.go b/cli/internal/cloudcmd/rollback_test.go index 85ad8d3f7..320dd1745 100644 --- a/cli/internal/cloudcmd/rollback_test.go +++ b/cli/internal/cloudcmd/rollback_test.go @@ -1,13 +1,14 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cloudcmd import ( "bytes" + "context" "errors" "testing" @@ -45,7 +46,7 @@ func TestRollbackTerraform(t *testing.T) { } destroyClusterErrOutput := &bytes.Buffer{} - err := rollbacker.rollback(t.Context(), destroyClusterErrOutput, terraform.LogLevelNone) + err := rollbacker.rollback(context.Background(), destroyClusterErrOutput, terraform.LogLevelNone) if tc.wantCleanupErr { assert.Error(err) if tc.tfClient.cleanUpWorkspaceErr == nil { @@ -106,7 +107,7 @@ func TestRollbackQEMU(t *testing.T) { destroyClusterErrOutput := &bytes.Buffer{} - err := rollbacker.rollback(t.Context(), destroyClusterErrOutput, terraform.LogLevelNone) + err := rollbacker.rollback(context.Background(), destroyClusterErrOutput, terraform.LogLevelNone) if tc.wantErr { assert.Error(err) if tc.tfClient.cleanUpWorkspaceErr == nil { diff --git a/cli/internal/cloudcmd/serviceaccount.go b/cli/internal/cloudcmd/serviceaccount.go index 81a15dbe3..7c54a0b9f 100644 --- a/cli/internal/cloudcmd/serviceaccount.go +++ b/cli/internal/cloudcmd/serviceaccount.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cloudcmd diff --git a/cli/internal/cloudcmd/terminate.go b/cli/internal/cloudcmd/terminate.go index 3cb9cccaa..4005afa9a 100644 --- 
a/cli/internal/cloudcmd/terminate.go +++ b/cli/internal/cloudcmd/terminate.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cloudcmd diff --git a/cli/internal/cloudcmd/terminate_test.go b/cli/internal/cloudcmd/terminate_test.go index 1b72b3458..1d9f0232c 100644 --- a/cli/internal/cloudcmd/terminate_test.go +++ b/cli/internal/cloudcmd/terminate_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cloudcmd @@ -63,7 +63,7 @@ func TestTerminator(t *testing.T) { }, } - err := terminator.Terminate(t.Context(), "", terraform.LogLevelNone) + err := terminator.Terminate(context.Background(), "", terraform.LogLevelNone) if tc.wantErr { assert.Error(err) diff --git a/cli/internal/cloudcmd/tfplan.go b/cli/internal/cloudcmd/tfplan.go index 7fed5de2a..ddcccc72d 100644 --- a/cli/internal/cloudcmd/tfplan.go +++ b/cli/internal/cloudcmd/tfplan.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cloudcmd diff --git a/cli/internal/cloudcmd/tfplan_test.go b/cli/internal/cloudcmd/tfplan_test.go index f4a2b4f5d..3cad299c1 100644 --- a/cli/internal/cloudcmd/tfplan_test.go +++ b/cli/internal/cloudcmd/tfplan_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cloudcmd @@ -101,7 +101,7 @@ func TestTFPlan(t *testing.T) { fs := tc.prepareFs(require.New(t)) hasDiff, planErr := plan( - t.Context(), tc.tf, fs, io.Discard, terraform.LogLevelDebug, + context.Background(), tc.tf, fs, io.Discard, terraform.LogLevelDebug, &terraform.QEMUVariables{}, templateDir, existingWorkspace, backupDir, ) diff --git a/cli/internal/cloudcmd/tfvars.go b/cli/internal/cloudcmd/tfvars.go index 0bcbb690e..309632d98 100644 --- a/cli/internal/cloudcmd/tfvars.go +++ b/cli/internal/cloudcmd/tfvars.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cloudcmd @@ -104,7 +104,6 @@ func awsTerraformVars(conf *config.Config, imageRef string) *terraform.AWSCluste EnableSNP: conf.GetAttestationConfig().GetVariant().Equal(variant.AWSSEVSNP{}), CustomEndpoint: conf.CustomEndpoint, InternalLoadBalancer: conf.InternalLoadBalancer, - AdditionalTags: conf.Tags, } } @@ -147,7 +146,6 @@ func azureTerraformVars(conf *config.Config, imageRef string) (*terraform.AzureC } } vars := &terraform.AzureClusterVariables{ - SubscriptionID: conf.Provider.Azure.SubscriptionID, Name: conf.Name, NodeGroups: nodeGroups, Location: conf.Provider.Azure.Location, @@ -160,7 +158,6 @@ func azureTerraformVars(conf *config.Config, imageRef string) (*terraform.AzureC CustomEndpoint: conf.CustomEndpoint, InternalLoadBalancer: conf.InternalLoadBalancer, MarketplaceImage: nil, - AdditionalTags: conf.Tags, } if conf.UseMarketplaceImage() { @@ -192,7 +189,6 @@ func azureTerraformVars(conf *config.Config, imageRef string) (*terraform.AzureC func azureTerraformIAMVars(conf *config.Config, oldVars terraform.AzureIAMVariables) *terraform.AzureIAMVariables { return &terraform.AzureIAMVariables{ - SubscriptionID: conf.Provider.Azure.SubscriptionID, Location: conf.Provider.Azure.Location, ServicePrincipal: oldVars.ServicePrincipal, ResourceGroup: conf.Provider.Azure.ResourceGroup, @@ -213,12 +209,6 @@ func gcpTerraformVars(conf 
*config.Config, imageRef string) *terraform.GCPCluste DiskType: group.StateDiskType, } } - - ccTech := "SEV" - if conf.GetAttestationConfig().GetVariant().Equal(variant.GCPSEVSNP{}) { - ccTech = "SEV_SNP" - } - return &terraform.GCPClusterVariables{ Name: conf.Name, NodeGroups: nodeGroups, @@ -229,9 +219,6 @@ func gcpTerraformVars(conf *config.Config, imageRef string) *terraform.GCPCluste Debug: conf.IsDebugCluster(), CustomEndpoint: conf.CustomEndpoint, InternalLoadBalancer: conf.InternalLoadBalancer, - CCTechnology: ccTech, - AdditionalLabels: conf.Tags, - IAMServiceAccountVM: conf.Provider.GCP.IAMServiceAccountVM, } } @@ -241,7 +228,6 @@ func gcpTerraformIAMVars(conf *config.Config, oldVars terraform.GCPIAMVariables) Region: conf.Provider.GCP.Region, Zone: conf.Provider.GCP.Zone, ServiceAccountID: oldVars.ServiceAccountID, - NamePrefix: oldVars.NamePrefix, } } @@ -268,14 +254,6 @@ func openStackTerraformVars(conf *config.Config, imageRef string) (*terraform.Op StateDiskType: group.StateDiskType, } } - - // since openstack does not support tags in the form of key = value, the tags will be converted - // to an array of "key=value" strings - tags := []string{} - for key, value := range conf.Tags { - tags = append(tags, fmt.Sprintf("%s=%s", key, value)) - } - return &terraform.OpenStackClusterVariables{ Name: conf.Name, Cloud: toPtr(conf.Provider.OpenStack.Cloud), @@ -287,7 +265,6 @@ func openStackTerraformVars(conf *config.Config, imageRef string) (*terraform.Op CustomEndpoint: conf.CustomEndpoint, InternalLoadBalancer: conf.InternalLoadBalancer, STACKITProjectID: conf.Provider.OpenStack.STACKITProjectID, - AdditionalTags: tags, }, nil } @@ -367,7 +344,7 @@ func qemuTerraformVars( ImagePath: imagePath, ImageFormat: conf.Provider.QEMU.ImageFormat, NodeGroups: nodeGroups, - Machine: "q35", + Machine: "q35", // TODO(elchead): make configurable AB#3225 MetadataAPIImage: conf.Provider.QEMU.MetadataAPIImage, MetadataLibvirtURI: metadataLibvirtURI, NVRAM: conf.Provider.QEMU.NVRAM, diff --git a/cli/internal/cloudcmd/tfvars_test.go b/cli/internal/cloudcmd/tfvars_test.go index dc249bee0..1a6b2a875 100644 --- a/cli/internal/cloudcmd/tfvars_test.go +++ b/cli/internal/cloudcmd/tfvars_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cloudcmd diff --git a/cli/internal/cmd/BUILD.bazel b/cli/internal/cmd/BUILD.bazel index bc6a71a50..8c300b62a 100644 --- a/cli/internal/cmd/BUILD.bazel +++ b/cli/internal/cmd/BUILD.bazel @@ -37,7 +37,6 @@ go_library( "miniup_linux_amd64.go", "recover.go", "spinner.go", - "ssh.go", "status.go", "terminate.go", "upgrade.go", @@ -110,15 +109,8 @@ go_library( "@io_k8s_client_go//tools/clientcmd", "@io_k8s_client_go//tools/clientcmd/api/latest", "@io_k8s_sigs_yaml//:yaml", + "@org_golang_google_grpc//:go_default_library", "@org_golang_x_mod//semver", - "@org_golang_google_grpc//:grpc", - "@com_github_google_go_tdx_guest//abi", - "@com_github_google_go_tdx_guest//proto/tdx", - "//internal/attestation/azure/tdx", - "@com_github_google_go_sev_guest//proto/sevsnp", - "@com_github_google_go_tpm_tools//proto/attest", - "@org_golang_x_crypto//ssh", - "//internal/kms/setup", ] + select({ "@io_bazel_rules_go//go/platform:android_amd64": [ "@org_golang_x_sys//unix", @@ -145,7 +137,6 @@ go_test( "maapatch_test.go", "recover_test.go", "spinner_test.go", - "ssh_test.go", "status_test.go", "terminate_test.go", "upgradeapply_test.go", @@ -202,10 +193,9 @@ go_test( 
"@io_k8s_apimachinery//pkg/runtime/schema", "@io_k8s_client_go//tools/clientcmd", "@io_k8s_client_go//tools/clientcmd/api", - "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//:go_default_library", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//status", - "@org_golang_x_crypto//ssh", "@org_golang_x_mod//semver", "@org_uber_go_goleak//:goleak", ], diff --git a/cli/internal/cmd/apply.go b/cli/internal/cmd/apply.go index d65337a0c..0c524302e 100644 --- a/cli/internal/cmd/apply.go +++ b/cli/internal/cmd/apply.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd @@ -14,6 +14,7 @@ import ( "io" "io/fs" "log/slog" + "net" "os" "path/filepath" "slices" @@ -210,6 +211,10 @@ func (f *applyFlags) parse(flags *pflag.FlagSet) error { // runApply sets up the apply command and runs it. func runApply(cmd *cobra.Command, _ []string) error { + log, err := newCLILogger(cmd) + if err != nil { + return fmt.Errorf("creating logger: %w", err) + } spinner, err := newSpinnerOrStderr(cmd) if err != nil { return err @@ -222,13 +227,13 @@ func runApply(cmd *cobra.Command, _ []string) error { } fileHandler := file.NewHandler(afero.NewOsFs()) - debugLogger, err := newDebugFileLogger(cmd, fileHandler) + logger, err := newDebugFileLogger(cmd, fileHandler) if err != nil { return err } newDialer := func(validator atls.Validator) *dialer.Dialer { - return dialer.New(nil, validator, nil) + return dialer.New(nil, validator, &net.Dialer{}) } upgradeID := generateUpgradeID(upgradeCmdKindApply) @@ -245,15 +250,15 @@ func runApply(cmd *cobra.Command, _ []string) error { ) } - applier := constellation.NewApplier(debugLogger, spinner, constellation.ApplyContextCLI, newDialer) + applier := constellation.NewApplier(log, spinner, constellation.ApplyContextCLI, newDialer) apply := &applyCmd{ fileHandler: fileHandler, flags: flags, - log: debugLogger, - wLog: &warnLogger{cmd: cmd, log: debugLogger}, + log: logger, + wLog: &warnLogger{cmd: cmd, log: log}, spinner: spinner, - merger: &kubeconfigMerger{log: debugLogger}, + merger: &kubeconfigMerger{log: log}, newInfraApplier: newInfraApplier, imageFetcher: imagefetcher.New(), applier: applier, @@ -367,7 +372,7 @@ func (a *applyCmd) apply( // Check current Terraform state, if it exists and infrastructure upgrades are not skipped, // and apply migrations if necessary. 
if !a.flags.skipPhases.contains(skipInfrastructurePhase) { - if err := a.runTerraformApply(cmd, conf, stateFile, upgradeDir, a.flags.yes); err != nil { + if err := a.runTerraformApply(cmd, conf, stateFile, upgradeDir); err != nil { return fmt.Errorf("applying Terraform configuration: %w", err) } } @@ -417,15 +422,9 @@ func (a *applyCmd) apply( // Apply Helm Charts if !a.flags.skipPhases.contains(skipHelmPhase) { - if err := a.applier.AnnotateCoreDNSResources(cmd.Context()); err != nil { - return fmt.Errorf("annotating CoreDNS: %w", err) - } if err := a.runHelmApply(cmd, conf, stateFile, upgradeDir); err != nil { return err } - if err := a.applier.CleanupCoreDNSResources(cmd.Context()); err != nil { - return fmt.Errorf("cleaning up CoreDNS: %w", err) - } } // Upgrade node image @@ -450,7 +449,7 @@ func (a *applyCmd) apply( func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationconfigapi.Fetcher) (*config.Config, *state.State, error) { // Read user's config and state file - a.log.Debug(fmt.Sprintf("Reading config from %q", a.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))) + a.log.Debug(fmt.Sprintf("Reading config from %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))) conf, err := config.New(a.fileHandler, constants.ConfigFilename, configFetcher, a.flags.force) var configValidationErr *config.ValidationError if errors.As(err, &configValidationErr) { @@ -460,7 +459,7 @@ func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationc return nil, nil, err } - a.log.Debug(fmt.Sprintf("Reading state file from %q", a.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename))) + a.log.Debug(fmt.Sprintf("Reading state file from %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename))) stateFile, err := state.CreateOrRead(a.fileHandler, constants.StateFilename) if err != nil { return nil, nil, err @@ -529,10 +528,10 @@ func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationc // If we need to run the init RPC, the version has to be valid // Otherwise, we are able to use an outdated version, meaning we skip the K8s upgrade // We skip version validation if the user explicitly skips the Kubernetes phase - a.log.Debug(fmt.Sprintf("Validating Kubernetes version %q", conf.KubernetesVersion)) + a.log.Debug(fmt.Sprintf("Validating Kubernetes version %s", conf.KubernetesVersion)) validVersion, err := versions.NewValidK8sVersion(string(conf.KubernetesVersion), true) if err != nil { - a.log.Debug(fmt.Sprintf("Kubernetes version not valid: %q", err)) + a.log.Debug(fmt.Sprintf("Kubernetes version not valid: %s", err)) if !a.flags.skipPhases.contains(skipInitPhase) { return nil, nil, err } @@ -571,7 +570,7 @@ func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationc cmd.PrintErrf("Warning: Constellation with Kubernetes %s is still in preview. 
Use only for evaluation purposes.\n", validVersion) } conf.KubernetesVersion = validVersion - a.log.Debug(fmt.Sprintf("Target Kubernetes version set to %q", conf.KubernetesVersion)) + a.log.Debug(fmt.Sprintf("Target Kubernetes version set to %s", conf.KubernetesVersion)) // Validate microservice version (helm versions) in the user's config matches the version of the CLI // This makes sure we catch potential errors early, not just after we already ran Terraform migrations or the init RPC @@ -599,7 +598,7 @@ func (a *applyCmd) applyJoinConfig(cmd *cobra.Command, newConfig config.Attestat ) error { clusterAttestationConfig, err := a.applier.GetClusterAttestationConfig(cmd.Context(), newConfig.GetVariant()) if err != nil { - a.log.Debug(fmt.Sprintf("Getting cluster attestation config failed: %q", err)) + a.log.Debug(fmt.Sprintf("Getting cluster attestation config failed: %s", err)) if k8serrors.IsNotFound(err) { a.log.Debug("Creating new join config") return a.applier.ApplyJoinConfig(cmd.Context(), newConfig, measurementSalt) @@ -821,7 +820,6 @@ func (wl warnLogger) Info(msg string, args ...any) { // Warn prints a formatted warning from the validator. func (wl warnLogger) Warn(msg string, args ...any) { wl.cmd.PrintErrf("Warning: %s %s\n", msg, fmt.Sprint(args...)) - wl.log.Debug(msg, args...) } type warnLog interface { @@ -845,8 +843,6 @@ type applier interface { // methods required to install/upgrade Helm charts - AnnotateCoreDNSResources(context.Context) error - CleanupCoreDNSResources(context.Context) error PrepareHelmCharts( flags helm.Options, state *state.State, serviceAccURI string, masterSecret uri.MasterSecret, ) (helm.Applier, bool, error) diff --git a/cli/internal/cmd/apply_test.go b/cli/internal/cmd/apply_test.go index 9df359668..064e1f42b 100644 --- a/cli/internal/cmd/apply_test.go +++ b/cli/internal/cmd/apply_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd @@ -199,7 +199,7 @@ func TestBackupHelmCharts(t *testing.T) { log: logger.NewTest(t), } - err := a.backupHelmCharts(t.Context(), tc.helmApplier, tc.includesUpgrades, "") + err := a.backupHelmCharts(context.Background(), tc.helmApplier, tc.includesUpgrades, "") if tc.wantErr { assert.Error(err) return @@ -256,7 +256,6 @@ func TestValidateInputs(t *testing.T) { ClientX509CertURL: "client_cert", })) cfg.Provider.GCP.ServiceAccountKeyPath = "saKey.json" - cfg.Provider.GCP.IAMServiceAccountVM = "example@example.com" } require.NoError(fh.WriteYAML(constants.ConfigFilename, cfg)) @@ -554,8 +553,6 @@ func (s *stubConstellApplier) Init(context.Context, atls.Validator, *state.State } type helmApplier interface { - AnnotateCoreDNSResources(context.Context) error - CleanupCoreDNSResources(ctx context.Context) error PrepareHelmCharts( flags helm.Options, stateFile *state.State, serviceAccURI string, masterSecret uri.MasterSecret, ) ( diff --git a/cli/internal/cmd/applyhelm.go b/cli/internal/cmd/applyhelm.go index 2626da306..b9e1538d6 100644 --- a/cli/internal/cmd/applyhelm.go +++ b/cli/internal/cmd/applyhelm.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd @@ -42,7 +42,6 @@ func (a *applyCmd) runHelmApply(cmd *cobra.Command, conf *config.Config, stateFi HelmWaitMode: a.flags.helmWaitMode, ApplyTimeout: a.flags.helmTimeout, AllowDestructive: helm.DenyDestructive, - ServiceCIDR: conf.ServiceCIDR, } if conf.Provider.OpenStack != nil { var 
deployYawolLoadBalancer bool @@ -121,7 +120,7 @@ func (a *applyCmd) backupHelmCharts( if err := executor.SaveCharts(chartDir, a.fileHandler); err != nil { return fmt.Errorf("saving Helm charts to disk: %w", err) } - a.log.Debug(fmt.Sprintf("Helm charts saved to %q", a.flags.pathPrefixer.PrefixPrintablePath(chartDir))) + a.log.Debug(fmt.Sprintf("Helm charts saved to %s", a.flags.pathPrefixer.PrefixPrintablePath(chartDir))) if includesUpgrades { a.log.Debug("Creating backup of CRDs and CRs") diff --git a/cli/internal/cmd/applyinit.go b/cli/internal/cmd/applyinit.go index 2bdaa32f8..34ab7f1a9 100644 --- a/cli/internal/cmd/applyinit.go +++ b/cli/internal/cmd/applyinit.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd @@ -29,7 +29,7 @@ import ( // On success, it writes the Kubernetes admin config file to disk. // Therefore it is skipped if the Kubernetes admin config file already exists. func (a *applyCmd) runInit(cmd *cobra.Command, conf *config.Config, stateFile *state.State) (*bytes.Buffer, error) { - a.log.Debug(fmt.Sprintf("Creating aTLS Validator for %q", conf.GetAttestationConfig().GetVariant())) + a.log.Debug(fmt.Sprintf("Creating aTLS Validator for %s", conf.GetAttestationConfig().GetVariant())) validator, err := choose.Validator(conf.GetAttestationConfig(), a.wLog) if err != nil { return nil, fmt.Errorf("creating validator: %w", err) @@ -121,7 +121,7 @@ func (a *applyCmd) writeInitOutput( if err := a.fileHandler.Write(constants.AdminConfFilename, initResp.Kubeconfig, file.OptNone); err != nil { return fmt.Errorf("writing kubeconfig: %w", err) } - a.log.Debug(fmt.Sprintf("Kubeconfig written to %q", a.flags.pathPrefixer.PrefixPrintablePath(constants.AdminConfFilename))) + a.log.Debug(fmt.Sprintf("Kubeconfig written to %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.AdminConfFilename))) if mergeConfig { if err := a.merger.mergeConfigs(constants.AdminConfFilename, a.fileHandler); err != nil { @@ -136,7 +136,7 @@ func (a *applyCmd) writeInitOutput( return fmt.Errorf("writing Constellation state file: %w", err) } - a.log.Debug(fmt.Sprintf("Constellation state file written to %q", a.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename))) + a.log.Debug(fmt.Sprintf("Constellation state file written to %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename))) if !mergeConfig { fmt.Fprintln(wr, "You can now connect to your cluster by executing:") diff --git a/cli/internal/cmd/applyterraform.go b/cli/internal/cmd/applyterraform.go index 14cba64e4..668ad0eed 100644 --- a/cli/internal/cmd/applyterraform.go +++ b/cli/internal/cmd/applyterraform.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd @@ -22,7 +22,7 @@ import ( ) // runTerraformApply checks if changes to Terraform are required and applies them. 
-func (a *applyCmd) runTerraformApply(cmd *cobra.Command, conf *config.Config, stateFile *state.State, upgradeDir string, yesFlag bool) error { +func (a *applyCmd) runTerraformApply(cmd *cobra.Command, conf *config.Config, stateFile *state.State, upgradeDir string) error { a.log.Debug("Checking if Terraform migrations are required") terraformClient, removeClient, err := a.newInfraApplier(cmd.Context()) if err != nil { @@ -36,20 +36,6 @@ func (a *applyCmd) runTerraformApply(cmd *cobra.Command, conf *config.Config, st return fmt.Errorf("checking if Terraform workspace is empty: %w", err) } - if !isNewCluster && cloudcmd.UpgradeRequiresIAMMigration(conf.GetProvider()) { - cmd.Println("WARNING: This upgrade requires an IAM migration. Please make sure you have applied the IAM migration using `iam upgrade apply` before continuing.") - if !yesFlag { - yes, err := askToConfirm(cmd, "Did you upgrade the IAM resources?") - if err != nil { - return fmt.Errorf("asking for confirmation: %w", err) - } - if !yes { - cmd.Println("Skipping upgrade.") - return nil - } - } - } - if changesRequired, err := a.planTerraformChanges(cmd, conf, terraformClient); err != nil { return fmt.Errorf("planning Terraform migrations: %w", err) } else if !changesRequired { diff --git a/cli/internal/cmd/cloud.go b/cli/internal/cmd/cloud.go index e16e2331e..82a699a7f 100644 --- a/cli/internal/cmd/cloud.go +++ b/cli/internal/cmd/cloud.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/cli/internal/cmd/cloud_test.go b/cli/internal/cmd/cloud_test.go index 20ba89808..d4844d092 100644 --- a/cli/internal/cmd/cloud_test.go +++ b/cli/internal/cmd/cloud_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/cli/internal/cmd/cmd.go b/cli/internal/cmd/cmd.go index 69b542d89..a5997b941 100644 --- a/cli/internal/cmd/cmd.go +++ b/cli/internal/cmd/cmd.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* diff --git a/cli/internal/cmd/config.go b/cli/internal/cmd/config.go index 9386a8838..3392df473 100644 --- a/cli/internal/cmd/config.go +++ b/cli/internal/cmd/config.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/cli/internal/cmd/configfetchmeasurements.go b/cli/internal/cmd/configfetchmeasurements.go index a9b777973..04af8632c 100644 --- a/cli/internal/cmd/configfetchmeasurements.go +++ b/cli/internal/cmd/configfetchmeasurements.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd @@ -104,7 +104,7 @@ func runConfigFetchMeasurements(cmd *cobra.Command, _ []string) error { if err := cfm.flags.parse(cmd.Flags()); err != nil { return fmt.Errorf("parsing flags: %w", err) } - cfm.log.Debug("Using flags", "insecure", cfm.flags.insecure, "measurementsURL", cfm.flags.measurementsURL, "signatureURL", cfm.flags.signatureURL) + cfm.log.Debug(fmt.Sprintf("Using flags %+v", cfm.flags)) fetcher := attestationconfigapi.NewFetcherWithClient(http.DefaultClient, constants.CDNRepositoryURL) return cfm.configFetchMeasurements(cmd, fileHandler, fetcher) @@ -152,14 +152,14 @@ func (cfm *configFetchMeasurementsCmd) configFetchMeasurements( return 
fmt.Errorf("fetching and verifying measurements: %w", err) } } - cfm.log.Debug(fmt.Sprintf("Measurements: %s", fetchedMeasurements.String())) + cfm.log.Debug(fmt.Sprintf("Measurements: %#v\n", fetchedMeasurements)) cfm.log.Debug("Updating measurements in configuration") conf.UpdateMeasurements(fetchedMeasurements) if err := fileHandler.WriteYAML(constants.ConfigFilename, conf, file.OptOverwrite); err != nil { return err } - cfm.log.Debug(fmt.Sprintf("Configuration written to %q", cfm.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))) + cfm.log.Debug(fmt.Sprintf("Configuration written to %s", cfm.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))) cmd.Print("Successfully fetched measurements and updated Configuration\n") return nil } diff --git a/cli/internal/cmd/configfetchmeasurements_test.go b/cli/internal/cmd/configfetchmeasurements_test.go index 5eca25c1c..9cebbb7da 100644 --- a/cli/internal/cmd/configfetchmeasurements_test.go +++ b/cli/internal/cmd/configfetchmeasurements_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd @@ -204,8 +204,18 @@ func (f stubVerifyFetcher) FetchAndVerifyMeasurements(_ context.Context, _ strin type stubAttestationFetcher struct{} -func (f stubAttestationFetcher) FetchLatestVersion(_ context.Context, _ variant.Variant) (attestationconfigapi.Entry, error) { - return attestationconfigapi.Entry{ +func (f stubAttestationFetcher) FetchSEVSNPVersionList(_ context.Context, _ attestationconfigapi.SEVSNPVersionList) (attestationconfigapi.SEVSNPVersionList, error) { + return attestationconfigapi.SEVSNPVersionList{}, nil +} + +func (f stubAttestationFetcher) FetchSEVSNPVersion(_ context.Context, _ attestationconfigapi.SEVSNPVersionAPI) (attestationconfigapi.SEVSNPVersionAPI, error) { + return attestationconfigapi.SEVSNPVersionAPI{ + SEVSNPVersion: testCfg, + }, nil +} + +func (f stubAttestationFetcher) FetchSEVSNPVersionLatest(_ context.Context, _ variant.Variant) (attestationconfigapi.SEVSNPVersionAPI, error) { + return attestationconfigapi.SEVSNPVersionAPI{ SEVSNPVersion: testCfg, }, nil } diff --git a/cli/internal/cmd/configgenerate.go b/cli/internal/cmd/configgenerate.go index f5cc297e4..cfbe10b59 100644 --- a/cli/internal/cmd/configgenerate.go +++ b/cli/internal/cmd/configgenerate.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd @@ -37,7 +37,6 @@ func newConfigGenerateCmd() *cobra.Command { } cmd.Flags().StringP("kubernetes", "k", semver.MajorMinor(string(config.Default().KubernetesVersion)), "Kubernetes version to use in format MAJOR.MINOR") cmd.Flags().StringP("attestation", "a", "", fmt.Sprintf("attestation variant to use %s. 
If not specified, the default for the cloud provider is used", printFormattedSlice(variant.GetAvailableAttestationVariants()))) - cmd.Flags().StringSliceP("tags", "t", nil, "additional tags for created resources given a list of key=value") return cmd } @@ -46,7 +45,6 @@ type generateFlags struct { rootFlags k8sVersion versions.ValidK8sVersion attestationVariant variant.Variant - tags cloudprovider.Tags } func (f *generateFlags) parse(flags *pflag.FlagSet) error { @@ -66,12 +64,6 @@ func (f *generateFlags) parse(flags *pflag.FlagSet) error { } f.attestationVariant = variant - tags, err := parseTagsFlags(flags) - if err != nil { - return err - } - f.tags = tags - return nil } @@ -93,13 +85,13 @@ func runConfigGenerate(cmd *cobra.Command, args []string) error { if err := cg.flags.parse(cmd.Flags()); err != nil { return fmt.Errorf("parsing flags: %w", err) } - log.Debug("Using flags", "k8sVersion", cg.flags.k8sVersion, "attestationVariant", cg.flags.attestationVariant) + log.Debug(fmt.Sprintf("Parsed flags as %+v", cg.flags)) return cg.configGenerate(cmd, fileHandler, provider, args[0]) } func (cg *configGenerateCmd) configGenerate(cmd *cobra.Command, fileHandler file.Handler, provider cloudprovider.Provider, rawProvider string) error { - cg.log.Debug(fmt.Sprintf("Using cloud provider %q", provider.String())) + cg.log.Debug(fmt.Sprintf("Using cloud provider %s", provider.String())) // Config creation conf, err := createConfigWithAttestationVariant(provider, rawProvider, cg.flags.attestationVariant) @@ -107,7 +99,6 @@ func (cg *configGenerateCmd) configGenerate(cmd *cobra.Command, fileHandler file return fmt.Errorf("creating config: %w", err) } conf.KubernetesVersion = cg.flags.k8sVersion - conf.Tags = cg.flags.tags cg.log.Debug("Writing YAML data to configuration file") if err := fileHandler.WriteYAML(constants.ConfigFilename, conf, file.OptMkdirAll); err != nil { return fmt.Errorf("writing config file: %w", err) @@ -230,27 +221,3 @@ func parseAttestationFlag(flags *pflag.FlagSet) (variant.Variant, error) { return attestationVariant, nil } - -func parseTagsFlags(flags *pflag.FlagSet) (cloudprovider.Tags, error) { - tagsSlice, err := flags.GetStringSlice("tags") - if err != nil { - return nil, fmt.Errorf("getting tags flag: %w", err) - } - - // no tags given - if tagsSlice == nil { - return nil, nil - } - - tags := make(cloudprovider.Tags) - for _, tag := range tagsSlice { - tagSplit := strings.Split(tag, "=") - if len(tagSplit) != 2 { - return nil, fmt.Errorf("wrong format of tags: expected \"key=value\", got %q", tag) - } - - tags[tagSplit[0]] = tagSplit[1] - } - - return tags, nil -} diff --git a/cli/internal/cmd/configgenerate_test.go b/cli/internal/cmd/configgenerate_test.go index cbb1349e9..d1a4fbc92 100644 --- a/cli/internal/cmd/configgenerate_test.go +++ b/cli/internal/cmd/configgenerate_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd @@ -235,11 +235,6 @@ func TestValidProviderAttestationCombination(t *testing.T) { variant.GCPSEVES{}, config.AttestationConfig{GCPSEVES: defaultAttestation.GCPSEVES}, }, - { - cloudprovider.GCP, - variant.GCPSEVSNP{}, - config.AttestationConfig{GCPSEVSNP: defaultAttestation.GCPSEVSNP}, - }, { cloudprovider.QEMU, variant.QEMUVTPM{}, @@ -291,10 +286,6 @@ func TestParseAttestationFlag(t *testing.T) { attestationFlag: "gcp-sev-es", wantVariant: variant.GCPSEVES{}, }, - "GCPSEVSNP": { - attestationFlag: "gcp-sev-snp", - wantVariant: variant.GCPSEVSNP{}, - }, 
"QEMUVTPM": { attestationFlag: "qemu-vtpm", wantVariant: variant.QEMUVTPM{}, diff --git a/cli/internal/cmd/configinstancetypes.go b/cli/internal/cmd/configinstancetypes.go index 0c4c4a73d..555ad5bb2 100644 --- a/cli/internal/cmd/configinstancetypes.go +++ b/cli/internal/cmd/configinstancetypes.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/cli/internal/cmd/configkubernetesversions.go b/cli/internal/cmd/configkubernetesversions.go index 4cc24da6a..54183039d 100644 --- a/cli/internal/cmd/configkubernetesversions.go +++ b/cli/internal/cmd/configkubernetesversions.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/cli/internal/cmd/configmigrate.go b/cli/internal/cmd/configmigrate.go index 2534ae3df..f8bf7190a 100644 --- a/cli/internal/cmd/configmigrate.go +++ b/cli/internal/cmd/configmigrate.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/cli/internal/cmd/create.go b/cli/internal/cmd/create.go index 824ea1a2a..994c7e840 100644 --- a/cli/internal/cmd/create.go +++ b/cli/internal/cmd/create.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/cli/internal/cmd/create_test.go b/cli/internal/cmd/create_test.go index 126ec80f8..f6290cce8 100644 --- a/cli/internal/cmd/create_test.go +++ b/cli/internal/cmd/create_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/cli/internal/cmd/iam.go b/cli/internal/cmd/iam.go index 1208ad84f..229a0b2ee 100644 --- a/cli/internal/cmd/iam.go +++ b/cli/internal/cmd/iam.go @@ -1,6 +1,6 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/cli/internal/cmd/iamcreate.go b/cli/internal/cmd/iamcreate.go index 85f18b91d..4067b33b0 100644 --- a/cli/internal/cmd/iamcreate.go +++ b/cli/internal/cmd/iamcreate.go @@ -1,6 +1,6 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd @@ -29,9 +29,6 @@ var ( regionRegex = regexp.MustCompile(`^\w+-\w+[0-9]$`) // Source: https://cloud.google.com/resource-manager/reference/rest/v1/projects. gcpIDRegex = regexp.MustCompile(`^[a-z][-a-z0-9]{4,28}[a-z0-9]$`) - - // We currently append 6 characters to the prefix, therefore we remove 6 characters from the gcpIDRegex. - gcpPrefixRegex = regexp.MustCompile(`^[a-z][-a-z0-9]{4,22}[a-z0-9]$`) ) // newIAMCreateCmd returns a new cobra.Command for the iam create parent command. It needs another verb, and does nothing on its own. 
@@ -136,7 +133,7 @@ func (c *iamCreator) create(ctx context.Context) error { var conf config.Config if c.flags.updateConfig { - c.log.Debug(fmt.Sprintf("Parsing config %q", c.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))) + c.log.Debug(fmt.Sprintf("Parsing config %s", c.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))) if err := c.fileHandler.ReadYAML(constants.ConfigFilename, &conf); err != nil { return fmt.Errorf("error reading the configuration file: %w", err) } @@ -164,7 +161,7 @@ func (c *iamCreator) create(ctx context.Context) error { } if c.flags.updateConfig { - c.log.Debug(fmt.Sprintf("Writing IAM configuration to %q", c.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))) + c.log.Debug(fmt.Sprintf("Writing IAM configuration to %s", c.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))) c.providerCreator.writeOutputValuesToConfig(&conf, iamFile) if err := c.fileHandler.WriteYAML(constants.ConfigFilename, conf, file.OptOverwrite); err != nil { return err diff --git a/cli/internal/cmd/iamcreate_test.go b/cli/internal/cmd/iamcreate_test.go index 427a0a262..3a9c83051 100644 --- a/cli/internal/cmd/iamcreate_test.go +++ b/cli/internal/cmd/iamcreate_test.go @@ -1,6 +1,6 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd @@ -456,7 +456,6 @@ func TestIAMCreateGCP(t *testing.T) { creator *stubIAMCreator zoneFlag string serviceAccountIDFlag string - namePrefixFlag string projectIDFlag string yesFlag bool updateConfigFlag bool @@ -467,14 +466,6 @@ func TestIAMCreateGCP(t *testing.T) { wantErr bool }{ "iam create gcp": { - setupFs: defaultFs, - creator: &stubIAMCreator{id: validIAMIDFile}, - zoneFlag: "europe-west1-a", - namePrefixFlag: "constell-test", - projectIDFlag: "constell-1234", - yesFlag: true, - }, - "iam create gcp with deprecated serice account flag": { setupFs: defaultFs, creator: &stubIAMCreator{id: validIAMIDFile}, zoneFlag: "europe-west1-a", @@ -483,91 +474,91 @@ func TestIAMCreateGCP(t *testing.T) { yesFlag: true, }, "iam create gcp with existing config": { - setupFs: defaultFs, - creator: &stubIAMCreator{id: validIAMIDFile}, - zoneFlag: "europe-west1-a", - namePrefixFlag: "constell-test", - projectIDFlag: "constell-1234", - yesFlag: true, - existingConfigFiles: []string{constants.ConfigFilename}, + setupFs: defaultFs, + creator: &stubIAMCreator{id: validIAMIDFile}, + zoneFlag: "europe-west1-a", + serviceAccountIDFlag: "constell-test", + projectIDFlag: "constell-1234", + yesFlag: true, + existingConfigFiles: []string{constants.ConfigFilename}, }, "iam create gcp --update-config": { - setupFs: defaultFs, - creator: &stubIAMCreator{id: validIAMIDFile}, - zoneFlag: "europe-west1-a", - namePrefixFlag: "constell-test", - projectIDFlag: "constell-1234", - updateConfigFlag: true, - yesFlag: true, - existingConfigFiles: []string{constants.ConfigFilename}, + setupFs: defaultFs, + creator: &stubIAMCreator{id: validIAMIDFile}, + zoneFlag: "europe-west1-a", + serviceAccountIDFlag: "constell-test", + projectIDFlag: "constell-1234", + updateConfigFlag: true, + yesFlag: true, + existingConfigFiles: []string{constants.ConfigFilename}, }, "iam create gcp existing terraform dir": { - setupFs: defaultFs, - creator: &stubIAMCreator{id: validIAMIDFile}, - zoneFlag: "europe-west1-a", - namePrefixFlag: "constell-test", - projectIDFlag: "constell-1234", + setupFs: defaultFs, + creator: &stubIAMCreator{id: validIAMIDFile}, + zoneFlag: "europe-west1-a", + 
serviceAccountIDFlag: "constell-test", + projectIDFlag: "constell-1234", existingDirs: []string{constants.TerraformIAMWorkingDir}, yesFlag: true, wantErr: true, }, "iam create gcp invalid b64": { - setupFs: defaultFs, - creator: &stubIAMCreator{id: invalidIAMIDFile}, - zoneFlag: "europe-west1-a", - namePrefixFlag: "constell-test", - projectIDFlag: "constell-1234", - yesFlag: true, - wantErr: true, + setupFs: defaultFs, + creator: &stubIAMCreator{id: invalidIAMIDFile}, + zoneFlag: "europe-west1-a", + serviceAccountIDFlag: "constell-test", + projectIDFlag: "constell-1234", + yesFlag: true, + wantErr: true, }, "interactive": { - setupFs: defaultFs, - creator: &stubIAMCreator{id: validIAMIDFile}, - zoneFlag: "europe-west1-a", - namePrefixFlag: "constell-test", - projectIDFlag: "constell-1234", - stdin: "yes\n", + setupFs: defaultFs, + creator: &stubIAMCreator{id: validIAMIDFile}, + zoneFlag: "europe-west1-a", + serviceAccountIDFlag: "constell-test", + projectIDFlag: "constell-1234", + stdin: "yes\n", }, "interactive update config": { - setupFs: defaultFs, - creator: &stubIAMCreator{id: validIAMIDFile}, - zoneFlag: "europe-west1-a", - namePrefixFlag: "constell-test", - projectIDFlag: "constell-1234", - stdin: "yes\n", - updateConfigFlag: true, - existingConfigFiles: []string{constants.ConfigFilename}, + setupFs: defaultFs, + creator: &stubIAMCreator{id: validIAMIDFile}, + zoneFlag: "europe-west1-a", + serviceAccountIDFlag: "constell-test", + projectIDFlag: "constell-1234", + stdin: "yes\n", + updateConfigFlag: true, + existingConfigFiles: []string{constants.ConfigFilename}, }, "interactive abort": { - setupFs: defaultFs, - creator: &stubIAMCreator{id: validIAMIDFile}, - zoneFlag: "europe-west1-a", - namePrefixFlag: "constell-test", - projectIDFlag: "constell-1234", - stdin: "no\n", - wantAbort: true, + setupFs: defaultFs, + creator: &stubIAMCreator{id: validIAMIDFile}, + zoneFlag: "europe-west1-a", + serviceAccountIDFlag: "constell-test", + projectIDFlag: "constell-1234", + stdin: "no\n", + wantAbort: true, }, "interactive abort update config": { - setupFs: defaultFs, - creator: &stubIAMCreator{id: validIAMIDFile}, - zoneFlag: "europe-west1-a", - namePrefixFlag: "constell-test", - projectIDFlag: "constell-1234", - stdin: "no\n", - wantAbort: true, - updateConfigFlag: true, - existingConfigFiles: []string{constants.ConfigFilename}, + setupFs: defaultFs, + creator: &stubIAMCreator{id: validIAMIDFile}, + zoneFlag: "europe-west1-a", + serviceAccountIDFlag: "constell-test", + projectIDFlag: "constell-1234", + stdin: "no\n", + wantAbort: true, + updateConfigFlag: true, + existingConfigFiles: []string{constants.ConfigFilename}, }, "unwritable fs": { - setupFs: readOnlyFs, - creator: &stubIAMCreator{id: validIAMIDFile}, - zoneFlag: "europe-west1-a", - namePrefixFlag: "constell-test", - projectIDFlag: "constell-1234", - yesFlag: true, - updateConfigFlag: true, - wantErr: true, + setupFs: readOnlyFs, + creator: &stubIAMCreator{id: validIAMIDFile}, + zoneFlag: "europe-west1-a", + serviceAccountIDFlag: "constell-test", + projectIDFlag: "constell-1234", + yesFlag: true, + updateConfigFlag: true, + wantErr: true, }, } @@ -599,7 +590,6 @@ func TestIAMCreateGCP(t *testing.T) { flags: gcpIAMCreateFlags{ zone: tc.zoneFlag, serviceAccountID: tc.serviceAccountIDFlag, - namePrefix: tc.serviceAccountIDFlag, projectID: tc.projectIDFlag, }, }, diff --git a/cli/internal/cmd/iamcreateaws.go b/cli/internal/cmd/iamcreateaws.go index 1a29076c5..b648b87fc 100644 --- a/cli/internal/cmd/iamcreateaws.go +++ 
b/cli/internal/cmd/iamcreateaws.go @@ -1,6 +1,6 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/cli/internal/cmd/iamcreateazure.go b/cli/internal/cmd/iamcreateazure.go index f2ee7982b..d80bcb654 100644 --- a/cli/internal/cmd/iamcreateazure.go +++ b/cli/internal/cmd/iamcreateazure.go @@ -1,14 +1,12 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd import ( - "errors" "fmt" - "os" "github.com/edgelesssys/constellation/v2/cli/internal/cloudcmd" "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider" @@ -28,7 +26,6 @@ func newIAMCreateAzureCmd() *cobra.Command { RunE: runIAMCreateAzure, } - cmd.Flags().String("subscriptionID", "", "subscription ID of the Azure account. Required if the 'ARM_SUBSCRIPTION_ID' environment variable is not set") cmd.Flags().String("resourceGroup", "", "name prefix of the two resource groups your cluster / IAM resources will be created in (required)") must(cobra.MarkFlagRequired(cmd.Flags(), "resourceGroup")) cmd.Flags().String("region", "", "region the resources will be created in, e.g., westus (required)") @@ -48,7 +45,6 @@ func runIAMCreateAzure(cmd *cobra.Command, _ []string) error { // azureIAMCreateFlags contains the parsed flags of the iam create azure command. type azureIAMCreateFlags struct { - subscriptionID string region string resourceGroup string servicePrincipal string @@ -56,14 +52,6 @@ type azureIAMCreateFlags struct { func (f *azureIAMCreateFlags) parse(flags *pflag.FlagSet) error { var err error - f.subscriptionID, err = flags.GetString("subscriptionID") - if err != nil { - return fmt.Errorf("getting 'subscriptionID' flag: %w", err) - } - if f.subscriptionID == "" && os.Getenv("ARM_SUBSCRIPTION_ID") == "" { - return errors.New("either flag 'subscriptionID' or environment variable 'ARM_SUBSCRIPTION_ID' must be set") - } - f.region, err = flags.GetString("region") if err != nil { return fmt.Errorf("getting 'region' flag: %w", err) @@ -87,7 +75,6 @@ type azureIAMCreator struct { func (c *azureIAMCreator) getIAMConfigOptions() *cloudcmd.IAMConfigOptions { return &cloudcmd.IAMConfigOptions{ Azure: cloudcmd.AzureIAMConfig{ - SubscriptionID: c.flags.subscriptionID, Location: c.flags.region, ResourceGroup: c.flags.resourceGroup, ServicePrincipal: c.flags.servicePrincipal, @@ -96,7 +83,6 @@ func (c *azureIAMCreator) getIAMConfigOptions() *cloudcmd.IAMConfigOptions { } func (c *azureIAMCreator) printConfirmValues(cmd *cobra.Command) { - cmd.Printf("Subscription ID:\t%s\n", c.flags.subscriptionID) cmd.Printf("Region:\t\t\t%s\n", c.flags.region) cmd.Printf("Resource Group:\t\t%s\n", c.flags.resourceGroup) cmd.Printf("Service Principal:\t%s\n\n", c.flags.servicePrincipal) diff --git a/cli/internal/cmd/iamcreategcp.go b/cli/internal/cmd/iamcreategcp.go index f2c6c6ead..b6c55e5d1 100644 --- a/cli/internal/cmd/iamcreategcp.go +++ b/cli/internal/cmd/iamcreategcp.go @@ -1,6 +1,6 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd @@ -31,19 +31,13 @@ func newIAMCreateGCPCmd() *cobra.Command { cmd.Flags().String("zone", "", "GCP zone the cluster will be deployed in (required)\n"+ "Find a list of available zones here: https://cloud.google.com/compute/docs/regions-zones#available") must(cobra.MarkFlagRequired(cmd.Flags(), "zone")) - - cmd.Flags().String("serviceAccountID", "", "[Deprecated use 
\"--prefix\"]ID for the service account that will be created (required)\n"+ - "Must be 6 to 30 lowercase letters, digits, or hyphens. This flag is mutually exclusive with --prefix.") - cmd.Flags().String("prefix", "", "Prefix for the service account ID and VM ID that will be created (required)\n"+ - "Must be letters, digits, or hyphens.") - + cmd.Flags().String("serviceAccountID", "", "ID for the service account that will be created (required)\n"+ + "Must be 6 to 30 lowercase letters, digits, or hyphens.") + must(cobra.MarkFlagRequired(cmd.Flags(), "serviceAccountID")) cmd.Flags().String("projectID", "", "ID of the GCP project the configuration will be created in (required)\n"+ "Find it on the welcome screen of your project: https://console.cloud.google.com/welcome") must(cobra.MarkFlagRequired(cmd.Flags(), "projectID")) - cmd.MarkFlagsMutuallyExclusive([]string{"prefix", "serviceAccountID"}...) - must(cmd.Flags().MarkDeprecated("serviceAccountID", "use --prefix instead")) - return cmd } @@ -59,7 +53,6 @@ func runIAMCreateGCP(cmd *cobra.Command, _ []string) error { type gcpIAMCreateFlags struct { rootFlags serviceAccountID string - namePrefix string zone string region string projectID string @@ -98,18 +91,9 @@ func (f *gcpIAMCreateFlags) parse(flags *pflag.FlagSet) error { if err != nil { return fmt.Errorf("getting 'serviceAccountID' flag: %w", err) } - if f.serviceAccountID != "" && !gcpIDRegex.MatchString(f.serviceAccountID) { + if !gcpIDRegex.MatchString(f.serviceAccountID) { return fmt.Errorf("serviceAccountID %q doesn't match %s", f.serviceAccountID, gcpIDRegex) } - - f.namePrefix, err = flags.GetString("prefix") - if err != nil { - return fmt.Errorf("getting 'prefix' flag: %w", err) - } - if f.namePrefix != "" && !gcpPrefixRegex.MatchString(f.namePrefix) { - return fmt.Errorf("prefix %q doesn't match %s", f.namePrefix, gcpIDRegex) - } - return nil } @@ -125,19 +109,13 @@ func (c *gcpIAMCreator) getIAMConfigOptions() *cloudcmd.IAMConfigOptions { Region: c.flags.region, ProjectID: c.flags.projectID, ServiceAccountID: c.flags.serviceAccountID, - NamePrefix: c.flags.namePrefix, }, } } func (c *gcpIAMCreator) printConfirmValues(cmd *cobra.Command) { cmd.Printf("Project ID:\t\t%s\n", c.flags.projectID) - if c.flags.namePrefix != "" { - cmd.Printf("Name Prefix:\t\t%s\n", c.flags.namePrefix) - } - if c.flags.serviceAccountID != "" { - cmd.Printf("Service Account ID:\t%s\n", c.flags.serviceAccountID) - } + cmd.Printf("Service Account ID:\t%s\n", c.flags.serviceAccountID) cmd.Printf("Region:\t\t\t%s\n", c.flags.region) cmd.Printf("Zone:\t\t\t%s\n\n", c.flags.zone) } @@ -149,12 +127,11 @@ func (c *gcpIAMCreator) printOutputValues(cmd *cobra.Command, _ cloudcmd.IAMOutp cmd.Printf("serviceAccountKeyPath:\t%s\n\n", c.flags.pathPrefixer.PrefixPrintablePath(constants.GCPServiceAccountKeyFilename)) } -func (c *gcpIAMCreator) writeOutputValuesToConfig(conf *config.Config, out cloudcmd.IAMOutput) { +func (c *gcpIAMCreator) writeOutputValuesToConfig(conf *config.Config, _ cloudcmd.IAMOutput) { conf.Provider.GCP.Project = c.flags.projectID conf.Provider.GCP.ServiceAccountKeyPath = constants.GCPServiceAccountKeyFilename // File was created in workspace, so only the filename is needed. 
conf.Provider.GCP.Region = c.flags.region conf.Provider.GCP.Zone = c.flags.zone - conf.Provider.GCP.IAMServiceAccountVM = out.GCPOutput.IAMServiceAccountVM for groupName, group := range conf.NodeGroups { group.Zone = c.flags.zone conf.NodeGroups[groupName] = group diff --git a/cli/internal/cmd/iamdestroy.go b/cli/internal/cmd/iamdestroy.go index abcf9290f..f89c939a5 100644 --- a/cli/internal/cmd/iamdestroy.go +++ b/cli/internal/cmd/iamdestroy.go @@ -1,6 +1,6 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/cli/internal/cmd/iamdestroy_test.go b/cli/internal/cmd/iamdestroy_test.go index 675f8df5a..e6dd4feb2 100644 --- a/cli/internal/cmd/iamdestroy_test.go +++ b/cli/internal/cmd/iamdestroy_test.go @@ -1,6 +1,6 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/cli/internal/cmd/iamupgradeapply.go b/cli/internal/cmd/iamupgradeapply.go index bf8f7b275..0a3485d27 100644 --- a/cli/internal/cmd/iamupgradeapply.go +++ b/cli/internal/cmd/iamupgradeapply.go @@ -1,6 +1,6 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/cli/internal/cmd/iamupgradeapply_test.go b/cli/internal/cmd/iamupgradeapply_test.go index 32074f56c..e1d4c19ce 100644 --- a/cli/internal/cmd/iamupgradeapply_test.go +++ b/cli/internal/cmd/iamupgradeapply_test.go @@ -1,6 +1,6 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd @@ -171,6 +171,14 @@ type stubConfigFetcher struct { fetchLatestErr error } -func (s *stubConfigFetcher) FetchLatestVersion(context.Context, variant.Variant) (attestationconfigapi.Entry, error) { - return attestationconfigapi.Entry{}, s.fetchLatestErr +func (s *stubConfigFetcher) FetchSEVSNPVersion(context.Context, attestationconfigapi.SEVSNPVersionAPI) (attestationconfigapi.SEVSNPVersionAPI, error) { + panic("not implemented") +} + +func (s *stubConfigFetcher) FetchSEVSNPVersionList(context.Context, attestationconfigapi.SEVSNPVersionList) (attestationconfigapi.SEVSNPVersionList, error) { + panic("not implemented") +} + +func (s *stubConfigFetcher) FetchSEVSNPVersionLatest(context.Context, variant.Variant) (attestationconfigapi.SEVSNPVersionAPI, error) { + return attestationconfigapi.SEVSNPVersionAPI{}, s.fetchLatestErr } diff --git a/cli/internal/cmd/init.go b/cli/internal/cmd/init.go index ce10f67e3..8075db901 100644 --- a/cli/internal/cmd/init.go +++ b/cli/internal/cmd/init.go @@ -1,12 +1,13 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd import ( + "context" "fmt" "io" "os" @@ -81,7 +82,7 @@ func (c *kubeconfigMerger) mergeConfigs(configPath string, fileHandler file.Hand // Set the current context to the cluster we just created cfg.CurrentContext = constellConfig.CurrentContext - c.log.Debug(fmt.Sprintf("Set current context to %q", cfg.CurrentContext)) + c.log.Debug(fmt.Sprintf("Set current context to %s", cfg.CurrentContext)) json, err := runtime.Encode(clientcodec.Codec, cfg) if err != nil { @@ -96,7 +97,7 @@ func (c *kubeconfigMerger) mergeConfigs(configPath string, fileHandler file.Hand if err := fileHandler.Write(clientcmd.RecommendedHomeFile, mergedKubeconfig, file.OptOverwrite); err != nil { return fmt.Errorf("writing merged kubeconfig to file: %w", err) } - 
c.log.Debug(fmt.Sprintf("Merged kubeconfig into default config file: %q", clientcmd.RecommendedHomeFile)) + c.log.Debug(fmt.Sprintf("Merged kubeconfig into default config file: %s", clientcmd.RecommendedHomeFile)) return nil } @@ -105,5 +106,5 @@ func (c *kubeconfigMerger) kubeconfigEnvVar() string { } type grpcDialer interface { - Dial(target string) (*grpc.ClientConn, error) + Dial(ctx context.Context, target string) (*grpc.ClientConn, error) } diff --git a/cli/internal/cmd/init_test.go b/cli/internal/cmd/init_test.go index 63ee594fe..8d6d2b1bb 100644 --- a/cli/internal/cmd/init_test.go +++ b/cli/internal/cmd/init_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd @@ -217,7 +217,7 @@ func TestInitialize(t *testing.T) { require.NoError(fileHandler.WriteJSON(serviceAccPath, tc.serviceAccKey, file.OptNone)) } - ctx := t.Context() + ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 4*time.Second) defer cancel() cmd.SetContext(ctx) @@ -278,14 +278,6 @@ type stubHelmApplier struct { err error } -func (s stubHelmApplier) AnnotateCoreDNSResources(_ context.Context) error { - return nil -} - -func (s stubHelmApplier) CleanupCoreDNSResources(_ context.Context) error { - return nil -} - func (s stubHelmApplier) PrepareHelmCharts( _ helm.Options, _ *state.State, _ string, _ uri.MasterSecret, ) (helm.Applier, bool, error) { @@ -539,10 +531,9 @@ func defaultConfigWithExpectedMeasurements(t *testing.T, conf *config.Config, cs conf.Provider.GCP.Project = "test-project" conf.Provider.GCP.Zone = "test-zone" conf.Provider.GCP.ServiceAccountKeyPath = "test-key-path" - conf.Provider.GCP.IAMServiceAccountVM = "example@example.com" - conf.Attestation.GCPSEVSNP.Measurements[4] = measurements.WithAllBytes(0x44, measurements.Enforce, measurements.PCRMeasurementLength) - conf.Attestation.GCPSEVSNP.Measurements[9] = measurements.WithAllBytes(0x11, measurements.Enforce, measurements.PCRMeasurementLength) - conf.Attestation.GCPSEVSNP.Measurements[12] = measurements.WithAllBytes(0xcc, measurements.Enforce, measurements.PCRMeasurementLength) + conf.Attestation.GCPSEVES.Measurements[4] = measurements.WithAllBytes(0x44, measurements.Enforce, measurements.PCRMeasurementLength) + conf.Attestation.GCPSEVES.Measurements[9] = measurements.WithAllBytes(0x11, measurements.Enforce, measurements.PCRMeasurementLength) + conf.Attestation.GCPSEVES.Measurements[12] = measurements.WithAllBytes(0xcc, measurements.Enforce, measurements.PCRMeasurementLength) zone = "europe-west3-b" instanceType = "n2d-standard-4" diskType = "pd-ssd" diff --git a/cli/internal/cmd/license_enterprise.go b/cli/internal/cmd/license_enterprise.go index 399de4524..d4afe973e 100644 --- a/cli/internal/cmd/license_enterprise.go +++ b/cli/internal/cmd/license_enterprise.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/cli/internal/cmd/license_oss.go b/cli/internal/cmd/license_oss.go index 7d584a78c..fd14d35bc 100644 --- a/cli/internal/cmd/license_oss.go +++ b/cli/internal/cmd/license_oss.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/cli/internal/cmd/log.go b/cli/internal/cmd/log.go index d112fcc37..d86f1686f 100644 --- a/cli/internal/cmd/log.go +++ b/cli/internal/cmd/log.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems 
GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/cli/internal/cmd/maapatch.go b/cli/internal/cmd/maapatch.go index 11e86051a..a32e8729a 100644 --- a/cli/internal/cmd/maapatch.go +++ b/cli/internal/cmd/maapatch.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd @@ -56,7 +56,7 @@ func runPatchMAA(cmd *cobra.Command, args []string) error { } func (c *maaPatchCmd) patchMAA(cmd *cobra.Command, attestationURL string) error { - c.log.Debug(fmt.Sprintf("Using attestation URL %q", attestationURL)) + c.log.Debug(fmt.Sprintf("Using attestation URL %s", attestationURL)) if err := c.patcher.Patch(cmd.Context(), attestationURL); err != nil { return fmt.Errorf("patching MAA attestation policy: %w", err) diff --git a/cli/internal/cmd/maapatch_test.go b/cli/internal/cmd/maapatch_test.go index bca2d0ee7..bbd0e165f 100644 --- a/cli/internal/cmd/maapatch_test.go +++ b/cli/internal/cmd/maapatch_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/cli/internal/cmd/mini.go b/cli/internal/cmd/mini.go index 7d6fe087d..4b4774f67 100644 --- a/cli/internal/cmd/mini.go +++ b/cli/internal/cmd/mini.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/cli/internal/cmd/minidown.go b/cli/internal/cmd/minidown.go index 525b8539f..594312e28 100644 --- a/cli/internal/cmd/minidown.go +++ b/cli/internal/cmd/minidown.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/cli/internal/cmd/miniup.go b/cli/internal/cmd/miniup.go index 1b8c0984a..dfd297d93 100644 --- a/cli/internal/cmd/miniup.go +++ b/cli/internal/cmd/miniup.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/cli/internal/cmd/miniup_cross.go b/cli/internal/cmd/miniup_cross.go index 2abd7c3b3..4668cc8f3 100644 --- a/cli/internal/cmd/miniup_cross.go +++ b/cli/internal/cmd/miniup_cross.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/cli/internal/cmd/miniup_linux_amd64.go b/cli/internal/cmd/miniup_linux_amd64.go index c2c5a052e..c9885d801 100644 --- a/cli/internal/cmd/miniup_linux_amd64.go +++ b/cli/internal/cmd/miniup_linux_amd64.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/cli/internal/cmd/pathprefix/pathprefix.go b/cli/internal/cmd/pathprefix/pathprefix.go index 823406232..a70ccfee3 100644 --- a/cli/internal/cmd/pathprefix/pathprefix.go +++ b/cli/internal/cmd/pathprefix/pathprefix.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* diff --git a/cli/internal/cmd/recover.go b/cli/internal/cmd/recover.go index ab5d3ef14..f3efc3e96 100644 --- a/cli/internal/cmd/recover.go +++ b/cli/internal/cmd/recover.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd @@ -11,6 +11,7 @@ import ( 
"errors" "fmt" "io" + "net" "sync" "time" @@ -77,13 +78,13 @@ func runRecover(cmd *cobra.Command, _ []string) error { } fileHandler := file.NewHandler(afero.NewOsFs()) newDialer := func(validator atls.Validator) *dialer.Dialer { - return dialer.New(nil, validator, nil) + return dialer.New(nil, validator, &net.Dialer{}) } r := &recoverCmd{log: log, configFetcher: attestationconfigapi.NewFetcher()} if err := r.flags.parse(cmd.Flags()); err != nil { return err } - r.log.Debug("Using flags", "debug", r.flags.debug, "endpoint", r.flags.endpoint, "force", r.flags.force) + r.log.Debug(fmt.Sprintf("Using flags: %+v", r.flags)) return r.recover(cmd, fileHandler, 5*time.Second, &recoverDoer{log: r.log}, newDialer) } @@ -92,7 +93,7 @@ func (r *recoverCmd) recover( doer recoverDoerInterface, newDialer func(validator atls.Validator) *dialer.Dialer, ) error { var masterSecret uri.MasterSecret - r.log.Debug(fmt.Sprintf("Loading master secret file from %q", r.flags.pathPrefixer.PrefixPrintablePath(constants.MasterSecretFilename))) + r.log.Debug(fmt.Sprintf("Loading master secret file from %s", r.flags.pathPrefixer.PrefixPrintablePath(constants.MasterSecretFilename))) if err := fileHandler.ReadJSON(constants.MasterSecretFilename, &masterSecret); err != nil { return err } @@ -107,7 +108,7 @@ func (r *recoverCmd) recover( return err } - r.log.Debug(fmt.Sprintf("Got provider %q", conf.GetProvider())) + r.log.Debug(fmt.Sprintf("Got provider %s", conf.GetProvider())) if conf.GetProvider() == cloudprovider.Azure { interval = 20 * time.Second // Azure LB takes a while to remove unhealthy instances } @@ -128,14 +129,14 @@ func (r *recoverCmd) recover( conf.UpdateMAAURL(stateFile.Infrastructure.Azure.AttestationURL) } - r.log.Debug(fmt.Sprintf("Creating aTLS Validator for %q", conf.GetAttestationConfig().GetVariant())) + r.log.Debug(fmt.Sprintf("Creating aTLS Validator for %s", conf.GetAttestationConfig().GetVariant())) validator, err := choose.Validator(conf.GetAttestationConfig(), warnLogger{cmd: cmd, log: r.log}) if err != nil { return fmt.Errorf("creating new validator: %w", err) } r.log.Debug("Created a new validator") doer.setDialer(newDialer(validator), endpoint) - r.log.Debug(fmt.Sprintf("Set dialer for endpoint %q", endpoint)) + r.log.Debug(fmt.Sprintf("Set dialer for endpoint %s", endpoint)) doer.setURIs(masterSecret.EncodeToURI(), uri.NoStoreURI) r.log.Debug("Set secrets") if err := r.recoverCall(cmd.Context(), cmd.OutOrStdout(), interval, doer); err != nil { @@ -165,7 +166,7 @@ func (r *recoverCmd) recoverCall(ctx context.Context, out io.Writer, interval ti }) } - r.log.Debug(fmt.Sprintf("Encountered error (retriable: %t): %q", retry, err)) + r.log.Debug(fmt.Sprintf("Encountered error (retriable: %t): %s", retry, err)) return retry } @@ -216,7 +217,7 @@ type recoverDoer struct { // Do performs the recover streaming rpc. 
func (d *recoverDoer) Do(ctx context.Context) (retErr error) { - conn, err := d.dialer.Dial(d.endpoint) + conn, err := d.dialer.Dial(ctx, d.endpoint) if err != nil { return fmt.Errorf("dialing recovery server: %w", err) } diff --git a/cli/internal/cmd/recover_test.go b/cli/internal/cmd/recover_test.go index af0817597..41ca89817 100644 --- a/cli/internal/cmd/recover_test.go +++ b/cli/internal/cmd/recover_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd @@ -138,7 +138,7 @@ func TestRecover(t *testing.T) { require := require.New(t) cmd := NewRecoverCmd() - cmd.SetContext(t.Context()) + cmd.SetContext(context.Background()) out := &bytes.Buffer{} cmd.SetOut(out) cmd.SetErr(out) @@ -225,7 +225,7 @@ func TestDoRecovery(t *testing.T) { log: r.log, } - err := recoverDoer.Do(t.Context()) + err := recoverDoer.Do(context.Background()) if tc.wantErr { assert.Error(err) } else { diff --git a/cli/internal/cmd/spinner.go b/cli/internal/cmd/spinner.go index f979459c3..4184291a8 100644 --- a/cli/internal/cmd/spinner.go +++ b/cli/internal/cmd/spinner.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/cli/internal/cmd/spinner_test.go b/cli/internal/cmd/spinner_test.go index 4cbf40d34..fab3ffa87 100644 --- a/cli/internal/cmd/spinner_test.go +++ b/cli/internal/cmd/spinner_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/cli/internal/cmd/ssh.go b/cli/internal/cmd/ssh.go deleted file mode 100644 index 84f9ed67a..000000000 --- a/cli/internal/cmd/ssh.go +++ /dev/null @@ -1,118 +0,0 @@ -/* -Copyright (c) Edgeless Systems GmbH - -SPDX-License-Identifier: BUSL-1.1 -*/ - -package cmd - -import ( - "crypto/ed25519" - "crypto/rand" - "fmt" - "time" - - "github.com/edgelesssys/constellation/v2/internal/constants" - "github.com/edgelesssys/constellation/v2/internal/crypto" - "github.com/edgelesssys/constellation/v2/internal/file" - "github.com/edgelesssys/constellation/v2/internal/kms/setup" - "github.com/edgelesssys/constellation/v2/internal/kms/uri" - "github.com/spf13/afero" - "github.com/spf13/cobra" - - "golang.org/x/crypto/ssh" -) - -// NewSSHCmd returns a new cobra.Command for the ssh command. -func NewSSHCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "ssh", - Short: "Generate a certificate for emergency SSH access", - Long: "Generate a certificate for emergency SSH access to your SSH-enabled constellation cluster.", - Args: cobra.ExactArgs(0), - RunE: runSSH, - } - cmd.Flags().String("key", "", "the path to an existing SSH public key") - must(cmd.MarkFlagRequired("key")) - return cmd -} - -func runSSH(cmd *cobra.Command, _ []string) error { - fh := file.NewHandler(afero.NewOsFs()) - debugLogger, err := newDebugFileLogger(cmd, fh) - if err != nil { - return err - } - - keyPath, err := cmd.Flags().GetString("key") - if err != nil { - return fmt.Errorf("retrieving path to public key from flags: %s", err) - } - - return writeCertificateForKey(cmd, keyPath, fh, debugLogger) -} - -func writeCertificateForKey(cmd *cobra.Command, keyPath string, fh file.Handler, debugLogger debugLog) error { - // NOTE(miampf): Since other KMS aren't fully implemented yet, this commands assumes that the cKMS is used and derives the key accordingly. 
- var mastersecret uri.MasterSecret - if err := fh.ReadJSON(constants.MasterSecretFilename, &mastersecret); err != nil { - return fmt.Errorf("reading master secret (does %q exist?): %w", constants.MasterSecretFilename, err) - } - - mastersecretURI := uri.MasterSecret{Key: mastersecret.Key, Salt: mastersecret.Salt} - kms, err := setup.KMS(cmd.Context(), uri.NoStoreURI, mastersecretURI.EncodeToURI()) - if err != nil { - return fmt.Errorf("setting up KMS: %s", err) - } - sshCAKeySeed, err := kms.GetDEK(cmd.Context(), crypto.DEKPrefix+constants.SSHCAKeySuffix, ed25519.SeedSize) - if err != nil { - return fmt.Errorf("retrieving key from KMS: %s", err) - } - - ca, err := crypto.GenerateEmergencySSHCAKey(sshCAKeySeed) - if err != nil { - return fmt.Errorf("generating SSH emergency CA key: %s", err) - } - - marshalledKey := string(ssh.MarshalAuthorizedKey(ca.PublicKey())) - debugLogger.Debug("SSH CA KEY generated", "public-key", marshalledKey) - knownHostsContent := fmt.Sprintf("@cert-authority * %s", marshalledKey) - if err := fh.Write("./known_hosts", []byte(knownHostsContent), file.OptMkdirAll); err != nil { - return fmt.Errorf("writing known hosts file: %w", err) - } - - keyBuffer, err := fh.Read(keyPath) - if err != nil { - return fmt.Errorf("reading public key %q: %s", keyPath, err) - } - - pub, _, _, _, err := ssh.ParseAuthorizedKey(keyBuffer) - if err != nil { - return fmt.Errorf("parsing public key %q: %s", keyPath, err) - } - - certificate := ssh.Certificate{ - Key: pub, - CertType: ssh.UserCert, - ValidAfter: uint64(time.Now().Unix()), - ValidBefore: uint64(time.Now().Add(24 * time.Hour).Unix()), - ValidPrincipals: []string{"root"}, - Permissions: ssh.Permissions{ - Extensions: map[string]string{ - "permit-port-forwarding": "", - "permit-pty": "", - }, - }, - } - if err := certificate.SignCert(rand.Reader, ca); err != nil { - return fmt.Errorf("signing certificate: %s", err) - } - - debugLogger.Debug("Signed certificate", "certificate", string(ssh.MarshalAuthorizedKey(&certificate))) - if err := fh.Write("constellation_cert.pub", ssh.MarshalAuthorizedKey(&certificate), file.OptOverwrite); err != nil { - return fmt.Errorf("writing certificate: %s", err) - } - cmd.Printf("You can now connect to a node using the \"constellation_cert.pub\" certificate.\nLook at the documentation for a how-to guide:\n\n\thttps://docs.edgeless.systems/constellation/workflows/troubleshooting#emergency-ssh-access\n") - - return nil -} diff --git a/cli/internal/cmd/ssh_test.go b/cli/internal/cmd/ssh_test.go deleted file mode 100644 index c5ba77c2c..000000000 --- a/cli/internal/cmd/ssh_test.go +++ /dev/null @@ -1,95 +0,0 @@ -/* -Copyright (c) Edgeless Systems GmbH - -SPDX-License-Identifier: BUSL-1.1 -*/ - -package cmd - -import ( - "bytes" - "testing" - - "github.com/edgelesssys/constellation/v2/internal/constants" - "github.com/edgelesssys/constellation/v2/internal/file" - "github.com/edgelesssys/constellation/v2/internal/logger" - "github.com/spf13/afero" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "golang.org/x/crypto/ssh" -) - -func TestSSH(t *testing.T) { - someSSHPubKey := "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBDA1yYg1PIJNjAGjyuv66r8AJtpfBDFLdp3u9lVwkgbVKv1AzcaeTF/NEw+nhNJOjuCZ61LTPj12LZ8Wy/oSm0A= motte@lolcatghost" - someSSHPubKeyPath := "some-key.pub" - someMasterSecret := ` - { - "key": "MDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAK", - "salt": "MDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAK" - } - ` - testCases := map[string]struct { 
- fh file.Handler - pubKey string - masterSecret string - wantErr bool - }{ - "everything exists": { - fh: file.NewHandler(afero.NewMemMapFs()), - pubKey: someSSHPubKey, - masterSecret: someMasterSecret, - }, - "no public key": { - fh: file.NewHandler(afero.NewMemMapFs()), - masterSecret: someMasterSecret, - wantErr: true, - }, - "no master secret": { - fh: file.NewHandler(afero.NewMemMapFs()), - pubKey: someSSHPubKey, - wantErr: true, - }, - "malformed public key": { - fh: file.NewHandler(afero.NewMemMapFs()), - pubKey: "asdf", - masterSecret: someMasterSecret, - wantErr: true, - }, - "malformed master secret": { - fh: file.NewHandler(afero.NewMemMapFs()), - masterSecret: "asdf", - pubKey: someSSHPubKey, - wantErr: true, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - assert := assert.New(t) - require := require.New(t) - - if tc.pubKey != "" { - require.NoError(tc.fh.Write(someSSHPubKeyPath, []byte(tc.pubKey))) - } - if tc.masterSecret != "" { - require.NoError(tc.fh.Write(constants.MasterSecretFilename, []byte(tc.masterSecret))) - } - - cmd := NewSSHCmd() - cmd.SetOut(&bytes.Buffer{}) - cmd.SetErr(&bytes.Buffer{}) - cmd.SetIn(&bytes.Buffer{}) - - err := writeCertificateForKey(cmd, someSSHPubKeyPath, tc.fh, logger.NewTest(t)) - if tc.wantErr { - assert.Error(err) - } else { - assert.NoError(err) - cert, err := tc.fh.Read("constellation_cert.pub") - require.NoError(err) - _, _, _, _, err = ssh.ParseAuthorizedKey(cert) - require.NoError(err) - } - }) - } -} diff --git a/cli/internal/cmd/status.go b/cli/internal/cmd/status.go index 5d9051c63..c2e83ef3a 100644 --- a/cli/internal/cmd/status.go +++ b/cli/internal/cmd/status.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/cli/internal/cmd/status_test.go b/cli/internal/cmd/status_test.go index e46bae917..813391bf0 100644 --- a/cli/internal/cmd/status_test.go +++ b/cli/internal/cmd/status_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/cli/internal/cmd/terminate.go b/cli/internal/cmd/terminate.go index a194358ea..c20c3fe2c 100644 --- a/cli/internal/cmd/terminate.go +++ b/cli/internal/cmd/terminate.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/cli/internal/cmd/terminate_test.go b/cli/internal/cmd/terminate_test.go index 24c9ee717..1999290e9 100644 --- a/cli/internal/cmd/terminate_test.go +++ b/cli/internal/cmd/terminate_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/cli/internal/cmd/upgrade.go b/cli/internal/cmd/upgrade.go index 6c7db4966..21addcb06 100644 --- a/cli/internal/cmd/upgrade.go +++ b/cli/internal/cmd/upgrade.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/cli/internal/cmd/upgradeapply.go b/cli/internal/cmd/upgradeapply.go index 8f2465cbd..a87e4b2c5 100644 --- a/cli/internal/cmd/upgradeapply.go +++ b/cli/internal/cmd/upgradeapply.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/cli/internal/cmd/upgradeapply_test.go 
b/cli/internal/cmd/upgradeapply_test.go index 0062d444d..f396cc828 100644 --- a/cli/internal/cmd/upgradeapply_test.go +++ b/cli/internal/cmd/upgradeapply_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd @@ -375,14 +375,6 @@ type mockApplier struct { mock.Mock } -func (m *mockApplier) AnnotateCoreDNSResources(_ context.Context) error { - return nil -} - -func (m *mockApplier) CleanupCoreDNSResources(_ context.Context) error { - return nil -} - func (m *mockApplier) PrepareHelmCharts( helmOpts helm.Options, stateFile *state.State, str string, masterSecret uri.MasterSecret, ) (helm.Applier, bool, error) { diff --git a/cli/internal/cmd/upgradecheck.go b/cli/internal/cmd/upgradecheck.go index 570f5375f..74ec31e08 100644 --- a/cli/internal/cmd/upgradecheck.go +++ b/cli/internal/cmd/upgradecheck.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd @@ -187,7 +187,7 @@ func (u *upgradeCheckCmd) upgradeCheck(cmd *cobra.Command, fetcher attestationco // get current image version of the cluster csp := conf.GetProvider() attestationVariant := conf.GetAttestationConfig().GetVariant() - u.log.Debug(fmt.Sprintf("Using provider %q with attestation variant %q", csp.String(), attestationVariant.String())) + u.log.Debug(fmt.Sprintf("Using provider %s with attestation variant %s", csp.String(), attestationVariant.String())) current, err := u.collect.currentVersions(cmd.Context()) if err != nil { @@ -198,12 +198,12 @@ func (u *upgradeCheckCmd) upgradeCheck(cmd *cobra.Command, fetcher attestationco if err != nil { return err } - u.log.Debug(fmt.Sprintf("Current cli version: %q", current.cli)) - u.log.Debug(fmt.Sprintf("Supported cli version(s): %q", supported.cli)) - u.log.Debug(fmt.Sprintf("Current service version: %q", current.service)) - u.log.Debug(fmt.Sprintf("Supported service version: %q", supported.service)) - u.log.Debug(fmt.Sprintf("Current k8s version: %q", current.k8s)) - u.log.Debug(fmt.Sprintf("Supported k8s version(s): %q", supported.k8s)) + u.log.Debug(fmt.Sprintf("Current cli version: %s", current.cli)) + u.log.Debug(fmt.Sprintf("Supported cli version(s): %s", supported.cli)) + u.log.Debug(fmt.Sprintf("Current service version: %s", current.service)) + u.log.Debug(fmt.Sprintf("Supported service version: %s", supported.service)) + u.log.Debug(fmt.Sprintf("Current k8s version: %s", current.k8s)) + u.log.Debug(fmt.Sprintf("Supported k8s version(s): %s", supported.k8s)) // Filter versions to only include upgrades newServices := supported.service @@ -343,7 +343,7 @@ func (v *versionCollector) newMeasurements(ctx context.Context, csp cloudprovide // get expected measurements for each image upgrades := make(map[string]measurements.M) for _, version := range versions { - v.log.Debug(fmt.Sprintf("Fetching measurements for image: %q", version.Version())) + v.log.Debug(fmt.Sprintf("Fetching measurements for image: %s", version.Version())) shortPath := version.ShortPath() publicKey, err := keyselect.CosignPublicKeyForVersion(version) @@ -363,8 +363,8 @@ func (v *versionCollector) newMeasurements(ctx context.Context, csp cloudprovide continue } upgrades[shortPath] = measurements - v.log.Debug("Compatible image measurement found", shortPath, measurements.String()) } + v.log.Debug(fmt.Sprintf("Compatible image measurements are %v", upgrades)) return upgrades, nil } @@ -452,9 +452,9 @@ func (v *versionCollector) 
newImages(ctx context.Context, currentImageVersion co if err != nil { return nil, fmt.Errorf("calculating next image minor version: %w", err) } - v.log.Debug(fmt.Sprintf("Current image minor version is %q", currentImageMinorVer)) - v.log.Debug(fmt.Sprintf("Current CLI minor version is %q", currentCLIMinorVer)) - v.log.Debug(fmt.Sprintf("Next image minor version is %q", nextImageMinorVer)) + v.log.Debug(fmt.Sprintf("Current image minor version is %s", currentImageMinorVer)) + v.log.Debug(fmt.Sprintf("Current CLI minor version is %s", currentCLIMinorVer)) + v.log.Debug(fmt.Sprintf("Next image minor version is %s", nextImageMinorVer)) allowedMinorVersions := []string{currentImageMinorVer, nextImageMinorVer} switch cliImageCompare := semver.Compare(currentCLIMinorVer, currentImageMinorVer); { @@ -493,7 +493,7 @@ func (v *versionCollector) newerVersions(ctx context.Context, allowedVersions [] patchList, err := v.verListFetcher.FetchVersionList(ctx, patchList) var notFound *fetcher.NotFoundError if errors.As(err, &notFound) { - v.log.Debug(fmt.Sprintf("Skipping version: %q", err)) + v.log.Debug(fmt.Sprintf("Skipping version: %s", err)) continue } if err != nil { @@ -603,7 +603,7 @@ func getCompatibleImageMeasurements(ctx context.Context, writer io.Writer, clien } var fetchedMeasurements measurements.M - log.Debug(fmt.Sprintf("Fetching for measurement url: %q", measurementsURL)) + log.Debug(fmt.Sprintf("Fetching for measurement url: %s", measurementsURL)) hash, err := fetchedMeasurements.FetchAndVerify( ctx, client, cosign, @@ -657,7 +657,7 @@ func (v *versionCollector) newCLIVersions(ctx context.Context) ([]consemver.Semv return nil, fmt.Errorf("parsing version %s: %w", version, err) } if err := target.IsUpgradeTo(v.cliVersion); err != nil { - v.log.Debug(fmt.Sprintf("Skipping incompatible minor version %q: %q", version, err)) + v.log.Debug(fmt.Sprintf("Skipping incompatible minor version %q: %s", version, err)) continue } list := versionsapi.List{ @@ -691,7 +691,7 @@ func (v *versionCollector) filterCompatibleCLIVersions(ctx context.Context, cliP var compatibleVersions []consemver.Semver for _, version := range cliPatchVersions { if err := version.IsUpgradeTo(v.cliVersion); err != nil { - v.log.Debug(fmt.Sprintf("Skipping incompatible patch version %q: %q", version, err)) + v.log.Debug(fmt.Sprintf("Skipping incompatible patch version %q: %s", version, err)) continue } req := versionsapi.CLIInfo{ diff --git a/cli/internal/cmd/upgradecheck_test.go b/cli/internal/cmd/upgradecheck_test.go index 19020fc0d..5e6f8329a 100644 --- a/cli/internal/cmd/upgradecheck_test.go +++ b/cli/internal/cmd/upgradecheck_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd @@ -139,7 +139,7 @@ func TestGetCompatibleImageMeasurements(t *testing.T) { } }) - upgrades, err := getCompatibleImageMeasurements(t.Context(), &bytes.Buffer{}, client, &stubCosignVerifier{}, singleUUIDVerifier(), csp, attestationVariant, versionZero, logger.NewTest(t)) + upgrades, err := getCompatibleImageMeasurements(context.Background(), &bytes.Buffer{}, client, &stubCosignVerifier{}, singleUUIDVerifier(), csp, attestationVariant, versionZero, logger.NewTest(t)) assert.NoError(err) for _, measurement := range upgrades { @@ -344,7 +344,7 @@ func TestNewCLIVersions(t *testing.T) { t.Run(name, func(t *testing.T) { require := require.New(t) - _, err := tc.verCollector.newCLIVersions(t.Context()) + _, err := 
tc.verCollector.newCLIVersions(context.Background()) if tc.wantErr { require.Error(err) return @@ -385,7 +385,7 @@ func TestFilterCompatibleCLIVersions(t *testing.T) { t.Run(name, func(t *testing.T) { require := require.New(t) - _, err := tc.verCollector.filterCompatibleCLIVersions(t.Context(), tc.cliPatchVersions, consemver.NewFromInt(1, 24, 5, "")) + _, err := tc.verCollector.filterCompatibleCLIVersions(context.Background(), tc.cliPatchVersions, consemver.NewFromInt(1, 24, 5, "")) if tc.wantErr { require.Error(err) return diff --git a/cli/internal/cmd/userinteraction.go b/cli/internal/cmd/userinteraction.go index cef1921a4..4fef6256e 100644 --- a/cli/internal/cmd/userinteraction.go +++ b/cli/internal/cmd/userinteraction.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/cli/internal/cmd/userinteraction_test.go b/cli/internal/cmd/userinteraction_test.go index 91472c08b..3222be8d7 100644 --- a/cli/internal/cmd/userinteraction_test.go +++ b/cli/internal/cmd/userinteraction_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/cli/internal/cmd/validargs.go b/cli/internal/cmd/validargs.go index 1c83ae3bf..6b6573612 100644 --- a/cli/internal/cmd/validargs.go +++ b/cli/internal/cmd/validargs.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/cli/internal/cmd/validargs_test.go b/cli/internal/cmd/validargs_test.go index f29d179e6..63d783e9e 100644 --- a/cli/internal/cmd/validargs_test.go +++ b/cli/internal/cmd/validargs_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/cli/internal/cmd/verifier_test.go b/cli/internal/cmd/verifier_test.go index fcb27ab2b..b55c0ab15 100644 --- a/cli/internal/cmd/verifier_test.go +++ b/cli/internal/cmd/verifier_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/cli/internal/cmd/verify.go b/cli/internal/cmd/verify.go index 2e9afba11..049f02293 100644 --- a/cli/internal/cmd/verify.go +++ b/cli/internal/cmd/verify.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd @@ -21,9 +21,10 @@ import ( "strconv" "strings" + tpmProto "github.com/google/go-tpm-tools/proto/tpm" + "github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi" "github.com/edgelesssys/constellation/v2/internal/atls" - azuretdx "github.com/edgelesssys/constellation/v2/internal/attestation/azure/tdx" "github.com/edgelesssys/constellation/v2/internal/attestation/choose" "github.com/edgelesssys/constellation/v2/internal/attestation/measurements" "github.com/edgelesssys/constellation/v2/internal/attestation/snp" @@ -37,12 +38,6 @@ import ( "github.com/edgelesssys/constellation/v2/internal/grpc/dialer" "github.com/edgelesssys/constellation/v2/internal/verify" "github.com/edgelesssys/constellation/v2/verify/verifyproto" - - "github.com/google/go-sev-guest/proto/sevsnp" - "github.com/google/go-tdx-guest/abi" - "github.com/google/go-tdx-guest/proto/tdx" - "github.com/google/go-tpm-tools/proto/attest" - tpmProto 
"github.com/google/go-tpm-tools/proto/tpm" "github.com/spf13/afero" "github.com/spf13/cobra" "github.com/spf13/pflag" @@ -108,10 +103,24 @@ func runVerify(cmd *cobra.Command, _ []string) error { fileHandler := file.NewHandler(afero.NewOsFs()) verifyClient := &constellationVerifier{ - dialer: dialer.New(nil, nil, nil), + dialer: dialer.New(nil, nil, &net.Dialer{}), log: log, } - + formatterFactory := func(output string, attestation variant.Variant, log debugLog) (attestationDocFormatter, error) { + if output == "json" && (!attestation.Equal(variant.AzureSEVSNP{}) && !attestation.Equal(variant.AWSSEVSNP{})) { + return nil, errors.New("json output is only supported for Azure SEV-SNP and AWS SEV-SNP") + } + switch output { + case "json": + return &jsonAttestationDocFormatter{log}, nil + case "raw": + return &rawAttestationDocFormatter{log}, nil + case "": + return &defaultAttestationDocFormatter{log}, nil + default: + return nil, fmt.Errorf("invalid output value for formatter: %s", output) + } + } v := &verifyCmd{ fileHandler: fileHandler, log: log, @@ -119,13 +128,14 @@ func runVerify(cmd *cobra.Command, _ []string) error { if err := v.flags.parse(cmd.Flags()); err != nil { return err } - v.log.Debug("Using flags", "clusterID", v.flags.clusterID, "endpoint", v.flags.endpoint, "ownerID", v.flags.ownerID) - + v.log.Debug(fmt.Sprintf("Using flags: %+v", v.flags)) fetcher := attestationconfigapi.NewFetcher() - return v.verify(cmd, verifyClient, fetcher) + return v.verify(cmd, verifyClient, formatterFactory, fetcher) } -func (c *verifyCmd) verify(cmd *cobra.Command, verifyClient verifyClient, configFetcher attestationconfigapi.Fetcher) error { +type formatterFactory func(output string, attestation variant.Variant, log debugLog) (attestationDocFormatter, error) + +func (c *verifyCmd) verify(cmd *cobra.Command, verifyClient verifyClient, factory formatterFactory, configFetcher attestationconfigapi.Fetcher) error { c.log.Debug(fmt.Sprintf("Loading configuration file from %q", c.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))) conf, err := config.New(c.fileHandler, constants.ConfigFilename, configFetcher, c.flags.force) var configValidationErr *config.ValidationError @@ -138,7 +148,10 @@ func (c *verifyCmd) verify(cmd *cobra.Command, verifyClient verifyClient, config stateFile, err := state.ReadFromFile(c.fileHandler, constants.StateFilename) if err != nil { - stateFile = state.New() // A state file is only required if the user has not provided IP or ID flags + return fmt.Errorf("reading state file: %w", err) + } + if err := stateFile.Validate(state.PostInit, conf.GetAttestationConfig().GetVariant()); err != nil { + return fmt.Errorf("validating state file: %w", err) } ownerID, clusterID, err := c.validateIDFlags(cmd, stateFile) @@ -162,7 +175,7 @@ func (c *verifyCmd) verify(cmd *cobra.Command, verifyClient verifyClient, config return fmt.Errorf("updating expected PCRs: %w", err) } - c.log.Debug(fmt.Sprintf("Creating aTLS Validator for %q", conf.GetAttestationConfig().GetVariant())) + c.log.Debug(fmt.Sprintf("Creating aTLS Validator for %s", conf.GetAttestationConfig().GetVariant())) validator, err := choose.Validator(attConfig, warnLogger{cmd: cmd, log: c.log}) if err != nil { return fmt.Errorf("creating aTLS validator: %w", err) @@ -186,27 +199,20 @@ func (c *verifyCmd) verify(cmd *cobra.Command, verifyClient verifyClient, config return fmt.Errorf("verifying: %w", err) } - var attDocOutput string - switch c.flags.output { - case "json": - attDocOutput, err = 
formatJSON(cmd.Context(), rawAttestationDoc, attConfig, c.log) - if err != nil { - return fmt.Errorf("printing attestation document: %w", err) - } - - case "raw": - attDocOutput = fmt.Sprintf("Attestation Document:\n%s\n", rawAttestationDoc) - - case "": - attDocOutput, err = formatDefault(cmd.Context(), rawAttestationDoc, attConfig, c.log) - if err != nil { - return fmt.Errorf("printing attestation document: %w", err) - } - - default: - return fmt.Errorf("invalid output value for formatter: %s", c.flags.output) + // certificates are only available for Azure SEV-SNP and AWS SEV-SNP + formatter, err := factory(c.flags.output, conf.GetAttestationConfig().GetVariant(), c.log) + if err != nil { + return fmt.Errorf("creating formatter: %w", err) + } + attDocOutput, err := formatter.format( + cmd.Context(), + rawAttestationDoc, + (!attConfig.GetVariant().Equal(variant.AzureSEVSNP{}) && !attConfig.GetVariant().Equal(variant.AWSSEVSNP{})), + attConfig, + ) + if err != nil { + return fmt.Errorf("printing attestation document: %w", err) } - cmd.Println(attDocOutput) cmd.PrintErrln("Verification OK") @@ -246,92 +252,82 @@ func (c *verifyCmd) validateEndpointFlag(cmd *cobra.Command, stateFile *state.St return endpoint, nil } -// formatJSON returns the json formatted attestation doc. -func formatJSON(ctx context.Context, docString []byte, attestationCfg config.AttestationCfg, log debugLog, -) (string, error) { - doc, err := unmarshalAttDoc(docString, attestationCfg.GetVariant()) - if err != nil { - return "", fmt.Errorf("unmarshalling attestation document: %w", err) - } - - switch attestationCfg.GetVariant() { - case variant.AWSSEVSNP{}, variant.AzureSEVSNP{}, variant.GCPSEVSNP{}: - return snpFormatJSON(ctx, doc.InstanceInfo, attestationCfg, log) - case variant.AzureTDX{}: - return tdxFormatJSON(doc.InstanceInfo, attestationCfg) - default: - return "", fmt.Errorf("json output is not supported for variant %s", attestationCfg.GetVariant()) - } +// an attestationDocFormatter formats the attestation document. +type attestationDocFormatter interface { + // format returns the raw or formatted attestation doc depending on the rawOutput argument. + format(ctx context.Context, docString string, PCRsOnly bool, attestationCfg config.AttestationCfg) (string, error) } -func snpFormatJSON(ctx context.Context, instanceInfoRaw []byte, attestationCfg config.AttestationCfg, log debugLog, +type jsonAttestationDocFormatter struct { + log debugLog +} + +// format returns the json formatted attestation doc. 
+func (f *jsonAttestationDocFormatter) format(ctx context.Context, docString string, _ bool, + attestationCfg config.AttestationCfg, ) (string, error) { - var instanceInfo snp.InstanceInfo - if err := json.Unmarshal(instanceInfoRaw, &instanceInfo); err != nil { + var doc attestationDoc + if err := json.Unmarshal([]byte(docString), &doc); err != nil { + return "", fmt.Errorf("unmarshal attestation document: %w", err) + } + + instanceInfo, err := extractInstanceInfo(doc) + if err != nil { return "", fmt.Errorf("unmarshalling instance info: %w", err) } - report, err := verify.NewReport(ctx, instanceInfo, attestationCfg, log) + report, err := verify.NewReport(ctx, instanceInfo, attestationCfg, f.log) if err != nil { return "", fmt.Errorf("parsing SNP report: %w", err) } jsonBytes, err := json.Marshal(report) + return string(jsonBytes), err } -func tdxFormatJSON(instanceInfoRaw []byte, attestationCfg config.AttestationCfg) (string, error) { - var rawQuote []byte +type rawAttestationDocFormatter struct { + log debugLog +} - if attestationCfg.GetVariant().Equal(variant.AzureTDX{}) { - var instanceInfo azuretdx.InstanceInfo - if err := json.Unmarshal(instanceInfoRaw, &instanceInfo); err != nil { - return "", fmt.Errorf("unmarshalling instance info: %w", err) - } - rawQuote = instanceInfo.AttestationReport - } +// format returns the raw attestation doc. +func (f *rawAttestationDocFormatter) format(_ context.Context, docString string, _ bool, + _ config.AttestationCfg, +) (string, error) { + b := &strings.Builder{} + b.WriteString("Attestation Document:\n") + b.WriteString(fmt.Sprintf("%s\n", docString)) + return b.String(), nil +} - tdxQuote, err := abi.QuoteToProto(rawQuote) - if err != nil { - return "", fmt.Errorf("converting quote to proto: %w", err) - } - quote, ok := tdxQuote.(*tdx.QuoteV4) - if !ok { - return "", fmt.Errorf("unexpected quote type: %T", tdxQuote) - } - - quoteJSON, err := json.Marshal(quote) - return string(quoteJSON), err +type defaultAttestationDocFormatter struct { + log debugLog } // format returns the formatted attestation doc. 
-func formatDefault(ctx context.Context, docString []byte, attestationCfg config.AttestationCfg, log debugLog, +func (f *defaultAttestationDocFormatter) format(ctx context.Context, docString string, PCRsOnly bool, + attestationCfg config.AttestationCfg, ) (string, error) { b := &strings.Builder{} b.WriteString("Attestation Document:\n") - doc, err := unmarshalAttDoc(docString, attestationCfg.GetVariant()) - if err != nil { - return "", fmt.Errorf("unmarshalling attestation document: %w", err) + var doc attestationDoc + if err := json.Unmarshal([]byte(docString), &doc); err != nil { + return "", fmt.Errorf("unmarshal attestation document: %w", err) } - if err := parseQuotes(b, doc.Attestation.Quotes, attestationCfg.GetMeasurements()); err != nil { + if err := f.parseQuotes(b, doc.Attestation.Quotes, attestationCfg.GetMeasurements()); err != nil { return "", fmt.Errorf("parse quote: %w", err) } - - // If we have a non SNP variant, print only the PCRs - if !(attestationCfg.GetVariant().Equal(variant.AzureSEVSNP{}) || - attestationCfg.GetVariant().Equal(variant.AWSSEVSNP{}) || - attestationCfg.GetVariant().Equal(variant.GCPSEVSNP{})) { + if PCRsOnly { return b.String(), nil } - // SNP reports contain extra information that we can print - var instanceInfo snp.InstanceInfo - if err := json.Unmarshal(doc.InstanceInfo, &instanceInfo); err != nil { + instanceInfo, err := extractInstanceInfo(doc) + if err != nil { return "", fmt.Errorf("unmarshalling instance info: %w", err) } - report, err := verify.NewReport(ctx, instanceInfo, attestationCfg, log) + report, err := verify.NewReport(ctx, instanceInfo, attestationCfg, f.log) if err != nil { return "", fmt.Errorf("parsing SNP report: %w", err) } @@ -340,7 +336,7 @@ func formatDefault(ctx context.Context, docString []byte, attestationCfg config. } // parseQuotes parses the base64-encoded quotes and writes their details to the output builder. -func parseQuotes(b *strings.Builder, quotes []*tpmProto.Quote, expectedPCRs measurements.M) error { +func (f *defaultAttestationDocFormatter) parseQuotes(b *strings.Builder, quotes []*tpmProto.Quote, expectedPCRs measurements.M) error { writeIndentfln(b, 1, "Quote:") var pcrNumbers []uint32 @@ -367,6 +363,18 @@ func parseQuotes(b *strings.Builder, quotes []*tpmProto.Quote, expectedPCRs meas return nil } +// attestationDoc is the attestation document returned by the verifier. +type attestationDoc struct { + Attestation struct { + AkPub string `json:"ak_pub"` + Quotes []*tpmProto.Quote `json:"quotes"` + EventLog string `json:"event_log"` + TeeAttestation interface{} `json:"TeeAttestation"` + } `json:"Attestation"` + InstanceInfo string `json:"InstanceInfo"` + UserData string `json:"UserData"` +} + type constellationVerifier struct { dialer grpcInsecureDialer log debugLog @@ -375,11 +383,11 @@ type constellationVerifier struct { // Verify retrieves an attestation statement from the Constellation and verifies it using the validator. 
func (v *constellationVerifier) Verify( ctx context.Context, endpoint string, req *verifyproto.GetAttestationRequest, validator atls.Validator, -) ([]byte, error) { +) (string, error) { v.log.Debug(fmt.Sprintf("Dialing endpoint: %q", endpoint)) - conn, err := v.dialer.DialInsecure(endpoint) + conn, err := v.dialer.DialInsecure(ctx, endpoint) if err != nil { - return nil, fmt.Errorf("dialing init server: %w", err) + return "", fmt.Errorf("dialing init server: %w", err) } defer conn.Close() @@ -388,28 +396,28 @@ func (v *constellationVerifier) Verify( v.log.Debug("Sending attestation request") resp, err := client.GetAttestation(ctx, req) if err != nil { - return nil, fmt.Errorf("getting attestation: %w", err) + return "", fmt.Errorf("getting attestation: %w", err) } v.log.Debug("Verifying attestation") signedData, err := validator.Validate(ctx, resp.Attestation, req.Nonce) if err != nil { - return nil, fmt.Errorf("validating attestation: %w", err) + return "", fmt.Errorf("validating attestation: %w", err) } if !bytes.Equal(signedData, []byte(constants.ConstellationVerifyServiceUserData)) { - return nil, errors.New("signed data in attestation does not match expected user data") + return "", errors.New("signed data in attestation does not match expected user data") } - return resp.Attestation, nil + return string(resp.Attestation), nil } type verifyClient interface { - Verify(ctx context.Context, endpoint string, req *verifyproto.GetAttestationRequest, validator atls.Validator) ([]byte, error) + Verify(ctx context.Context, endpoint string, req *verifyproto.GetAttestationRequest, validator atls.Validator) (string, error) } type grpcInsecureDialer interface { - DialInsecure(endpoint string) (conn *grpc.ClientConn, err error) + DialInsecure(ctx context.Context, endpoint string) (conn *grpc.ClientConn, err error) } // writeIndentfln writes a formatted string to the builder with the given indentation level @@ -421,6 +429,19 @@ func writeIndentfln(b *strings.Builder, indentLvl int, format string, args ...an b.WriteString(fmt.Sprintf(format+"\n", args...)) } +func extractInstanceInfo(doc attestationDoc) (snp.InstanceInfo, error) { + instanceInfoString, err := base64.StdEncoding.DecodeString(doc.InstanceInfo) + if err != nil { + return snp.InstanceInfo{}, fmt.Errorf("decode instance info: %w", err) + } + + var instanceInfo snp.InstanceInfo + if err := json.Unmarshal(instanceInfoString, &instanceInfo); err != nil { + return snp.InstanceInfo{}, fmt.Errorf("unmarshal instance info: %w", err) + } + return instanceInfo, nil +} + func addPortIfMissing(endpoint string, defaultPort int) (string, error) { if endpoint == "" { return "", errors.New("endpoint is empty") @@ -446,7 +467,7 @@ func updateInitMeasurements(config config.AttestationCfg, ownerID, clusterID str switch config.GetVariant() { case variant.AWSNitroTPM{}, variant.AWSSEVSNP{}, variant.AzureTrustedLaunch{}, variant.AzureSEVSNP{}, variant.AzureTDX{}, // AzureTDX also uses a vTPM for measurements - variant.GCPSEVES{}, variant.GCPSEVSNP{}, + variant.GCPSEVES{}, variant.QEMUVTPM{}: if err := updateMeasurementTPM(m, uint32(measurements.PCRIndexOwnerID), ownerID); err != nil { return err @@ -520,26 +541,3 @@ func decodeMeasurement(encoded string) ([]byte, error) { } return decoded, nil } - -func unmarshalAttDoc(attDocJSON []byte, attestationVariant variant.Variant) (vtpm.AttestationDocument, error) { - attDoc := vtpm.AttestationDocument{ - Attestation: &attest.Attestation{}, - } - - // Explicitly initialize this struct, as TeeAttestation - // is a 
"oneof" protobuf field, which needs an explicit - // type to be set to be unmarshaled correctly. - switch attestationVariant { - case variant.AzureTDX{}: - attDoc.Attestation.TeeAttestation = &attest.Attestation_TdxAttestation{ - TdxAttestation: &tdx.QuoteV4{}, - } - default: - attDoc.Attestation.TeeAttestation = &attest.Attestation_SevSnpAttestation{ - SevSnpAttestation: &sevsnp.Attestation{}, - } - } - - err := json.Unmarshal(attDocJSON, &attDoc) - return attDoc, err -} diff --git a/cli/internal/cmd/verify_test.go b/cli/internal/cmd/verify_test.go index 4a140d8ed..a695a7c2f 100644 --- a/cli/internal/cmd/verify_test.go +++ b/cli/internal/cmd/verify_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd @@ -47,6 +47,7 @@ func TestVerify(t *testing.T) { testCases := map[string]struct { provider cloudprovider.Provider protoClient *stubVerifyClient + formatter *stubAttDocFormatter nodeEndpointFlag string clusterIDFlag string stateFile *state.State @@ -61,6 +62,7 @@ func TestVerify(t *testing.T) { protoClient: &stubVerifyClient{}, stateFile: defaultStateFile(cloudprovider.GCP), wantEndpoint: "192.0.2.1:1234", + formatter: &stubAttDocFormatter{}, }, "azure": { provider: cloudprovider.Azure, @@ -69,6 +71,7 @@ func TestVerify(t *testing.T) { protoClient: &stubVerifyClient{}, stateFile: defaultStateFile(cloudprovider.Azure), wantEndpoint: "192.0.2.1:1234", + formatter: &stubAttDocFormatter{}, }, "default port": { provider: cloudprovider.GCP, @@ -77,6 +80,7 @@ func TestVerify(t *testing.T) { protoClient: &stubVerifyClient{}, stateFile: defaultStateFile(cloudprovider.GCP), wantEndpoint: "192.0.2.1:" + strconv.Itoa(constants.VerifyServiceNodePortGRPC), + formatter: &stubAttDocFormatter{}, }, "endpoint not set": { provider: cloudprovider.GCP, @@ -87,7 +91,8 @@ func TestVerify(t *testing.T) { s.Infrastructure.ClusterEndpoint = "" return s }(), - wantErr: true, + formatter: &stubAttDocFormatter{}, + wantErr: true, }, "endpoint from state file": { provider: cloudprovider.GCP, @@ -99,6 +104,7 @@ func TestVerify(t *testing.T) { return s }(), wantEndpoint: "192.0.2.1:" + strconv.Itoa(constants.VerifyServiceNodePortGRPC), + formatter: &stubAttDocFormatter{}, }, "override endpoint from details file": { provider: cloudprovider.GCP, @@ -111,6 +117,7 @@ func TestVerify(t *testing.T) { return s }(), wantEndpoint: "192.0.2.2:1234", + formatter: &stubAttDocFormatter{}, }, "invalid endpoint": { provider: cloudprovider.GCP, @@ -118,6 +125,7 @@ func TestVerify(t *testing.T) { clusterIDFlag: zeroBase64, protoClient: &stubVerifyClient{}, stateFile: defaultStateFile(cloudprovider.GCP), + formatter: &stubAttDocFormatter{}, wantErr: true, }, "neither owner id nor cluster id set": { @@ -129,6 +137,7 @@ func TestVerify(t *testing.T) { s.ClusterValues.ClusterID = "" return s }(), + formatter: &stubAttDocFormatter{}, protoClient: &stubVerifyClient{}, wantErr: true, }, @@ -142,12 +151,14 @@ func TestVerify(t *testing.T) { return s }(), wantEndpoint: "192.0.2.1:1234", + formatter: &stubAttDocFormatter{}, }, "config file not existing": { provider: cloudprovider.GCP, clusterIDFlag: zeroBase64, nodeEndpointFlag: "192.0.2.1:1234", stateFile: defaultStateFile(cloudprovider.GCP), + formatter: &stubAttDocFormatter{}, skipConfigCreation: true, wantErr: true, }, @@ -157,6 +168,7 @@ func TestVerify(t *testing.T) { clusterIDFlag: zeroBase64, protoClient: &stubVerifyClient{verifyErr: rpcStatus.Error(codes.Internal, "failed")}, stateFile: 
defaultStateFile(cloudprovider.Azure), + formatter: &stubAttDocFormatter{}, wantErr: true, }, "error protoClient GetState not rpc": { @@ -165,19 +177,18 @@ func TestVerify(t *testing.T) { clusterIDFlag: zeroBase64, protoClient: &stubVerifyClient{verifyErr: someErr}, stateFile: defaultStateFile(cloudprovider.Azure), + formatter: &stubAttDocFormatter{}, wantErr: true, }, - "state file is not required if flags are given": { + "format error": { provider: cloudprovider.Azure, nodeEndpointFlag: "192.0.2.1:1234", clusterIDFlag: zeroBase64, protoClient: &stubVerifyClient{}, + stateFile: defaultStateFile(cloudprovider.Azure), wantEndpoint: "192.0.2.1:1234", - }, - "no state file and no flags": { - provider: cloudprovider.Azure, - protoClient: &stubVerifyClient{}, - wantErr: true, + formatter: &stubAttDocFormatter{formatErr: someErr}, + wantErr: true, }, } @@ -195,9 +206,7 @@ func TestVerify(t *testing.T) { cfg := defaultConfigWithExpectedMeasurements(t, config.Default(), tc.provider) require.NoError(fileHandler.WriteYAML(constants.ConfigFilename, cfg)) } - if tc.stateFile != nil { - require.NoError(tc.stateFile.WriteToFile(fileHandler, constants.StateFilename)) - } + require.NoError(tc.stateFile.WriteToFile(fileHandler, constants.StateFilename)) v := &verifyCmd{ fileHandler: fileHandler, @@ -205,10 +214,12 @@ func TestVerify(t *testing.T) { flags: verifyFlags{ clusterID: tc.clusterIDFlag, endpoint: tc.nodeEndpointFlag, - output: "raw", }, } - err := v.verify(cmd, tc.protoClient, stubAttestationFetcher{}) + formatterFac := func(_ string, _ variant.Variant, _ debugLog) (attestationDocFormatter, error) { + return tc.formatter, nil + } + err := v.verify(cmd, tc.protoClient, formatterFac, stubAttestationFetcher{}) if tc.wantErr { assert.Error(err) } else { @@ -220,22 +231,36 @@ func TestVerify(t *testing.T) { } } -func TestFormatDefault(t *testing.T) { +type stubAttDocFormatter struct { + formatErr error +} + +func (f *stubAttDocFormatter) format(_ context.Context, _ string, _ bool, _ config.AttestationCfg) (string, error) { + return "", f.formatErr +} + +func TestFormat(t *testing.T) { + formatter := func() *defaultAttestationDocFormatter { + return &defaultAttestationDocFormatter{ + log: logger.NewTest(t), + } + } + testCases := map[string]struct { - doc []byte - attCfg config.AttestationCfg - wantErr bool + formatter *defaultAttestationDocFormatter + doc string + wantErr bool }{ "invalid doc": { - doc: []byte("invalid"), - attCfg: &config.AzureSEVSNP{}, - wantErr: true, + formatter: formatter(), + doc: "invalid", + wantErr: true, }, } for name, tc := range testCases { t.Run(name, func(t *testing.T) { - _, err := formatDefault(t.Context(), tc.doc, tc.attCfg, logger.NewTest(t)) + _, err := tc.formatter.format(context.Background(), tc.doc, false, nil) if tc.wantErr { assert.Error(t, err) } else { @@ -313,7 +338,7 @@ func TestVerifyClient(t *testing.T) { Nonce: tc.nonce, } - _, err = verifier.Verify(t.Context(), addr, request, atls.NewFakeValidator(variant.Dummy{})) + _, err = verifier.Verify(context.Background(), addr, request, atls.NewFakeValidator(variant.Dummy{})) if tc.wantErr { assert.Error(err) @@ -329,9 +354,9 @@ type stubVerifyClient struct { endpoint string } -func (c *stubVerifyClient) Verify(_ context.Context, endpoint string, _ *verifyproto.GetAttestationRequest, _ atls.Validator) ([]byte, error) { +func (c *stubVerifyClient) Verify(_ context.Context, endpoint string, _ *verifyproto.GetAttestationRequest, _ atls.Validator) (string, error) { c.endpoint = endpoint - return nil, c.verifyErr + 
return "", c.verifyErr } type stubVerifyAPI struct { @@ -477,8 +502,9 @@ func TestParseQuotes(t *testing.T) { assert := assert.New(t) b := &strings.Builder{} + parser := &defaultAttestationDocFormatter{} - err := parseQuotes(b, tc.quotes, tc.expectedPCRs) + err := parser.parseQuotes(b, tc.quotes, tc.expectedPCRs) if tc.wantErr { assert.Error(err) } else { diff --git a/cli/internal/cmd/version.go b/cli/internal/cmd/version.go index 30ce98245..a61aee437 100644 --- a/cli/internal/cmd/version.go +++ b/cli/internal/cmd/version.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/cli/internal/cmd/version_test.go b/cli/internal/cmd/version_test.go index f68041c66..646244423 100644 --- a/cli/internal/cmd/version_test.go +++ b/cli/internal/cmd/version_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/cli/internal/libvirt/BUILD.bazel b/cli/internal/libvirt/BUILD.bazel index aa030ee5e..d89676216 100644 --- a/cli/internal/libvirt/BUILD.bazel +++ b/cli/internal/libvirt/BUILD.bazel @@ -7,9 +7,9 @@ go_library( visibility = ["//:__subpackages__"], deps = [ "//internal/file", + "@com_github_docker_docker//api/types", "@com_github_docker_docker//api/types/container", "@com_github_docker_docker//api/types/filters", - "@com_github_docker_docker//api/types/image", "@com_github_docker_docker//client", "@com_github_spf13_afero//:afero", ], diff --git a/cli/internal/libvirt/libvirt.go b/cli/internal/libvirt/libvirt.go index 5815ebfc5..f22cc4a65 100644 --- a/cli/internal/libvirt/libvirt.go +++ b/cli/internal/libvirt/libvirt.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* @@ -17,9 +17,9 @@ import ( "fmt" "io" + "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/image" docker "github.com/docker/docker/client" "github.com/edgelesssys/constellation/v2/internal/file" "github.com/spf13/afero" @@ -101,7 +101,7 @@ func (r *Runner) Start(ctx context.Context, name, imageName string) error { func (r *Runner) startNewContainer(ctx context.Context, docker *docker.Client, containerName, imageName string) error { // check if image exists locally, if not pull it // this allows us to use a custom image without having to push it to a registry - images, err := docker.ImageList(ctx, image.ListOptions{ + images, err := docker.ImageList(ctx, types.ImageListOptions{ Filters: filters.NewArgs( filters.KeyValuePair{ Key: "reference", @@ -113,7 +113,7 @@ func (r *Runner) startNewContainer(ctx context.Context, docker *docker.Client, c return err } if len(images) == 0 { - reader, err := docker.ImagePull(ctx, imageName, image.PullOptions{}) + reader, err := docker.ImagePull(ctx, imageName, types.ImagePullOptions{}) if err != nil { return fmt.Errorf("failed to pull image %q: %w", imageName, err) } diff --git a/cli/internal/terraform/loader.go b/cli/internal/terraform/loader.go index a3ad04482..d6e448ce9 100644 --- a/cli/internal/terraform/loader.go +++ b/cli/internal/terraform/loader.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package terraform diff --git a/cli/internal/terraform/loader_test.go 
b/cli/internal/terraform/loader_test.go index 70a50240b..4734bba1d 100644 --- a/cli/internal/terraform/loader_test.go +++ b/cli/internal/terraform/loader_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package terraform diff --git a/cli/internal/terraform/logging.go b/cli/internal/terraform/logging.go index 18378d9f9..6a400fb03 100644 --- a/cli/internal/terraform/logging.go +++ b/cli/internal/terraform/logging.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package terraform diff --git a/cli/internal/terraform/terraform.go b/cli/internal/terraform/terraform.go index c9b536109..f48d36e02 100644 --- a/cli/internal/terraform/terraform.go +++ b/cli/internal/terraform/terraform.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* @@ -103,18 +103,9 @@ func (c *Client) ShowIAM(ctx context.Context, provider cloudprovider.Provider) ( if !ok { return IAMOutput{}, errors.New("invalid type in service_account_key output: not a string") } - IAMServiceAccountVMOutputRaw, ok := tfState.Values.Outputs["service_account_mail_vm"] - if !ok { - return IAMOutput{}, errors.New("no service_account_mail_vm output found") - } - IAMServiceAccountVMOutput, ok := IAMServiceAccountVMOutputRaw.Value.(string) - if !ok { - return IAMOutput{}, errors.New("invalid type in service_account_mail_vm output: not a string") - } return IAMOutput{ GCP: GCPIAMOutput{ - SaKey: saKeyOutput, - ServiceAccountVMMailAddress: IAMServiceAccountVMOutput, + SaKey: saKeyOutput, }, }, nil case cloudprovider.Azure: @@ -548,8 +539,7 @@ type IAMOutput struct { // GCPIAMOutput contains the output information of the Terraform IAM operation on GCP. type GCPIAMOutput struct { - SaKey string - ServiceAccountVMMailAddress string + SaKey string } // AzureIAMOutput contains the output information of the Terraform IAM operation on Microsoft Azure. 
diff --git a/cli/internal/terraform/terraform_test.go b/cli/internal/terraform/terraform_test.go index 07ea919e6..103f0e959 100644 --- a/cli/internal/terraform/terraform_test.go +++ b/cli/internal/terraform/terraform_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package terraform @@ -120,7 +120,6 @@ func TestPrepareIAM(t *testing.T) { Region: "europe-west1", Zone: "europe-west1-a", ServiceAccountID: "const-test-case", - NamePrefix: "test_iam", } azureVars := &AzureIAMVariables{ Location: "westus", @@ -483,7 +482,7 @@ func TestCreateCluster(t *testing.T) { path := path.Join(tc.pathBase, strings.ToLower(tc.provider.String())) require.NoError(c.PrepareWorkspace(path, tc.vars)) - infraState, err := c.ApplyCluster(t.Context(), tc.provider, LogLevelDebug) + infraState, err := c.ApplyCluster(context.Background(), tc.provider, LogLevelDebug) if tc.wantErr { assert.Error(err) @@ -510,9 +509,6 @@ func TestCreateIAM(t *testing.T) { "service_account_key": { Value: "12345678_abcdefg", }, - "service_account_mail_vm": { - Value: "test_iam_service_account_vm", - }, "subscription_id": { Value: "test_subscription_id", }, @@ -585,7 +581,7 @@ func TestCreateIAM(t *testing.T) { vars: gcpVars, tf: &stubTerraform{showState: newTestState()}, fs: afero.NewMemMapFs(), - want: IAMOutput{GCP: GCPIAMOutput{SaKey: "12345678_abcdefg", ServiceAccountVMMailAddress: "test_iam_service_account_vm"}}, + want: IAMOutput{GCP: GCPIAMOutput{SaKey: "12345678_abcdefg"}}, }, "gcp init fails": { pathBase: path.Join(constants.TerraformEmbeddedDir, "iam"), @@ -618,25 +614,7 @@ func TestCreateIAM(t *testing.T) { tf: &stubTerraform{ showState: &tfjson.State{ Values: &tfjson.StateValues{ - Outputs: map[string]*tfjson.StateOutput{ - "service_account_mail_vm": {Value: "test_iam_service_account_vm"}, - }, - }, - }, - }, - fs: afero.NewMemMapFs(), - wantErr: true, - }, - "gcp no service_account_mail_vm": { - pathBase: path.Join(constants.TerraformEmbeddedDir, "iam"), - provider: cloudprovider.GCP, - vars: gcpVars, - tf: &stubTerraform{ - showState: &tfjson.State{ - Values: &tfjson.StateValues{ - Outputs: map[string]*tfjson.StateOutput{ - "service_account_key": {Value: "12345678_abcdefg"}, - }, + Outputs: map[string]*tfjson.StateOutput{}, }, }, }, @@ -799,7 +777,7 @@ func TestCreateIAM(t *testing.T) { path := path.Join(tc.pathBase, strings.ToLower(tc.provider.String())) require.NoError(c.PrepareWorkspace(path, tc.vars)) - IAMoutput, err := c.ApplyIAM(t.Context(), tc.provider, LogLevelDebug) + IAMoutput, err := c.ApplyIAM(context.Background(), tc.provider, LogLevelDebug) if tc.wantErr { assert.Error(err) @@ -841,7 +819,7 @@ func TestDestroyInstances(t *testing.T) { tf: tc.tf, } - err := c.Destroy(t.Context(), LogLevelDebug) + err := c.Destroy(context.Background(), LogLevelDebug) if tc.wantErr { assert.Error(err) return @@ -1073,7 +1051,7 @@ func TestPlan(t *testing.T) { workingDir: tc.pathBase, } - _, err := c.Plan(t.Context(), LogLevelDebug) + _, err := c.Plan(context.Background(), LogLevelDebug) if tc.wantErr { require.Error(err) } else { @@ -1132,7 +1110,7 @@ func TestShowPlan(t *testing.T) { workingDir: tc.pathBase, } - err := c.ShowPlan(t.Context(), LogLevelDebug, bytes.NewBuffer(nil)) + err := c.ShowPlan(context.Background(), LogLevelDebug, bytes.NewBuffer(nil)) if tc.wantErr { require.Error(err) } else { @@ -1151,8 +1129,7 @@ func TestShowIAM(t *testing.T) { "GCP success": { tf: &stubTerraform{ showState: getTfjsonState(map[string]any{ - 
"service_account_key": "key", - "service_account_mail_vm": "example@example.com", + "service_account_key": "key", }), }, csp: cloudprovider.GCP, @@ -1160,8 +1137,7 @@ func TestShowIAM(t *testing.T) { "GCP wrong data type": { tf: &stubTerraform{ showState: getTfjsonState(map[string]any{ - "service_account_key": map[string]any{}, - "service_account_mail_vm": "example@example.com", + "service_account_key": map[string]any{}, }), }, csp: cloudprovider.GCP, @@ -1169,9 +1145,7 @@ func TestShowIAM(t *testing.T) { }, "GCP missing key": { tf: &stubTerraform{ - showState: getTfjsonState(map[string]any{ - "service_account_mail_vm": "example@example.com", - }), + showState: getTfjsonState(map[string]any{}), }, csp: cloudprovider.GCP, wantErr: true, @@ -1320,7 +1294,7 @@ func TestShowIAM(t *testing.T) { tf: tc.tf, } - _, err := c.ShowIAM(t.Context(), tc.csp) + _, err := c.ShowIAM(context.Background(), tc.csp) if tc.wantErr { assert.Error(err) return diff --git a/cli/internal/terraform/variables.go b/cli/internal/terraform/variables.go index d25b2e026..a83818260 100644 --- a/cli/internal/terraform/variables.go +++ b/cli/internal/terraform/variables.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package terraform @@ -9,7 +9,6 @@ package terraform import ( "fmt" - "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider" "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/gohcl" "github.com/hashicorp/hcl/v2/hclsyntax" @@ -24,6 +23,11 @@ type Variables interface { // ClusterVariables should be used in places where a cluster is created. type ClusterVariables interface { Variables + // TODO(derpsteb): Rename this function once we have introduced an interface for config.Config. + // GetCreateMAA does not follow Go's naming convention because we need to keep the CreateMAA property public for now. + // There are functions creating Variables objects outside of this package. + // These functions can only be moved into this package once we have introduced an interface for config.Config, + // since we do not want to introduce a dependency on config.Config in this package. GetCreateMAA() bool } @@ -65,11 +69,10 @@ type AWSClusterVariables struct { CustomEndpoint string `hcl:"custom_endpoint" cty:"custom_endpoint"` // InternalLoadBalancer is true if an internal load balancer should be created. InternalLoadBalancer bool `hcl:"internal_load_balancer" cty:"internal_load_balancer"` - // AdditionalTags describes (optional) additional tags that should be applied to created resources. - AdditionalTags cloudprovider.Tags `hcl:"additional_tags" cty:"additional_tags"` } // GetCreateMAA gets the CreateMAA variable. +// TODO(derpsteb): Rename this function once we have introduced an interface for config.Config. func (a *AWSClusterVariables) GetCreateMAA() bool { return false } @@ -133,15 +136,10 @@ type GCPClusterVariables struct { CustomEndpoint string `hcl:"custom_endpoint" cty:"custom_endpoint"` // InternalLoadBalancer is true if an internal load balancer should be created. InternalLoadBalancer bool `hcl:"internal_load_balancer" cty:"internal_load_balancer"` - // CCTechnology is the confidential computing technology to use on the VMs. (`SEV` or `SEV_SNP`) - CCTechnology string `hcl:"cc_technology" cty:"cc_technology"` - // IAMServiceAccountControlPlane is the IAM service account mail address to attach to VMs. 
- IAMServiceAccountVM string `hcl:"iam_service_account_vm" cty:"iam_service_account_vm"` - // AdditionalLables are (optional) additional labels that should be applied to created resources. - AdditionalLabels cloudprovider.Tags `hcl:"additional_labels" cty:"additional_labels"` } // GetCreateMAA gets the CreateMAA variable. +// TODO(derpsteb): Rename this function once we have introduced an interface for config.Config. func (g *GCPClusterVariables) GetCreateMAA() bool { return false } @@ -167,7 +165,7 @@ type GCPNodeGroup struct { DiskType string `hcl:"disk_type" cty:"disk_type"` } -// GCPIAMVariables is user configuration for creating the IAM configuration with Terraform on GCP. +// GCPIAMVariables is user configuration for creating the IAM confioguration with Terraform on GCP. type GCPIAMVariables struct { // Project is the ID of the GCP project to use. Project string `hcl:"project_id" cty:"project_id"` @@ -177,8 +175,6 @@ type GCPIAMVariables struct { Zone string `hcl:"zone" cty:"zone"` // ServiceAccountID is the ID of the service account to use. ServiceAccountID string `hcl:"service_account_id" cty:"service_account_id"` - // NamePrefix is a prefix applied to the service account ID and VM ID created by this configuration. - NamePrefix string `hcl:"name_prefix,optional" cty:"name_prefix"` } // String returns a string representation of the IAM-specific variables, formatted as Terraform variables. @@ -190,8 +186,6 @@ func (v *GCPIAMVariables) String() string { // AzureClusterVariables is user configuration for creating a cluster with Terraform on Azure. type AzureClusterVariables struct { - // SubscriptionID is the Azure subscription ID to use. - SubscriptionID string `hcl:"subscription_id" cty:"subscription_id"` // Name of the cluster. Name string `hcl:"name" cty:"name"` // ImageID is the ID of the Azure image to use. @@ -218,11 +212,10 @@ type AzureClusterVariables struct { InternalLoadBalancer bool `hcl:"internal_load_balancer" cty:"internal_load_balancer"` // MarketplaceImage is the (optional) Azure Marketplace image to use. MarketplaceImage *AzureMarketplaceImageVariables `hcl:"marketplace_image" cty:"marketplace_image"` - // AdditionalTags are (optional) additional tags that get applied to created resources. - AdditionalTags cloudprovider.Tags `hcl:"additional_tags" cty:"additional_tags"` } // GetCreateMAA gets the CreateMAA variable. +// TODO(derpsteb): Rename this function once we have introduced an interface for config.Config. func (a *AzureClusterVariables) GetCreateMAA() bool { if a.CreateMAA == nil { return false @@ -252,8 +245,6 @@ type AzureNodeGroup struct { // AzureIAMVariables is user configuration for creating the IAM configuration with Terraform on Microsoft Azure. type AzureIAMVariables struct { - // SubscriptionID is the Azure subscription ID to use. - SubscriptionID string `hcl:"subscription_id,optional" cty:"subscription_id"` // TODO(v2.18): remove optional tag. This is only required for migration from var files that dont have the value yet. // Location is the Azure location to use. (e.g. westus) Location string `hcl:"location" cty:"location"` // ServicePrincipal is the name of the service principal to use. @@ -302,11 +293,11 @@ type OpenStackClusterVariables struct { // CustomEndpoint is the (optional) custom dns hostname for the kubernetes api server. CustomEndpoint string `hcl:"custom_endpoint" cty:"custom_endpoint"` // InternalLoadBalancer is true if an internal load balancer should be created. 
- InternalLoadBalancer bool `hcl:"internal_load_balancer" cty:"internal_load_balancer"` - AdditionalTags []string `hcl:"additional_tags" cty:"additional_tags"` + InternalLoadBalancer bool `hcl:"internal_load_balancer" cty:"internal_load_balancer"` } // GetCreateMAA gets the CreateMAA variable. +// TODO(derpsteb): Rename this function once we have introduced an interface for config.Config. func (o *OpenStackClusterVariables) GetCreateMAA() bool { return false } @@ -379,6 +370,7 @@ type QEMUVariables struct { } // GetCreateMAA gets the CreateMAA variable. +// TODO(derpsteb): Rename this function once we have introduced an interface for config.Config. func (q *QEMUVariables) GetCreateMAA() bool { return false } diff --git a/cli/internal/terraform/variables_test.go b/cli/internal/terraform/variables_test.go index dc8f79b2d..df27ddb59 100644 --- a/cli/internal/terraform/variables_test.go +++ b/cli/internal/terraform/variables_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package terraform @@ -76,7 +76,6 @@ node_groups = { } custom_endpoint = "example.com" internal_load_balancer = false -additional_tags = null ` got := vars.String() assert.Equal(t, strings.Fields(want), strings.Fields(got)) // to ignore whitespace differences @@ -122,9 +121,7 @@ func TestGCPClusterVariables(t *testing.T) { DiskType: "pd-ssd", }, }, - CustomEndpoint: "example.com", - CCTechnology: "SEV_SNP", - IAMServiceAccountVM: "example@example.com", + CustomEndpoint: "example.com", } // test that the variables are correctly rendered @@ -152,11 +149,8 @@ node_groups = { zone = "eu-central-1b" } } -custom_endpoint = "example.com" -internal_load_balancer = false -cc_technology = "SEV_SNP" -iam_service_account_vm = "example@example.com" -additional_labels = null +custom_endpoint = "example.com" +internal_load_balancer = false ` got := vars.String() assert.Equal(t, strings.Fields(want), strings.Fields(got)) // to ignore whitespace differences @@ -175,33 +169,14 @@ func TestGCPIAMVariables(t *testing.T) { region = "eu-central-1" zone = "eu-central-1a" service_account_id = "my-service-account" -name_prefix = "" ` got := vars.String() assert.Equal(t, strings.Fields(want), strings.Fields(got)) // to ignore whitespace differences - - vars = GCPIAMVariables{ - Project: "my-project", - Region: "eu-central-1", - Zone: "eu-central-1a", - NamePrefix: "my-prefix", - } - - // test that the variables are correctly rendered - want = `project_id = "my-project" -region = "eu-central-1" -zone = "eu-central-1a" -service_account_id = "" -name_prefix = "my-prefix" -` - got = vars.String() - assert.Equal(t, strings.Fields(want), strings.Fields(got)) // to ignore whitespace differences } func TestAzureClusterVariables(t *testing.T) { vars := AzureClusterVariables{ - SubscriptionID: "01234567-cdef-0123-4567-89abcdef0123", - Name: "cluster-name", + Name: "cluster-name", NodeGroups: map[string]AzureNodeGroup{ constants.ControlPlaneDefault: { Role: "ControlPlane", @@ -228,8 +203,7 @@ func TestAzureClusterVariables(t *testing.T) { } // test that the variables are correctly rendered - want := `subscription_id = "01234567-cdef-0123-4567-89abcdef0123" -name = "cluster-name" + want := `name = "cluster-name" image_id = "image-0123456789abcdef" create_maa = true debug = true @@ -255,7 +229,6 @@ marketplace_image = { publisher = "edgelesssys" version = "2.13.0" } -additional_tags = null ` got := vars.String() assert.Equal(t, strings.Fields(want), strings.Fields(got)) // 
to ignore whitespace differences @@ -263,15 +236,13 @@ additional_tags = null func TestAzureIAMVariables(t *testing.T) { vars := AzureIAMVariables{ - SubscriptionID: "01234567-cdef-0123-4567-89abcdef0123", Location: "eu-central-1", ServicePrincipal: "my-service-principal", ResourceGroup: "my-resource-group", } // test that the variables are correctly rendered - want := `subscription_id = "01234567-cdef-0123-4567-89abcdef0123" -location = "eu-central-1" + want := `location = "eu-central-1" service_principal_name = "my-service-principal" resource_group_name = "my-resource-group" ` @@ -321,7 +292,6 @@ image_id = "8e10b92d-8f7a-458c-91c6-59b42f82ef81" debug = true custom_endpoint = "example.com" internal_load_balancer = false -additional_tags = null ` got := vars.String() assert.Equal(t, strings.Fields(want), strings.Fields(got)) // to ignore whitespace differences diff --git a/cli/main.go b/cli/main.go index 0d479766d..7687463bd 100644 --- a/cli/main.go +++ b/cli/main.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package main diff --git a/csi/cryptmapper/cryptmapper.go b/csi/cryptmapper/cryptmapper.go index 44757d703..90ece1df2 100644 --- a/csi/cryptmapper/cryptmapper.go +++ b/csi/cryptmapper/cryptmapper.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package cryptmapper provides a wrapper around libcryptsetup to manage dm-crypt volumes for CSI drivers. diff --git a/csi/cryptmapper/cryptmapper_cgo.go b/csi/cryptmapper/cryptmapper_cgo.go index 0ba881d6d..f03a48bbb 100644 --- a/csi/cryptmapper/cryptmapper_cgo.go +++ b/csi/cryptmapper/cryptmapper_cgo.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cryptmapper diff --git a/csi/cryptmapper/cryptmapper_cross.go b/csi/cryptmapper/cryptmapper_cross.go index 0e6f6ba34..ddc4f4adc 100644 --- a/csi/cryptmapper/cryptmapper_cross.go +++ b/csi/cryptmapper/cryptmapper_cross.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cryptmapper diff --git a/csi/cryptmapper/cryptmapper_test.go b/csi/cryptmapper/cryptmapper_test.go index 56c44e8ee..cef34cd18 100644 --- a/csi/cryptmapper/cryptmapper_test.go +++ b/csi/cryptmapper/cryptmapper_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cryptmapper @@ -202,7 +202,7 @@ func TestOpenCryptDevice(t *testing.T) { getDiskFormat: tc.diskInfo, } - out, err := mapper.OpenCryptDevice(t.Context(), tc.source, tc.volumeID, tc.integrity) + out, err := mapper.OpenCryptDevice(context.Background(), tc.source, tc.volumeID, tc.integrity) if tc.wantErr { assert.Error(err) } else { @@ -223,7 +223,7 @@ func TestOpenCryptDevice(t *testing.T) { kms: &fakeKMS{}, getDiskFormat: getDiskFormat, } - _, err := mapper.OpenCryptDevice(t.Context(), "/dev/some-device", "volume01", false) + _, err := mapper.OpenCryptDevice(context.Background(), "/dev/some-device", "volume01", false) assert.NoError(t, err) } @@ -270,7 +270,7 @@ func TestResizeCryptDevice(t *testing.T) { mapper: testMapper(tc.device), } - res, err := mapper.ResizeCryptDevice(t.Context(), tc.volumeID) + res, err := mapper.ResizeCryptDevice(context.Background(), tc.volumeID) if tc.wantErr { assert.Error(err) } else { diff --git 
a/csi/kms/BUILD.bazel b/csi/kms/BUILD.bazel index 81dd9ecd8..19d174a01 100644 --- a/csi/kms/BUILD.bazel +++ b/csi/kms/BUILD.bazel @@ -8,7 +8,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//keyservice/keyserviceproto", - "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//:go_default_library", "@org_golang_google_grpc//credentials/insecure", ], ) @@ -20,7 +20,7 @@ go_test( deps = [ "//keyservice/keyserviceproto", "@com_github_stretchr_testify//assert", - "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//:go_default_library", "@org_golang_google_grpc//test/bufconn", "@org_uber_go_goleak//:goleak", ], diff --git a/csi/kms/constellation.go b/csi/kms/constellation.go index 95e4f848e..dbc310bdb 100644 --- a/csi/kms/constellation.go +++ b/csi/kms/constellation.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package kms @@ -31,7 +31,7 @@ func NewConstellationKMS(endpoint string) *ConstellationKMS { // GetDEK request a data encryption key derived from the Constellation's master secret. func (k *ConstellationKMS) GetDEK(ctx context.Context, dekID string, dekSize int) ([]byte, error) { - conn, err := grpc.NewClient(k.endpoint, grpc.WithTransportCredentials(insecure.NewCredentials())) + conn, err := grpc.DialContext(ctx, k.endpoint, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { return nil, err } diff --git a/csi/kms/constellation_test.go b/csi/kms/constellation_test.go index 134404491..3b82a8c1c 100644 --- a/csi/kms/constellation_test.go +++ b/csi/kms/constellation_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package kms @@ -57,7 +57,7 @@ func TestConstellationKMS(t *testing.T) { endpoint: listener.Addr().String(), kms: tc.kms, } - res, err := kms.GetDEK(t.Context(), "data-key", 64) + res, err := kms.GetDEK(context.Background(), "data-key", 64) if tc.wantErr { assert.Error(err) diff --git a/csi/test/BUILD.bazel b/csi/test/BUILD.bazel index c2c5b1071..b62498c20 100644 --- a/csi/test/BUILD.bazel +++ b/csi/test/BUILD.bazel @@ -28,8 +28,6 @@ go_test( "RM": "$(rlocationpath @coreutils//:bin/rm)", "UMOUNT": "$(rlocationpath @util-linux//:bin/umount)", }, - # This test frequently runs into https://github.com/martinjungblut/go-cryptsetup/issues/13. 
- flaky = 1, # keep tags = [ "integration", @@ -42,14 +40,14 @@ go_test( "//csi/cryptmapper", "@com_github_stretchr_testify//assert", "@com_github_stretchr_testify//require", - "@io_bazel_rules_go//go/runfiles", + "@io_bazel_rules_go//go/runfiles:go_default_library", "@org_uber_go_goleak//:goleak", ], "@io_bazel_rules_go//go/platform:linux": [ "//csi/cryptmapper", "@com_github_stretchr_testify//assert", "@com_github_stretchr_testify//require", - "@io_bazel_rules_go//go/runfiles", + "@io_bazel_rules_go//go/runfiles:go_default_library", "@org_uber_go_goleak//:goleak", ], "//conditions:default": [], diff --git a/csi/test/mount_integration_test.go b/csi/test/mount_integration_test.go index c22371c2e..986636bf1 100644 --- a/csi/test/mount_integration_test.go +++ b/csi/test/mount_integration_test.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package integration @@ -31,7 +31,7 @@ const ( deviceName string = "testDeviceName" ) -var toolsEnvs = []string{"CP", "DD", "RM", "FSCK_EXT4", "MKFS_EXT4", "BLKID", "FSCK", "MOUNT", "UMOUNT"} +var toolsEnvs []string = []string{"CP", "DD", "RM", "FSCK_EXT4", "MKFS_EXT4", "BLKID", "FSCK", "MOUNT", "UMOUNT"} // addToolsToPATH is used to update the PATH to contain necessary tool binaries for // coreutils, util-linux and ext4. @@ -105,7 +105,7 @@ func TestOpenAndClose(t *testing.T) { mapper := cryptmapper.New(&fakeKMS{}) - newPath, err := mapper.OpenCryptDevice(t.Context(), devicePath, deviceName, false) + newPath, err := mapper.OpenCryptDevice(context.Background(), devicePath, deviceName, false) require.NoError(err) defer func() { _ = mapper.CloseCryptDevice(deviceName) @@ -119,14 +119,14 @@ func TestOpenAndClose(t *testing.T) { assert.True(os.IsNotExist(err)) // Opening the same device should return the same path and not error - newPath2, err := mapper.OpenCryptDevice(t.Context(), devicePath, deviceName, false) + newPath2, err := mapper.OpenCryptDevice(context.Background(), devicePath, deviceName, false) require.NoError(err) assert.Equal(newPath, newPath2) // Resize the device resize(devicePath) - resizedPath, err := mapper.ResizeCryptDevice(t.Context(), deviceName) + resizedPath, err := mapper.ResizeCryptDevice(context.Background(), deviceName) require.NoError(err) assert.Equal("/dev/mapper/"+deviceName, resizedPath) @@ -137,7 +137,7 @@ func TestOpenAndClose(t *testing.T) { assert.True(os.IsNotExist(err)) // check if we can reopen the device - _, err = mapper.OpenCryptDevice(t.Context(), devicePath, deviceName, true) + _, err = mapper.OpenCryptDevice(context.Background(), devicePath, deviceName, true) assert.NoError(err) assert.NoError(mapper.CloseCryptDevice(deviceName)) } @@ -150,7 +150,7 @@ func TestOpenAndCloseIntegrity(t *testing.T) { mapper := cryptmapper.New(&fakeKMS{}) - newPath, err := mapper.OpenCryptDevice(t.Context(), devicePath, deviceName, true) + newPath, err := mapper.OpenCryptDevice(context.Background(), devicePath, deviceName, true) require.NoError(err) assert.Equal("/dev/mapper/"+deviceName, newPath) @@ -162,13 +162,13 @@ func TestOpenAndCloseIntegrity(t *testing.T) { assert.NoError(err) // Opening the same device should return the same path and not error - newPath2, err := mapper.OpenCryptDevice(t.Context(), devicePath, deviceName, true) + newPath2, err := mapper.OpenCryptDevice(context.Background(), devicePath, deviceName, true) require.NoError(err) assert.Equal(newPath, newPath2) // integrity devices do not support resizing resize(devicePath) - _, err 
= mapper.ResizeCryptDevice(t.Context(), deviceName) + _, err = mapper.ResizeCryptDevice(context.Background(), deviceName) assert.Error(err) assert.NoError(mapper.CloseCryptDevice(deviceName)) @@ -181,7 +181,7 @@ func TestOpenAndCloseIntegrity(t *testing.T) { assert.True(os.IsNotExist(err)) // check if we can reopen the device - _, err = mapper.OpenCryptDevice(t.Context(), devicePath, deviceName, true) + _, err = mapper.OpenCryptDevice(context.Background(), devicePath, deviceName, true) assert.NoError(err) assert.NoError(mapper.CloseCryptDevice(deviceName)) } @@ -194,13 +194,13 @@ func TestDeviceCloning(t *testing.T) { mapper := cryptmapper.New(&dynamicKMS{}) - _, err := mapper.OpenCryptDevice(t.Context(), devicePath, deviceName, false) + _, err := mapper.OpenCryptDevice(context.Background(), devicePath, deviceName, false) assert.NoError(err) require.NoError(cp(devicePath, devicePath+"-copy")) defer teardown(devicePath + "-copy") - _, err = mapper.OpenCryptDevice(t.Context(), devicePath+"-copy", deviceName+"-copy", false) + _, err = mapper.OpenCryptDevice(context.Background(), devicePath+"-copy", deviceName+"-copy", false) assert.NoError(err) assert.NoError(mapper.CloseCryptDevice(deviceName)) @@ -220,7 +220,7 @@ func TestConcurrency(t *testing.T) { wg := sync.WaitGroup{} runTest := func(path, name string) { - newPath, err := mapper.OpenCryptDevice(t.Context(), path, name, false) + newPath, err := mapper.OpenCryptDevice(context.Background(), path, name, false) assert.NoError(err) defer func() { _ = mapper.CloseCryptDevice(name) diff --git a/debugd/cmd/cdbg/cdbg.go b/debugd/cmd/cdbg/cdbg.go index d7962a8c4..24d00f21f 100644 --- a/debugd/cmd/cdbg/cdbg.go +++ b/debugd/cmd/cdbg/cdbg.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package main diff --git a/debugd/cmd/debugd/debugd.go b/debugd/cmd/debugd/debugd.go index 02ebfc96e..4140687f7 100644 --- a/debugd/cmd/debugd/debugd.go +++ b/debugd/cmd/debugd/debugd.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package main diff --git a/debugd/filebeat/Dockerfile b/debugd/filebeat/Dockerfile index f5badcf9a..58f59726b 100644 --- a/debugd/filebeat/Dockerfile +++ b/debugd/filebeat/Dockerfile @@ -1,9 +1,7 @@ -FROM fedora:40@sha256:3c86d25fef9d2001712bc3d9b091fc40cf04be4767e48f1aa3b785bf58d300ed AS release +FROM fedora:38@sha256:3f01c8f79691df76331cb4bb0944794a60850475e859c15e49513fcbe0a3d88a AS release RUN dnf install -y https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-8.6.2-x86_64.rpm -RUN dnf install -y systemd-libs - COPY debugd/filebeat/templates/ /usr/share/constellogs/templates/ ENTRYPOINT ["/usr/share/filebeat/bin/filebeat", "-e", "--path.home", "/usr/share/filebeat", "--path.data", "/usr/share/filebeat/data"] diff --git a/debugd/filebeat/assets.go b/debugd/filebeat/assets.go index 204b1a3ec..744ef3799 100644 --- a/debugd/filebeat/assets.go +++ b/debugd/filebeat/assets.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package filebeat diff --git a/debugd/internal/cdbg/cmd/BUILD.bazel b/debugd/internal/cdbg/cmd/BUILD.bazel index e48d0d5d1..94420b1c1 100644 --- a/debugd/internal/cdbg/cmd/BUILD.bazel +++ b/debugd/internal/cdbg/cmd/BUILD.bazel @@ -22,7 +22,7 @@ go_library( "//internal/logger", "@com_github_spf13_afero//:afero", "@com_github_spf13_cobra//:cobra", - 
"@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//:go_default_library", "@org_golang_google_grpc//credentials/insecure", ], ) diff --git a/debugd/internal/cdbg/cmd/deploy.go b/debugd/internal/cdbg/cmd/deploy.go index ea7569d28..95c3d147e 100644 --- a/debugd/internal/cdbg/cmd/deploy.go +++ b/debugd/internal/cdbg/cmd/deploy.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd @@ -203,7 +203,8 @@ type closeAndWait func() // newDebugdClient creates a new gRPC client for the debugd service and logs the connection state changes. func newDebugdClient(ctx context.Context, ip string, log *slog.Logger) (pb.DebugdClient, closeAndWait, error) { - conn, err := grpc.NewClient( + conn, err := grpc.DialContext( + ctx, net.JoinHostPort(ip, strconv.Itoa(constants.DebugdPort)), grpc.WithTransportCredentials(insecure.NewCredentials()), logger.GetClientUnaryInterceptor(log), @@ -270,11 +271,9 @@ func uploadFiles(ctx context.Context, client pb.DebugdClient, in deployOnEndpoin case pb.UploadFilesStatus_UPLOAD_FILES_ALREADY_FINISHED: in.log.Info("Files already uploaded") case pb.UploadFilesStatus_UPLOAD_FILES_UPLOAD_FAILED: - return fmt.Errorf("uploading files to %v failed: %s: %s", in.debugdEndpoint, uploadResponse.Status, uploadResponse.Error) + return fmt.Errorf("uploading files to %v failed: %v", in.debugdEndpoint, uploadResponse) case pb.UploadFilesStatus_UPLOAD_FILES_ALREADY_STARTED: return fmt.Errorf("upload already started on %v", in.debugdEndpoint) - case pb.UploadFilesStatus_UPLOAD_FILES_START_FAILED: - return fmt.Errorf("overriding service units failed on %v: %s: %s", in.debugdEndpoint, uploadResponse.Status, uploadResponse.Error) default: return fmt.Errorf("unknown upload status %v", uploadResponse.Status) } diff --git a/debugd/internal/cdbg/cmd/root.go b/debugd/internal/cdbg/cmd/root.go index 436b524a8..b9b3fae67 100644 --- a/debugd/internal/cdbg/cmd/root.go +++ b/debugd/internal/cdbg/cmd/root.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package cmd contains the cdbg CLI. diff --git a/debugd/internal/debugd/constants.go b/debugd/internal/debugd/constants.go index e831fc8cb..ae3aab8b3 100644 --- a/debugd/internal/debugd/constants.go +++ b/debugd/internal/debugd/constants.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package debugd diff --git a/debugd/internal/debugd/debugd.go b/debugd/internal/debugd/debugd.go index 645447c0a..d5453dbf8 100644 --- a/debugd/internal/debugd/debugd.go +++ b/debugd/internal/debugd/debugd.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package debugd contains internal packages for the debugd. 
diff --git a/debugd/internal/debugd/deploy/BUILD.bazel b/debugd/internal/debugd/deploy/BUILD.bazel index 29bdd0ccf..16ba9cdf3 100644 --- a/debugd/internal/debugd/deploy/BUILD.bazel +++ b/debugd/internal/debugd/deploy/BUILD.bazel @@ -18,7 +18,7 @@ go_library( "//internal/constants", "@com_github_coreos_go_systemd_v22//dbus", "@com_github_spf13_afero//:afero", - "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//:go_default_library", "@org_golang_google_grpc//credentials/insecure", ], ) @@ -39,7 +39,7 @@ go_test( "@com_github_spf13_afero//:afero", "@com_github_stretchr_testify//assert", "@com_github_stretchr_testify//require", - "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//:go_default_library", "@org_uber_go_goleak//:goleak", ], ) diff --git a/debugd/internal/debugd/deploy/deploy.go b/debugd/internal/debugd/deploy/deploy.go index 7e43f70cd..d91e0243f 100644 --- a/debugd/internal/debugd/deploy/deploy.go +++ b/debugd/internal/debugd/deploy/deploy.go @@ -1,6 +1,6 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* diff --git a/debugd/internal/debugd/deploy/download.go b/debugd/internal/debugd/deploy/download.go index 0409389f3..affe685d3 100644 --- a/debugd/internal/debugd/deploy/download.go +++ b/debugd/internal/debugd/deploy/download.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package deploy @@ -53,7 +53,7 @@ func (d *Download) DownloadInfo(ctx context.Context, ip string) error { log := d.log.With(slog.String("ip", ip)) serverAddr := net.JoinHostPort(ip, strconv.Itoa(constants.DebugdPort)) - client, closer, err := d.newClient(serverAddr, log) + client, closer, err := d.newClient(ctx, serverAddr, log) if err != nil { return err } @@ -74,7 +74,7 @@ func (d *Download) DownloadDeployment(ctx context.Context, ip string) error { log := d.log.With(slog.String("ip", ip)) serverAddr := net.JoinHostPort(ip, strconv.Itoa(constants.DebugdPort)) - client, closer, err := d.newClient(serverAddr, log) + client, closer, err := d.newClient(ctx, serverAddr, log) if err != nil { return err } @@ -117,17 +117,17 @@ func (d *Download) DownloadDeployment(ctx context.Context, ip string) error { return nil } -func (d *Download) newClient(serverAddr string, log *slog.Logger) (pb.DebugdClient, io.Closer, error) { +func (d *Download) newClient(ctx context.Context, serverAddr string, log *slog.Logger) (pb.DebugdClient, io.Closer, error) { log.Info("Connecting to server") - conn, err := d.dial(serverAddr) + conn, err := d.dial(ctx, serverAddr) if err != nil { return nil, nil, fmt.Errorf("connecting to other instance via gRPC: %w", err) } return pb.NewDebugdClient(conn), conn, nil } -func (d *Download) dial(target string) (*grpc.ClientConn, error) { - return grpc.NewClient(target, +func (d *Download) dial(ctx context.Context, target string) (*grpc.ClientConn, error) { + return grpc.DialContext(ctx, target, d.grpcWithDialer(), grpc.WithTransportCredentials(insecure.NewCredentials()), ) diff --git a/debugd/internal/debugd/deploy/download_test.go b/debugd/internal/debugd/deploy/download_test.go index 0cd800124..8477377c5 100644 --- a/debugd/internal/debugd/deploy/download_test.go +++ b/debugd/internal/debugd/deploy/download_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package deploy @@ -123,7 +123,7 @@ func TestDownloadDeployment(t *testing.T) { 
serviceManager: serviceMgr, } - err := download.DownloadDeployment(t.Context(), ip) + err := download.DownloadDeployment(context.Background(), ip) if tc.wantErr { assert.Error(err) @@ -194,7 +194,7 @@ func TestDownloadInfo(t *testing.T) { info: &tc.infoSetter, } - err := download.DownloadInfo(t.Context(), ip) + err := download.DownloadInfo(context.Background(), ip) if tc.wantErr { assert.Error(err) diff --git a/debugd/internal/debugd/deploy/service.go b/debugd/internal/debugd/deploy/service.go index 618875989..2c71891cc 100644 --- a/debugd/internal/debugd/deploy/service.go +++ b/debugd/internal/debugd/deploy/service.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package deploy @@ -61,7 +61,6 @@ type SystemdUnit struct { type ServiceManager struct { log *slog.Logger dbus dbusClient - journal journalReader fs afero.Fs systemdUnitFilewriteLock sync.Mutex } @@ -72,7 +71,6 @@ func NewServiceManager(log *slog.Logger) *ServiceManager { return &ServiceManager{ log: log, dbus: &dbusWrapper{}, - journal: &journalctlWrapper{}, fs: fs, systemdUnitFilewriteLock: sync.Mutex{}, } @@ -91,8 +89,6 @@ type dbusConn interface { // StopUnitContext is similar to StartUnitContext, but stops the specified unit // rather than starting it. StopUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) - // ResetFailedUnitContext resets the "failed" state of a unit. - ResetFailedUnitContext(ctx context.Context, name string) error // RestartUnitContext restarts a service. If a service is restarted that isn't // running it will be started. RestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) @@ -103,11 +99,6 @@ type dbusConn interface { Close() } -type journalReader interface { - // ReadJournal reads the journal for a specific unit. - readJournal(unit string) string -} - // SystemdAction will perform a systemd action on a service unit (start, stop, restart, reload). func (s *ServiceManager) SystemdAction(ctx context.Context, request ServiceManagerRequest) error { log := s.log.With(slog.String("unit", request.Unit), slog.String("action", request.Action.String())) @@ -124,9 +115,6 @@ func (s *ServiceManager) SystemdAction(ctx context.Context, request ServiceManag case Stop: _, err = conn.StopUnitContext(ctx, request.Unit, "replace", resultChan) case Restart: - if err = conn.ResetFailedUnitContext(ctx, request.Unit); err != nil { - s.log.Error("Failed to reset unit failed state", "error", err.Error(), "unit", request.Unit) - } _, err = conn.RestartUnitContext(ctx, request.Unit, "replace", resultChan) case Reload: err = conn.ReloadContext(ctx) @@ -151,8 +139,7 @@ func (s *ServiceManager) SystemdAction(ctx context.Context, request ServiceManag return nil default: - serviceJournal := s.journal.readJournal(request.Unit) - return fmt.Errorf("performing action %q on systemd unit %q failed: expected %q but received %q. 
systemd unit journal entries: %s", request.Action.String(), request.Unit, "done", result, serviceJournal) + return fmt.Errorf("performing action %q on systemd unit %q failed: expected %q but received %q", request.Action.String(), request.Unit, "done", result) } } @@ -185,7 +172,7 @@ func (s *ServiceManager) OverrideServiceUnitExecStart(ctx context.Context, unitN if strings.Contains(execStart, "\n") || strings.Contains(execStart, "\r") { return fmt.Errorf("execStart must not contain newlines") } - overrideUnitContents := fmt.Sprintf("[Service]\nExecStart=\nExecStart=%s\n", execStart) + overrideUnitContents := fmt.Sprintf("[Service]\nExecStart=\nExecStart=%s $CONSTELLATION_DEBUG_FLAGS\n", execStart) s.systemdUnitFilewriteLock.Lock() defer s.systemdUnitFilewriteLock.Unlock() path := filepath.Join(systemdUnitFolder, unitName+".service.d", "override.conf") diff --git a/debugd/internal/debugd/deploy/service_test.go b/debugd/internal/debugd/deploy/service_test.go index 06d9820cd..7ee879644 100644 --- a/debugd/internal/debugd/deploy/service_test.go +++ b/debugd/internal/debugd/deploy/service_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package deploy @@ -104,11 +104,10 @@ func TestSystemdAction(t *testing.T) { manager := ServiceManager{ log: logger.NewTest(t), dbus: &tc.dbus, - journal: &stubJournalReader{}, fs: fs, systemdUnitFilewriteLock: sync.Mutex{}, } - err := manager.SystemdAction(t.Context(), ServiceManagerRequest{ + err := manager.SystemdAction(context.Background(), ServiceManagerRequest{ Unit: unitName, Action: tc.action, }) @@ -184,11 +183,10 @@ func TestWriteSystemdUnitFile(t *testing.T) { manager := ServiceManager{ log: logger.NewTest(t), dbus: &tc.dbus, - journal: &stubJournalReader{}, fs: fs, systemdUnitFilewriteLock: sync.Mutex{}, } - err := manager.WriteSystemdUnitFile(t.Context(), tc.unit) + err := manager.WriteSystemdUnitFile(context.Background(), tc.unit) if tc.wantErr { assert.Error(err) @@ -220,7 +218,7 @@ func TestOverrideServiceUnitExecStart(t *testing.T) { }, unitName: "test", execStart: "/run/state/bin/test", - wantFileContents: "[Service]\nExecStart=\nExecStart=/run/state/bin/test\n", + wantFileContents: "[Service]\nExecStart=\nExecStart=/run/state/bin/test $CONSTELLATION_DEBUG_FLAGS\n", wantActionCalls: []dbusConnActionInput{ {name: "test.service", mode: "replace"}, }, @@ -266,7 +264,7 @@ func TestOverrideServiceUnitExecStart(t *testing.T) { }, unitName: "test", execStart: "/run/state/bin/test", - wantFileContents: "[Service]\nExecStart=\nExecStart=/run/state/bin/test\n", + wantFileContents: "[Service]\nExecStart=\nExecStart=/run/state/bin/test $CONSTELLATION_DEBUG_FLAGS\n", wantActionCalls: []dbusConnActionInput{ {name: "test.service", mode: "replace"}, }, @@ -298,11 +296,10 @@ func TestOverrideServiceUnitExecStart(t *testing.T) { manager := ServiceManager{ log: logger.NewTest(t), dbus: &tc.dbus, - journal: &stubJournalReader{}, fs: fs, systemdUnitFilewriteLock: sync.Mutex{}, } - err := manager.OverrideServiceUnitExecStart(t.Context(), tc.unitName, tc.execStart) + err := manager.OverrideServiceUnitExecStart(context.Background(), tc.unitName, tc.execStart) if tc.wantErr { assert.Error(err) @@ -356,10 +353,6 @@ func (c *fakeDbusConn) StopUnitContext(_ context.Context, name string, mode stri return c.jobID, c.actionErr } -func (c *fakeDbusConn) ResetFailedUnitContext(_ context.Context, _ string) error { - return nil -} - func (c *fakeDbusConn) RestartUnitContext(_ 
context.Context, name string, mode string, ch chan<- string) (int, error) { c.inputs = append(c.inputs, dbusConnActionInput{name: name, mode: mode}) ch <- c.result @@ -374,9 +367,3 @@ func (c *fakeDbusConn) ReloadContext(_ context.Context) error { } func (c *fakeDbusConn) Close() {} - -type stubJournalReader struct{} - -func (s *stubJournalReader) readJournal(_ string) string { - return "" -} diff --git a/debugd/internal/debugd/deploy/wrappers.go b/debugd/internal/debugd/deploy/wrappers.go index 57391d6a6..9ec9f0b01 100644 --- a/debugd/internal/debugd/deploy/wrappers.go +++ b/debugd/internal/debugd/deploy/wrappers.go @@ -1,14 +1,13 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package deploy import ( "context" - "os/exec" "github.com/coreos/go-systemd/v22/dbus" ) @@ -38,10 +37,6 @@ func (c *dbusConnWrapper) StopUnitContext(ctx context.Context, name string, mode return c.conn.StopUnitContext(ctx, name, mode, ch) } -func (c *dbusConnWrapper) ResetFailedUnitContext(ctx context.Context, name string) error { - return c.conn.ResetFailedUnitContext(ctx, name) -} - func (c *dbusConnWrapper) RestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { return c.conn.RestartUnitContext(ctx, name, mode, ch) } @@ -53,85 +48,3 @@ func (c *dbusConnWrapper) ReloadContext(ctx context.Context) error { func (c *dbusConnWrapper) Close() { c.conn.Close() } - -type journalctlWrapper struct{} - -func (j *journalctlWrapper) readJournal(unit string) string { - out, _ := exec.CommandContext(context.Background(), "journalctl", "-u", unit, "--no-pager").CombinedOutput() - return string(out) -} - -/* -// Preferably, we would use the systemd journal API directly. -// However, this requires linking against systemd libraries, so we go with the easier journalctl command for now. - -type sdJournalWrapper struct{} - -// readJournal reads the journal for a specific unit. -func (s *sdJournalWrapper) readJournal(unit string) string { - journal, err := sdjournal.NewJournal() - if err != nil { - log.Printf("opening journal: %s", err) - return "" - } - defer journal.Close() - - // Filter the journal for the specified unit - filters := []string{ - fmt.Sprintf("_SYSTEMD_UNIT=%s", unit), - fmt.Sprintf("UNIT=%s", unit), - fmt.Sprintf("OBJECT_SYSTEMD_UNIT=%s", unit), - fmt.Sprintf("_SYSTEMD_SLICE=%s", unit), - fmt.Sprintf("_SYSTEMD_USER_UNIT=%s", unit), - fmt.Sprintf("USER_UNIT=%s", unit), - fmt.Sprintf("COREDUMP_USER_UNIT=%s", unit), - fmt.Sprintf("OBJECT_SYSTEMD_USER_UNIT=%s", unit), - fmt.Sprintf("_SYSTEMD_USER_SLICE=%s", unit), - } - for _, filter := range filters { - if err := journal.AddMatch(filter); err != nil { - log.Printf("applying filter %q: %s", filter, err) - return "" - } - if err := journal.AddDisjunction(); err != nil { - log.Printf("adding disjunction to journal filter: %s", err) - return "" - } - } - - // Seek to the beginning of the journal - if err := journal.SeekHead(); err != nil { - log.Printf("seeking journal tail: %s", err) - return "" - } - - // Iterate over the journal entries - var previousCursor string - journalLog := &strings.Builder{} - for { - if _, err := journal.Next(); err != nil { - log.Printf("getting next entry in journal: %s", err) - return "" - } - - entry, err := journal.GetEntry() - if err != nil { - log.Printf("getting journal entry: %s", err) - return "" - } - - // Abort if we reached the end of the journal, i.e. 
the cursor didn't change - if entry.Cursor == previousCursor { - break - } - previousCursor = entry.Cursor - - if _, err := journalLog.WriteString(entry.Fields[sdjournal.SD_JOURNAL_FIELD_MESSAGE] + "\n"); err != nil { - log.Printf("copying journal entry to buffer: %s", err) - return "" - } - } - - return strings.TrimSpace(journalLog.String()) -} -*/ diff --git a/debugd/internal/debugd/info/info.go b/debugd/internal/debugd/info/info.go index a9a193734..06df4c71c 100644 --- a/debugd/internal/debugd/info/info.go +++ b/debugd/internal/debugd/info/info.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package info implements the info map that is diff --git a/debugd/internal/debugd/info/info_test.go b/debugd/internal/debugd/info/info_test.go index a2b38422d..9c129ff6a 100644 --- a/debugd/internal/debugd/info/info_test.go +++ b/debugd/internal/debugd/info/info_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package info diff --git a/debugd/internal/debugd/logcollector/credentials.go b/debugd/internal/debugd/logcollector/credentials.go index 03b94b174..3ada029e1 100644 --- a/debugd/internal/debugd/logcollector/credentials.go +++ b/debugd/internal/debugd/logcollector/credentials.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package logcollector diff --git a/debugd/internal/debugd/logcollector/credentials_test.go b/debugd/internal/debugd/logcollector/credentials_test.go index 4bef5d86f..19d113c99 100644 --- a/debugd/internal/debugd/logcollector/credentials_test.go +++ b/debugd/internal/debugd/logcollector/credentials_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package logcollector @@ -67,7 +67,7 @@ func TestGetOpensearchCredentialsGCP(t *testing.T) { g := &gcpCloudCredentialGetter{secretsAPI: tc.gcpAPI} - gotCreds, err := g.GetOpensearchCredentials(t.Context()) + gotCreds, err := g.GetOpensearchCredentials(context.Background()) if tc.wantErr { assert.Error(err) @@ -127,7 +127,7 @@ func TestGetOpensearchCredentialsAzure(t *testing.T) { a := &azureCloudCredentialGetter{secretsAPI: tc.azureAPI} - gotCreds, err := a.GetOpensearchCredentials(t.Context()) + gotCreds, err := a.GetOpensearchCredentials(context.Background()) if tc.wantErr { assert.Error(err) @@ -184,7 +184,7 @@ func TestGetOpensearchCredentialsAWS(t *testing.T) { a := &awsCloudCredentialGetter{secretmanager: tc.awsAPI} - gotCreds, err := a.GetOpensearchCredentials(t.Context()) + gotCreds, err := a.GetOpensearchCredentials(context.Background()) if tc.wantErr { assert.Error(err) diff --git a/debugd/internal/debugd/logcollector/fields.go b/debugd/internal/debugd/logcollector/fields.go index e35864c72..1de7de844 100644 --- a/debugd/internal/debugd/logcollector/fields.go +++ b/debugd/internal/debugd/logcollector/fields.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package logcollector diff --git a/debugd/internal/debugd/logcollector/logcollector.go b/debugd/internal/debugd/logcollector/logcollector.go index 809133ad0..9723d102f 100644 --- a/debugd/internal/debugd/logcollector/logcollector.go +++ b/debugd/internal/debugd/logcollector/logcollector.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless 
Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package logcollector uses podman to deploy logstash and filebeat containers @@ -11,6 +11,7 @@ package logcollector import ( "context" "fmt" + "io" "log/slog" "os" "os/exec" @@ -129,7 +130,7 @@ func getTemplate(ctx context.Context, logger *slog.Logger, image, templateDir, d "--name=template", image, } - createContainerCmd := podman(ctx, createContainerArgs...) + createContainerCmd := exec.CommandContext(ctx, "podman", createContainerArgs...) logger.Info("Creating template container") if out, err := createContainerCmd.CombinedOutput(); err != nil { return nil, fmt.Errorf("creating template container: %w\n%s", err, out) @@ -144,7 +145,7 @@ func getTemplate(ctx context.Context, logger *slog.Logger, image, templateDir, d "template:/usr/share/constellogs/templates/", destDir, } - copyFromCmd := podman(ctx, copyFromArgs...) + copyFromCmd := exec.CommandContext(ctx, "podman", copyFromArgs...) logger.Info("Copying templates") if out, err := copyFromCmd.CombinedOutput(); err != nil { return nil, fmt.Errorf("copying templates: %w\n%s", err, out) @@ -154,7 +155,7 @@ func getTemplate(ctx context.Context, logger *slog.Logger, image, templateDir, d "rm", "template", } - removeContainerCmd := podman(ctx, removeContainerArgs...) + removeContainerCmd := exec.CommandContext(ctx, "podman", removeContainerArgs...) logger.Info("Removing template container") if out, err := removeContainerCmd.CombinedOutput(); err != nil { return nil, fmt.Errorf("removing template container: %w\n%s", err, out) @@ -175,43 +176,40 @@ func startPod(ctx context.Context, logger *slog.Logger) error { "create", "logcollection", } - createPodCmd := podman(ctx, createPodArgs...) + createPodCmd := exec.CommandContext(ctx, "podman", createPodArgs...) logger.Info(fmt.Sprintf("Create pod command: %v", createPodCmd.String())) if out, err := createPodCmd.CombinedOutput(); err != nil { return fmt.Errorf("failed to create pod: %w; output: %s", err, out) } // start logstash container + logstashLog := newCmdLogger(logger.WithGroup("logstash")) runLogstashArgs := []string{ "run", - "-d", - "--restart=unless-stopped", + "--rm", "--name=logstash", "--pod=logcollection", - "--log-driver=journald", + "--log-driver=none", "--volume=/run/logstash/pipeline:/usr/share/logstash/pipeline/:ro", versions.LogstashImage, } - runLogstashCmd := podman(ctx, runLogstashArgs...) + runLogstashCmd := exec.CommandContext(ctx, "podman", runLogstashArgs...) 
logger.Info(fmt.Sprintf("Run logstash command: %v", runLogstashCmd.String())) - if out, err := runLogstashCmd.CombinedOutput(); err != nil { - logger.Error("Could not start logstash container", "err", err, "output", out) + runLogstashCmd.Stdout = logstashLog + runLogstashCmd.Stderr = logstashLog + if err := runLogstashCmd.Start(); err != nil { return fmt.Errorf("failed to start logstash: %w", err) } - if out, err := podman(ctx, "wait", "logstash", "--condition=running", "--interval=15s").CombinedOutput(); err != nil { - logger.Error("Logstash container failed to reach healthy status", "err", err, "output", out) - return fmt.Errorf("waiting for logstash container to reach healthy status: %w; output: %s", err, out) - } // start filebeat container + filebeatLog := newCmdLogger(logger.WithGroup("filebeat")) runFilebeatArgs := []string{ "run", - "-d", - "--restart=unless-stopped", + "--rm", "--name=filebeat", "--pod=logcollection", "--privileged", - "--log-driver=journald", + "--log-driver=none", "--volume=/run/log/journal:/run/log/journal:ro", "--volume=/etc/machine-id:/etc/machine-id:ro", "--volume=/run/systemd:/run/systemd:ro", @@ -220,16 +218,13 @@ func startPod(ctx context.Context, logger *slog.Logger) error { "--volume=/run/filebeat/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro", versions.FilebeatImage, } - runFilebeatCmd := podman(ctx, runFilebeatArgs...) + runFilebeatCmd := exec.CommandContext(ctx, "podman", runFilebeatArgs...) logger.Info(fmt.Sprintf("Run filebeat command: %v", runFilebeatCmd.String())) - if out, err := runFilebeatCmd.CombinedOutput(); err != nil { - logger.Error("Could not start filebeat container", "err", err, "output", out) + runFilebeatCmd.Stdout = filebeatLog + runFilebeatCmd.Stderr = filebeatLog + if err := runFilebeatCmd.Start(); err != nil { return fmt.Errorf("failed to run filebeat: %w", err) } - if out, err := podman(ctx, "wait", "filebeat", "--condition=running", "--interval=15s").CombinedOutput(); err != nil { - logger.Error("Filebeat container failed to reach healthy status", "err", err, "output", out) - return fmt.Errorf("waiting for filebeat container to reach healthy status: %w; output: %s", err, out) - } return nil } @@ -300,9 +295,17 @@ func setCloudMetadata(ctx context.Context, m map[string]string, provider cloudpr } } -func podman(ctx context.Context, args ...string) *exec.Cmd { - args = append([]string{"--runtime=runc"}, args...) - return exec.CommandContext(ctx, "podman", args...) +func newCmdLogger(logger *slog.Logger) io.Writer { + return &cmdLogger{logger: logger} +} + +type cmdLogger struct { + logger *slog.Logger +} + +func (c *cmdLogger) Write(p []byte) (n int, err error) { + c.logger.Info(string(p)) + return len(p), nil } type providerMetadata interface { diff --git a/debugd/internal/debugd/metadata/cloudprovider/cloudprovider.go b/debugd/internal/debugd/metadata/cloudprovider/cloudprovider.go index 52a7d09d3..64a19aa3d 100644 --- a/debugd/internal/debugd/metadata/cloudprovider/cloudprovider.go +++ b/debugd/internal/debugd/metadata/cloudprovider/cloudprovider.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package cloudprovider implements a metadata service for cloud providers. 
diff --git a/debugd/internal/debugd/metadata/cloudprovider/cloudprovider_test.go b/debugd/internal/debugd/metadata/cloudprovider/cloudprovider_test.go index 52c29e1b3..e7cbf23e5 100644 --- a/debugd/internal/debugd/metadata/cloudprovider/cloudprovider_test.go +++ b/debugd/internal/debugd/metadata/cloudprovider/cloudprovider_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cloudprovider @@ -56,7 +56,7 @@ func TestRole(t *testing.T) { fetcher := Fetcher{tc.meta} - role, err := fetcher.Role(t.Context()) + role, err := fetcher.Role(context.Background()) if tc.wantErr { assert.Error(err) @@ -110,7 +110,7 @@ func TestDiscoverDebugIPs(t *testing.T) { fetcher := Fetcher{ metaAPI: &tc.meta, } - ips, err := fetcher.DiscoverDebugdIPs(t.Context()) + ips, err := fetcher.DiscoverDebugdIPs(context.Background()) if tc.wantErr { assert.Error(err) @@ -149,7 +149,7 @@ func TestDiscoverLoadBalancerIP(t *testing.T) { metaAPI: tc.metaAPI, } - ip, err := fetcher.DiscoverLoadBalancerIP(t.Context()) + ip, err := fetcher.DiscoverLoadBalancerIP(context.Background()) if tc.wantErr { assert.Error(err) diff --git a/debugd/internal/debugd/metadata/fallback/fallback.go b/debugd/internal/debugd/metadata/fallback/fallback.go index 9b60a1a77..39308390f 100644 --- a/debugd/internal/debugd/metadata/fallback/fallback.go +++ b/debugd/internal/debugd/metadata/fallback/fallback.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package fallback implements a fake metadata backend. diff --git a/debugd/internal/debugd/metadata/fallback/fallback_test.go b/debugd/internal/debugd/metadata/fallback/fallback_test.go index c00fb5893..9ec2d4e0b 100644 --- a/debugd/internal/debugd/metadata/fallback/fallback_test.go +++ b/debugd/internal/debugd/metadata/fallback/fallback_test.go @@ -1,12 +1,13 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package fallback import ( + "context" "testing" "github.com/edgelesssys/constellation/v2/internal/role" @@ -22,19 +23,19 @@ func TestDiscoverDebugdIPs(t *testing.T) { assert := assert.New(t) fetcher := NewFallbackFetcher() - ips, err := fetcher.DiscoverDebugdIPs(t.Context()) + ips, err := fetcher.DiscoverDebugdIPs(context.Background()) assert.NoError(err) assert.Empty(ips) - rol, err := fetcher.Role(t.Context()) + rol, err := fetcher.Role(context.Background()) assert.NoError(err) assert.Equal(rol, role.Unknown) - uid, err := fetcher.UID(t.Context()) + uid, err := fetcher.UID(context.Background()) assert.NoError(err) assert.Empty(uid) - self, err := fetcher.Self(t.Context()) + self, err := fetcher.Self(context.Background()) assert.NoError(err) assert.Empty(self) } diff --git a/debugd/internal/debugd/metadata/metadata.go b/debugd/internal/debugd/metadata/metadata.go index 814e3d7f4..cecbff67a 100644 --- a/debugd/internal/debugd/metadata/metadata.go +++ b/debugd/internal/debugd/metadata/metadata.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package metadata schedules the discovery of other debugd instances diff --git a/debugd/internal/debugd/metadata/scheduler.go b/debugd/internal/debugd/metadata/scheduler.go index e7352fb73..bf6705fec 100644 --- a/debugd/internal/debugd/metadata/scheduler.go +++ b/debugd/internal/debugd/metadata/scheduler.go @@ -1,7 +1,7 @@ /* 
Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package metadata diff --git a/debugd/internal/debugd/metadata/scheduler_test.go b/debugd/internal/debugd/metadata/scheduler_test.go index 165022837..13f9d4707 100644 --- a/debugd/internal/debugd/metadata/scheduler_test.go +++ b/debugd/internal/debugd/metadata/scheduler_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package metadata @@ -91,7 +91,7 @@ func TestSchedulerStart(t *testing.T) { } wg := &sync.WaitGroup{} - scheduler.Start(t.Context(), wg) + scheduler.Start(context.Background(), wg) wg.Wait() assert.Equal(tc.wantDeploymentDownloads, tc.downloader.downloadDeploymentIPs) diff --git a/debugd/internal/debugd/server/BUILD.bazel b/debugd/internal/debugd/server/BUILD.bazel index 6d8d298be..ee0de46c2 100644 --- a/debugd/internal/debugd/server/BUILD.bazel +++ b/debugd/internal/debugd/server/BUILD.bazel @@ -13,7 +13,7 @@ go_library( "//debugd/service", "//internal/constants", "//internal/logger", - "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//:go_default_library", "@org_golang_google_grpc//keepalive", ], ) @@ -32,7 +32,7 @@ go_test( "//internal/logger", "@com_github_stretchr_testify//assert", "@com_github_stretchr_testify//require", - "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//:go_default_library", "@org_golang_google_grpc//credentials/insecure", "@org_uber_go_goleak//:goleak", ], diff --git a/debugd/internal/debugd/server/server.go b/debugd/internal/debugd/server/server.go index ef0763a10..aeae7c4a1 100644 --- a/debugd/internal/debugd/server/server.go +++ b/debugd/internal/debugd/server/server.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package server implements the gRPC endpoint of Constellation's debugd. 
@@ -106,7 +106,6 @@ func (s *debugdServer) UploadFiles(stream pb.Debugd_UploadFilesServer) error { s.log.With(slog.Any("error", err)).Error("Uploading files failed") return stream.SendAndClose(&pb.UploadFilesResponse{ Status: pb.UploadFilesStatus_UPLOAD_FILES_UPLOAD_FAILED, - Error: err.Error(), }) } @@ -125,7 +124,6 @@ func (s *debugdServer) UploadFiles(stream pb.Debugd_UploadFilesServer) error { s.log.With(slog.Any("error", overrideUnitErr)).Error("Overriding service units failed") return stream.SendAndClose(&pb.UploadFilesResponse{ Status: pb.UploadFilesStatus_UPLOAD_FILES_START_FAILED, - Error: overrideUnitErr.Error(), }) } return stream.SendAndClose(&pb.UploadFilesResponse{ @@ -157,8 +155,8 @@ func Start(log *slog.Logger, wg *sync.WaitGroup, serv pb.DebugdServer) { go func() { defer wg.Done() - grpcLog := logger.GRPCLogger(log) - logger.ReplaceGRPCLogger(grpcLog) + grpcLog := log.WithGroup("gRPC") + logger.ReplaceGRPCLogger(slog.New(logger.NewLevelHandler(slog.LevelWarn, grpcLog.Handler()))) grpcServer := grpc.NewServer( logger.GetServerStreamInterceptor(grpcLog), diff --git a/debugd/internal/debugd/server/server_test.go b/debugd/internal/debugd/server/server_test.go index a340c425f..3ae8b3312 100644 --- a/debugd/internal/debugd/server/server_test.go +++ b/debugd/internal/debugd/server/server_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package server @@ -79,7 +79,7 @@ func TestSetInfo(t *testing.T) { defer conn.Close() client := pb.NewDebugdClient(conn) - setInfoStatus, err := client.SetInfo(t.Context(), &pb.SetInfoRequest{Info: tc.setInfo}) + setInfoStatus, err := client.SetInfo(context.Background(), &pb.SetInfoRequest{Info: tc.setInfo}) grpcServ.GracefulStop() assert.NoError(err) @@ -137,7 +137,7 @@ func TestGetInfo(t *testing.T) { defer conn.Close() client := pb.NewDebugdClient(conn) - resp, err := client.GetInfo(t.Context(), &pb.GetInfoRequest{}) + resp, err := client.GetInfo(context.Background(), &pb.GetInfoRequest{}) grpcServ.GracefulStop() if tc.wantErr { @@ -201,7 +201,7 @@ func TestUploadFiles(t *testing.T) { require.NoError(err) defer conn.Close() client := pb.NewDebugdClient(conn) - stream, err := client.UploadFiles(t.Context()) + stream, err := client.UploadFiles(context.Background()) require.NoError(err) resp, err := stream.CloseAndRecv() @@ -245,7 +245,7 @@ func TestDownloadFiles(t *testing.T) { require.NoError(err) defer conn.Close() client := pb.NewDebugdClient(conn) - stream, err := client.DownloadFiles(t.Context(), tc.request) + stream, err := client.DownloadFiles(context.Background(), tc.request) require.NoError(err) _, recvErr := stream.Recv() if tc.wantRecvErr { @@ -324,7 +324,7 @@ func TestUploadSystemServiceUnits(t *testing.T) { require.NoError(err) defer conn.Close() client := pb.NewDebugdClient(conn) - resp, err := client.UploadSystemServiceUnits(t.Context(), tc.request) + resp, err := client.UploadSystemServiceUnits(context.Background(), tc.request) grpcServ.GracefulStop() @@ -371,8 +371,8 @@ type netDialer interface { DialContext(_ context.Context, network, address string) (net.Conn, error) } -func dial(dialer netDialer, target string) (*grpc.ClientConn, error) { - return grpc.NewClient(target, +func dial(ctx context.Context, dialer netDialer, target string) (*grpc.ClientConn, error) { + return grpc.DialContext(ctx, target, grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) { return dialer.DialContext(ctx, "tcp", addr) }), @@ -414,7 
+414,7 @@ func setupServerWithConn(endpoint string, serv *debugdServer) (*grpc.Server, *gr lis := dialer.GetListener(endpoint) go grpcServ.Serve(lis) - conn, err := dial(dialer, endpoint) + conn, err := dial(context.Background(), dialer, endpoint) if err != nil { return nil, nil, err } diff --git a/debugd/internal/filetransfer/chunkstream.go b/debugd/internal/filetransfer/chunkstream.go index 5fea59a15..9c36b968f 100644 --- a/debugd/internal/filetransfer/chunkstream.go +++ b/debugd/internal/filetransfer/chunkstream.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package filetransfer diff --git a/debugd/internal/filetransfer/chunkstream_test.go b/debugd/internal/filetransfer/chunkstream_test.go index e09e144de..f01cbc136 100644 --- a/debugd/internal/filetransfer/chunkstream_test.go +++ b/debugd/internal/filetransfer/chunkstream_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package filetransfer diff --git a/debugd/internal/filetransfer/filetransfer.go b/debugd/internal/filetransfer/filetransfer.go index c15c5d0e4..04c784be1 100644 --- a/debugd/internal/filetransfer/filetransfer.go +++ b/debugd/internal/filetransfer/filetransfer.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package filetransfer implements the exchange of files between cdgb <-> debugd diff --git a/debugd/internal/filetransfer/filetransfer_test.go b/debugd/internal/filetransfer/filetransfer_test.go index 7628edc3e..fed9d9d05 100644 --- a/debugd/internal/filetransfer/filetransfer_test.go +++ b/debugd/internal/filetransfer/filetransfer_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package filetransfer diff --git a/debugd/internal/filetransfer/streamer/streamer.go b/debugd/internal/filetransfer/streamer/streamer.go index f48276ee9..3648bb4d5 100644 --- a/debugd/internal/filetransfer/streamer/streamer.go +++ b/debugd/internal/filetransfer/streamer/streamer.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package streamer implements streaming of files over gRPC. 
diff --git a/debugd/internal/filetransfer/streamer/streamer_test.go b/debugd/internal/filetransfer/streamer/streamer_test.go index 06e95324d..ec64b8ba9 100644 --- a/debugd/internal/filetransfer/streamer/streamer_test.go +++ b/debugd/internal/filetransfer/streamer/streamer_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package streamer diff --git a/debugd/logstash/Dockerfile b/debugd/logstash/Dockerfile index 8d538596a..fb0657586 100644 --- a/debugd/logstash/Dockerfile +++ b/debugd/logstash/Dockerfile @@ -1,11 +1,11 @@ -FROM fedora:40@sha256:3c86d25fef9d2001712bc3d9b091fc40cf04be4767e48f1aa3b785bf58d300ed AS build +FROM fedora:38@sha256:3f01c8f79691df76331cb4bb0944794a60850475e859c15e49513fcbe0a3d88a AS build ARG LOGSTASH_VER=8.6.1 RUN curl -fsSLO https://artifacts.opensearch.org/logstash/logstash-oss-with-opensearch-output-plugin-$LOGSTASH_VER-linux-x64.tar.gz RUN tar -zxvf logstash-oss-with-opensearch-output-plugin-$LOGSTASH_VER-linux-x64.tar.gz -FROM fedora:40@sha256:3c86d25fef9d2001712bc3d9b091fc40cf04be4767e48f1aa3b785bf58d300ed AS release +FROM fedora:38@sha256:3f01c8f79691df76331cb4bb0944794a60850475e859c15e49513fcbe0a3d88a AS release COPY --from=build logstash-* /usr/share/logstash diff --git a/debugd/logstash/assets.go b/debugd/logstash/assets.go index 4fda7bb88..e49e1f60d 100644 --- a/debugd/logstash/assets.go +++ b/debugd/logstash/assets.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package logstash diff --git a/debugd/metricbeat/Dockerfile b/debugd/metricbeat/Dockerfile index 11694af43..013f23a3c 100644 --- a/debugd/metricbeat/Dockerfile +++ b/debugd/metricbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM fedora:40@sha256:3c86d25fef9d2001712bc3d9b091fc40cf04be4767e48f1aa3b785bf58d300ed AS release +FROM fedora:38@sha256:3f01c8f79691df76331cb4bb0944794a60850475e859c15e49513fcbe0a3d88a AS release RUN dnf install -y https://artifacts.elastic.co/downloads/beats/metricbeat/metricbeat-8.9.2-x86_64.rpm diff --git a/debugd/metricbeat/assets.go b/debugd/metricbeat/assets.go index faa3dc8fa..8f3f954f1 100644 --- a/debugd/metricbeat/assets.go +++ b/debugd/metricbeat/assets.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package metricbeat diff --git a/debugd/service/debugd.pb.go b/debugd/service/debugd.pb.go index 8414c895f..fb95a1221 100644 --- a/debugd/service/debugd.pb.go +++ b/debugd/service/debugd.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.6 -// protoc v5.29.1 +// protoc-gen-go v1.33.0 +// protoc v4.22.1 // source: debugd/service/debugd.proto package service @@ -15,7 +15,6 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" - unsafe "unsafe" ) const ( @@ -173,17 +172,20 @@ func (UploadSystemdServiceUnitsStatus) EnumDescriptor() ([]byte, []int) { } type SetInfoRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Info []*Info `protobuf:"bytes,1,rep,name=info,proto3" json:"info,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Info []*Info `protobuf:"bytes,1,rep,name=info,proto3" json:"info,omitempty"` } func (x *SetInfoRequest) Reset() { *x = SetInfoRequest{} - mi := &file_debugd_service_debugd_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_debugd_service_debugd_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *SetInfoRequest) String() string { @@ -194,7 +196,7 @@ func (*SetInfoRequest) ProtoMessage() {} func (x *SetInfoRequest) ProtoReflect() protoreflect.Message { mi := &file_debugd_service_debugd_proto_msgTypes[0] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -217,17 +219,20 @@ func (x *SetInfoRequest) GetInfo() []*Info { } type SetInfoResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Status SetInfoStatus `protobuf:"varint,1,opt,name=status,proto3,enum=debugd.SetInfoStatus" json:"status,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Status SetInfoStatus `protobuf:"varint,1,opt,name=status,proto3,enum=debugd.SetInfoStatus" json:"status,omitempty"` } func (x *SetInfoResponse) Reset() { *x = SetInfoResponse{} - mi := &file_debugd_service_debugd_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_debugd_service_debugd_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *SetInfoResponse) String() string { @@ -238,7 +243,7 @@ func (*SetInfoResponse) ProtoMessage() {} func (x *SetInfoResponse) ProtoReflect() protoreflect.Message { mi := &file_debugd_service_debugd_proto_msgTypes[1] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -261,16 +266,18 @@ func (x *SetInfoResponse) GetStatus() SetInfoStatus { } type GetInfoRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *GetInfoRequest) Reset() { *x = GetInfoRequest{} - mi := &file_debugd_service_debugd_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_debugd_service_debugd_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *GetInfoRequest) String() string { @@ 
-281,7 +288,7 @@ func (*GetInfoRequest) ProtoMessage() {} func (x *GetInfoRequest) ProtoReflect() protoreflect.Message { mi := &file_debugd_service_debugd_proto_msgTypes[2] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -297,17 +304,20 @@ func (*GetInfoRequest) Descriptor() ([]byte, []int) { } type GetInfoResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Info []*Info `protobuf:"bytes,1,rep,name=info,proto3" json:"info,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Info []*Info `protobuf:"bytes,1,rep,name=info,proto3" json:"info,omitempty"` } func (x *GetInfoResponse) Reset() { *x = GetInfoResponse{} - mi := &file_debugd_service_debugd_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_debugd_service_debugd_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *GetInfoResponse) String() string { @@ -318,7 +328,7 @@ func (*GetInfoResponse) ProtoMessage() {} func (x *GetInfoResponse) ProtoReflect() protoreflect.Message { mi := &file_debugd_service_debugd_proto_msgTypes[3] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -341,18 +351,21 @@ func (x *GetInfoResponse) GetInfo() []*Info { } type Info struct { - state protoimpl.MessageState `protogen:"open.v1"` - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } func (x *Info) Reset() { *x = Info{} - mi := &file_debugd_service_debugd_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_debugd_service_debugd_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *Info) String() string { @@ -363,7 +376,7 @@ func (*Info) ProtoMessage() {} func (x *Info) ProtoReflect() protoreflect.Message { mi := &file_debugd_service_debugd_proto_msgTypes[4] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -393,16 +406,18 @@ func (x *Info) GetValue() string { } type DownloadFilesRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *DownloadFilesRequest) Reset() { *x = DownloadFilesRequest{} - mi := &file_debugd_service_debugd_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_debugd_service_debugd_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x 
*DownloadFilesRequest) String() string { @@ -413,7 +428,7 @@ func (*DownloadFilesRequest) ProtoMessage() {} func (x *DownloadFilesRequest) ProtoReflect() protoreflect.Message { mi := &file_debugd_service_debugd_proto_msgTypes[5] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -429,21 +444,24 @@ func (*DownloadFilesRequest) Descriptor() ([]byte, []int) { } type FileTransferMessage struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Types that are valid to be assigned to Kind: + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Kind: // // *FileTransferMessage_Header // *FileTransferMessage_Chunk - Kind isFileTransferMessage_Kind `protobuf_oneof:"kind"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Kind isFileTransferMessage_Kind `protobuf_oneof:"kind"` } func (x *FileTransferMessage) Reset() { *x = FileTransferMessage{} - mi := &file_debugd_service_debugd_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_debugd_service_debugd_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *FileTransferMessage) String() string { @@ -454,7 +472,7 @@ func (*FileTransferMessage) ProtoMessage() {} func (x *FileTransferMessage) ProtoReflect() protoreflect.Message { mi := &file_debugd_service_debugd_proto_msgTypes[6] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -469,27 +487,23 @@ func (*FileTransferMessage) Descriptor() ([]byte, []int) { return file_debugd_service_debugd_proto_rawDescGZIP(), []int{6} } -func (x *FileTransferMessage) GetKind() isFileTransferMessage_Kind { - if x != nil { - return x.Kind +func (m *FileTransferMessage) GetKind() isFileTransferMessage_Kind { + if m != nil { + return m.Kind } return nil } func (x *FileTransferMessage) GetHeader() *FileTransferHeader { - if x != nil { - if x, ok := x.Kind.(*FileTransferMessage_Header); ok { - return x.Header - } + if x, ok := x.GetKind().(*FileTransferMessage_Header); ok { + return x.Header } return nil } func (x *FileTransferMessage) GetChunk() *Chunk { - if x != nil { - if x, ok := x.Kind.(*FileTransferMessage_Chunk); ok { - return x.Chunk - } + if x, ok := x.GetKind().(*FileTransferMessage_Chunk); ok { + return x.Chunk } return nil } @@ -511,19 +525,22 @@ func (*FileTransferMessage_Header) isFileTransferMessage_Kind() {} func (*FileTransferMessage_Chunk) isFileTransferMessage_Kind() {} type FileTransferHeader struct { - state protoimpl.MessageState `protogen:"open.v1"` - TargetPath string `protobuf:"bytes,1,opt,name=targetPath,proto3" json:"targetPath,omitempty"` - Mode uint32 `protobuf:"varint,3,opt,name=mode,proto3" json:"mode,omitempty"` - OverrideServiceUnit *string `protobuf:"bytes,4,opt,name=overrideServiceUnit,proto3,oneof" json:"overrideServiceUnit,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TargetPath string `protobuf:"bytes,1,opt,name=targetPath,proto3" json:"targetPath,omitempty"` + Mode uint32 `protobuf:"varint,3,opt,name=mode,proto3" json:"mode,omitempty"` 
+ OverrideServiceUnit *string `protobuf:"bytes,4,opt,name=overrideServiceUnit,proto3,oneof" json:"overrideServiceUnit,omitempty"` } func (x *FileTransferHeader) Reset() { *x = FileTransferHeader{} - mi := &file_debugd_service_debugd_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_debugd_service_debugd_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *FileTransferHeader) String() string { @@ -534,7 +551,7 @@ func (*FileTransferHeader) ProtoMessage() {} func (x *FileTransferHeader) ProtoReflect() protoreflect.Message { mi := &file_debugd_service_debugd_proto_msgTypes[7] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -571,18 +588,21 @@ func (x *FileTransferHeader) GetOverrideServiceUnit() string { } type Chunk struct { - state protoimpl.MessageState `protogen:"open.v1"` - Content []byte `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"` - Last bool `protobuf:"varint,2,opt,name=last,proto3" json:"last,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Content []byte `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"` + Last bool `protobuf:"varint,2,opt,name=last,proto3" json:"last,omitempty"` } func (x *Chunk) Reset() { *x = Chunk{} - mi := &file_debugd_service_debugd_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_debugd_service_debugd_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *Chunk) String() string { @@ -593,7 +613,7 @@ func (*Chunk) ProtoMessage() {} func (x *Chunk) ProtoReflect() protoreflect.Message { mi := &file_debugd_service_debugd_proto_msgTypes[8] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -623,18 +643,20 @@ func (x *Chunk) GetLast() bool { } type UploadFilesResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Status UploadFilesStatus `protobuf:"varint,1,opt,name=status,proto3,enum=debugd.UploadFilesStatus" json:"status,omitempty"` - Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Status UploadFilesStatus `protobuf:"varint,1,opt,name=status,proto3,enum=debugd.UploadFilesStatus" json:"status,omitempty"` } func (x *UploadFilesResponse) Reset() { *x = UploadFilesResponse{} - mi := &file_debugd_service_debugd_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_debugd_service_debugd_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *UploadFilesResponse) String() string { @@ -645,7 +667,7 @@ func (*UploadFilesResponse) ProtoMessage() {} func (x *UploadFilesResponse) ProtoReflect() protoreflect.Message { mi := &file_debugd_service_debugd_proto_msgTypes[9] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -667,26 +689,22 @@ func (x *UploadFilesResponse) GetStatus() UploadFilesStatus { return UploadFilesStatus_UPLOAD_FILES_SUCCESS } -func (x *UploadFilesResponse) GetError() string { - if x != nil { - return x.Error - } - return "" -} - type ServiceUnit struct { - state protoimpl.MessageState `protogen:"open.v1"` - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Contents string `protobuf:"bytes,2,opt,name=contents,proto3" json:"contents,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Contents string `protobuf:"bytes,2,opt,name=contents,proto3" json:"contents,omitempty"` } func (x *ServiceUnit) Reset() { *x = ServiceUnit{} - mi := &file_debugd_service_debugd_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_debugd_service_debugd_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ServiceUnit) String() string { @@ -697,7 +715,7 @@ func (*ServiceUnit) ProtoMessage() {} func (x *ServiceUnit) ProtoReflect() protoreflect.Message { mi := &file_debugd_service_debugd_proto_msgTypes[10] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -727,17 +745,20 @@ func (x *ServiceUnit) GetContents() string { } type UploadSystemdServiceUnitsRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Units []*ServiceUnit `protobuf:"bytes,1,rep,name=units,proto3" json:"units,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Units []*ServiceUnit `protobuf:"bytes,1,rep,name=units,proto3" json:"units,omitempty"` } func (x *UploadSystemdServiceUnitsRequest) Reset() { *x = UploadSystemdServiceUnitsRequest{} - mi := &file_debugd_service_debugd_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_debugd_service_debugd_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *UploadSystemdServiceUnitsRequest) String() string { @@ -748,7 +769,7 @@ func (*UploadSystemdServiceUnitsRequest) ProtoMessage() {} func (x *UploadSystemdServiceUnitsRequest) ProtoReflect() protoreflect.Message { mi := &file_debugd_service_debugd_proto_msgTypes[11] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -771,17 +792,20 @@ func (x *UploadSystemdServiceUnitsRequest) GetUnits() []*ServiceUnit { } type UploadSystemdServiceUnitsResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Status UploadSystemdServiceUnitsStatus `protobuf:"varint,1,opt,name=status,proto3,enum=debugd.UploadSystemdServiceUnitsStatus" json:"status,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Status UploadSystemdServiceUnitsStatus 
`protobuf:"varint,1,opt,name=status,proto3,enum=debugd.UploadSystemdServiceUnitsStatus" json:"status,omitempty"` } func (x *UploadSystemdServiceUnitsResponse) Reset() { *x = UploadSystemdServiceUnitsResponse{} - mi := &file_debugd_service_debugd_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_debugd_service_debugd_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *UploadSystemdServiceUnitsResponse) String() string { @@ -792,7 +816,7 @@ func (*UploadSystemdServiceUnitsResponse) ProtoMessage() {} func (x *UploadSystemdServiceUnitsResponse) ProtoReflect() protoreflect.Message { mi := &file_debugd_service_debugd_proto_msgTypes[12] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -816,78 +840,136 @@ func (x *UploadSystemdServiceUnitsResponse) GetStatus() UploadSystemdServiceUnit var File_debugd_service_debugd_proto protoreflect.FileDescriptor -const file_debugd_service_debugd_proto_rawDesc = "" + - "\n" + - "\x1bdebugd/service/debugd.proto\x12\x06debugd\"2\n" + - "\x0eSetInfoRequest\x12 \n" + - "\x04info\x18\x01 \x03(\v2\f.debugd.InfoR\x04info\"@\n" + - "\x0fSetInfoResponse\x12-\n" + - "\x06status\x18\x01 \x01(\x0e2\x15.debugd.SetInfoStatusR\x06status\"\x10\n" + - "\x0eGetInfoRequest\"3\n" + - "\x0fGetInfoResponse\x12 \n" + - "\x04info\x18\x01 \x03(\v2\f.debugd.InfoR\x04info\".\n" + - "\x04Info\x12\x10\n" + - "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + - "\x05value\x18\x02 \x01(\tR\x05value\"\x16\n" + - "\x14DownloadFilesRequest\"z\n" + - "\x13FileTransferMessage\x124\n" + - "\x06header\x18\x01 \x01(\v2\x1a.debugd.FileTransferHeaderH\x00R\x06header\x12%\n" + - "\x05chunk\x18\x02 \x01(\v2\r.debugd.ChunkH\x00R\x05chunkB\x06\n" + - "\x04kind\"\x97\x01\n" + - "\x12FileTransferHeader\x12\x1e\n" + - "\n" + - "targetPath\x18\x01 \x01(\tR\n" + - "targetPath\x12\x12\n" + - "\x04mode\x18\x03 \x01(\rR\x04mode\x125\n" + - "\x13overrideServiceUnit\x18\x04 \x01(\tH\x00R\x13overrideServiceUnit\x88\x01\x01B\x16\n" + - "\x14_overrideServiceUnit\"5\n" + - "\x05Chunk\x12\x18\n" + - "\acontent\x18\x01 \x01(\fR\acontent\x12\x12\n" + - "\x04last\x18\x02 \x01(\bR\x04last\"^\n" + - "\x13UploadFilesResponse\x121\n" + - "\x06status\x18\x01 \x01(\x0e2\x19.debugd.UploadFilesStatusR\x06status\x12\x14\n" + - "\x05error\x18\x02 \x01(\tR\x05error\"=\n" + - "\vServiceUnit\x12\x12\n" + - "\x04name\x18\x01 \x01(\tR\x04name\x12\x1a\n" + - "\bcontents\x18\x02 \x01(\tR\bcontents\"M\n" + - " UploadSystemdServiceUnitsRequest\x12)\n" + - "\x05units\x18\x01 \x03(\v2\x13.debugd.ServiceUnitR\x05units\"d\n" + - "!UploadSystemdServiceUnitsResponse\x12?\n" + - "\x06status\x18\x01 \x01(\x0e2'.debugd.UploadSystemdServiceUnitsStatusR\x06status*?\n" + - "\rSetInfoStatus\x12\x14\n" + - "\x10SET_INFO_SUCCESS\x10\x00\x12\x18\n" + - "\x14SET_INFO_ALREADY_SET\x10\x01*\xb1\x01\n" + - "\x11UploadFilesStatus\x12\x18\n" + - "\x14UPLOAD_FILES_SUCCESS\x10\x00\x12\x1e\n" + - "\x1aUPLOAD_FILES_UPLOAD_FAILED\x10\x01\x12 \n" + - "\x1cUPLOAD_FILES_ALREADY_STARTED\x10\x02\x12!\n" + - "\x1dUPLOAD_FILES_ALREADY_FINISHED\x10\x03\x12\x1d\n" + - "\x19UPLOAD_FILES_START_FAILED\x10\x04*u\n" + - "\x1fUploadSystemdServiceUnitsStatus\x12(\n" + - "$UPLOAD_SYSTEMD_SERVICE_UNITS_SUCCESS\x10\x00\x12(\n" + - "$UPLOAD_SYSTEMD_SERVICE_UNITS_FAILURE\x10\x012\x94\x03\n" + - "\x06Debugd\x12<\n" 
+ - "\aSetInfo\x12\x16.debugd.SetInfoRequest\x1a\x17.debugd.SetInfoResponse\"\x00\x12<\n" + - "\aGetInfo\x12\x16.debugd.GetInfoRequest\x1a\x17.debugd.GetInfoResponse\"\x00\x12K\n" + - "\vUploadFiles\x12\x1b.debugd.FileTransferMessage\x1a\x1b.debugd.UploadFilesResponse\"\x00(\x01\x12N\n" + - "\rDownloadFiles\x12\x1c.debugd.DownloadFilesRequest\x1a\x1b.debugd.FileTransferMessage\"\x000\x01\x12q\n" + - "\x18UploadSystemServiceUnits\x12(.debugd.UploadSystemdServiceUnitsRequest\x1a).debugd.UploadSystemdServiceUnitsResponse\"\x00B8Z6github.com/edgelesssys/constellation/v2/debugd/serviceb\x06proto3" +var file_debugd_service_debugd_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x64, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x64, + 0x65, 0x62, 0x75, 0x67, 0x64, 0x22, 0x32, 0x0a, 0x0e, 0x53, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x64, 0x2e, 0x49, + 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0x40, 0x0a, 0x0f, 0x53, 0x65, 0x74, + 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x06, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x64, + 0x65, 0x62, 0x75, 0x67, 0x64, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x10, 0x0a, 0x0e, 0x47, + 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x33, 0x0a, + 0x0f, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x20, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, + 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x64, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, + 0x66, 0x6f, 0x22, 0x2e, 0x0a, 0x04, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x22, 0x16, 0x0a, 0x14, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x46, 0x69, + 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x7a, 0x0a, 0x13, 0x46, 0x69, + 0x6c, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x12, 0x34, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x64, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x54, + 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, + 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x25, 0x0a, 0x05, 0x63, 0x68, 0x75, 0x6e, 0x6b, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x64, 0x2e, + 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x48, 0x00, 0x52, 0x05, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x42, 0x06, + 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0x97, 0x01, 0x0a, 0x12, 0x46, 0x69, 0x6c, 0x65, 0x54, + 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x1e, 0x0a, + 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x50, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 
0x50, 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, + 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x6d, 0x6f, 0x64, + 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x55, 0x6e, 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, + 0x52, 0x13, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x55, 0x6e, 0x69, 0x74, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x6f, 0x76, 0x65, + 0x72, 0x72, 0x69, 0x64, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x6e, 0x69, 0x74, + 0x22, 0x35, 0x0a, 0x05, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6c, 0x61, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x04, 0x6c, 0x61, 0x73, 0x74, 0x22, 0x48, 0x0a, 0x13, 0x55, 0x70, 0x6c, 0x6f, 0x61, + 0x64, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, + 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, + 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x64, 0x2e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x46, 0x69, + 0x6c, 0x65, 0x73, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x22, 0x3d, 0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x6e, 0x69, 0x74, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, + 0x22, 0x4d, 0x0a, 0x20, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, + 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x29, 0x0a, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x64, 0x2e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x55, 0x6e, 0x69, 0x74, 0x52, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x22, + 0x64, 0x0a, 0x21, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x64, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x64, 0x2e, 0x55, 0x70, + 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x2a, 0x3f, 0x0a, 0x0d, 0x53, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x45, 0x54, 0x5f, 0x49, 0x4e, + 0x46, 0x4f, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, + 0x53, 0x45, 0x54, 0x5f, 0x49, 0x4e, 0x46, 0x4f, 0x5f, 0x41, 0x4c, 0x52, 0x45, 0x41, 0x44, 0x59, + 0x5f, 0x53, 0x45, 0x54, 0x10, 0x01, 0x2a, 0xb1, 0x01, 0x0a, 0x11, 0x55, 0x70, 0x6c, 0x6f, 0x61, + 0x64, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x14, + 0x55, 0x50, 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x53, 0x5f, 
0x53, 0x55, 0x43, + 0x43, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x1e, 0x0a, 0x1a, 0x55, 0x50, 0x4c, 0x4f, 0x41, 0x44, + 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x53, 0x5f, 0x55, 0x50, 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x46, 0x41, + 0x49, 0x4c, 0x45, 0x44, 0x10, 0x01, 0x12, 0x20, 0x0a, 0x1c, 0x55, 0x50, 0x4c, 0x4f, 0x41, 0x44, + 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x53, 0x5f, 0x41, 0x4c, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x53, + 0x54, 0x41, 0x52, 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, 0x21, 0x0a, 0x1d, 0x55, 0x50, 0x4c, 0x4f, + 0x41, 0x44, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x53, 0x5f, 0x41, 0x4c, 0x52, 0x45, 0x41, 0x44, 0x59, + 0x5f, 0x46, 0x49, 0x4e, 0x49, 0x53, 0x48, 0x45, 0x44, 0x10, 0x03, 0x12, 0x1d, 0x0a, 0x19, 0x55, + 0x50, 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x53, 0x5f, 0x53, 0x54, 0x41, 0x52, + 0x54, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x2a, 0x75, 0x0a, 0x1f, 0x55, 0x70, + 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x28, 0x0a, + 0x24, 0x55, 0x50, 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x53, 0x59, 0x53, 0x54, 0x45, 0x4d, 0x44, 0x5f, + 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x49, 0x54, 0x53, 0x5f, 0x53, 0x55, + 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x28, 0x0a, 0x24, 0x55, 0x50, 0x4c, 0x4f, 0x41, + 0x44, 0x5f, 0x53, 0x59, 0x53, 0x54, 0x45, 0x4d, 0x44, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, + 0x45, 0x5f, 0x55, 0x4e, 0x49, 0x54, 0x53, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, + 0x01, 0x32, 0x94, 0x03, 0x0a, 0x06, 0x44, 0x65, 0x62, 0x75, 0x67, 0x64, 0x12, 0x3c, 0x0a, 0x07, + 0x53, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x64, + 0x2e, 0x53, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x17, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x64, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3c, 0x0a, 0x07, 0x47, 0x65, + 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x64, 0x2e, 0x47, + 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, + 0x64, 0x65, 0x62, 0x75, 0x67, 0x64, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0b, 0x55, 0x70, 0x6c, 0x6f, + 0x61, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x1b, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x64, + 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1b, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x64, 0x2e, 0x55, 0x70, + 0x6c, 0x6f, 0x61, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x28, 0x01, 0x12, 0x4e, 0x0a, 0x0d, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, + 0x64, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x1c, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x64, 0x2e, + 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x64, 0x2e, 0x46, 0x69, + 0x6c, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x71, 0x0a, 0x18, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x53, + 0x79, 0x73, 0x74, 0x65, 0x6d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x6e, 0x69, 0x74, + 0x73, 
0x12, 0x28, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x64, 0x2e, 0x55, 0x70, 0x6c, 0x6f, 0x61, + 0x64, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, + 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x64, 0x65, + 0x62, 0x75, 0x67, 0x64, 0x2e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x79, 0x73, 0x74, 0x65, + 0x6d, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x38, 0x5a, 0x36, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x64, 0x67, 0x65, 0x6c, 0x65, 0x73, 0x73, 0x73, + 0x79, 0x73, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x65, 0x6c, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2f, 0x76, 0x32, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x64, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} var ( file_debugd_service_debugd_proto_rawDescOnce sync.Once - file_debugd_service_debugd_proto_rawDescData []byte + file_debugd_service_debugd_proto_rawDescData = file_debugd_service_debugd_proto_rawDesc ) func file_debugd_service_debugd_proto_rawDescGZIP() []byte { file_debugd_service_debugd_proto_rawDescOnce.Do(func() { - file_debugd_service_debugd_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_debugd_service_debugd_proto_rawDesc), len(file_debugd_service_debugd_proto_rawDesc))) + file_debugd_service_debugd_proto_rawDescData = protoimpl.X.CompressGZIP(file_debugd_service_debugd_proto_rawDescData) }) return file_debugd_service_debugd_proto_rawDescData } var file_debugd_service_debugd_proto_enumTypes = make([]protoimpl.EnumInfo, 3) var file_debugd_service_debugd_proto_msgTypes = make([]protoimpl.MessageInfo, 13) -var file_debugd_service_debugd_proto_goTypes = []any{ +var file_debugd_service_debugd_proto_goTypes = []interface{}{ (SetInfoStatus)(0), // 0: debugd.SetInfoStatus (UploadFilesStatus)(0), // 1: debugd.UploadFilesStatus (UploadSystemdServiceUnitsStatus)(0), // 2: debugd.UploadSystemdServiceUnitsStatus @@ -936,16 +1018,174 @@ func file_debugd_service_debugd_proto_init() { if File_debugd_service_debugd_proto != nil { return } - file_debugd_service_debugd_proto_msgTypes[6].OneofWrappers = []any{ + if !protoimpl.UnsafeEnabled { + file_debugd_service_debugd_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetInfoRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_debugd_service_debugd_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetInfoResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_debugd_service_debugd_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetInfoRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_debugd_service_debugd_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetInfoResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_debugd_service_debugd_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Info); i { + case 0: + return &v.state + 
case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_debugd_service_debugd_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DownloadFilesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_debugd_service_debugd_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileTransferMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_debugd_service_debugd_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileTransferHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_debugd_service_debugd_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Chunk); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_debugd_service_debugd_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UploadFilesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_debugd_service_debugd_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceUnit); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_debugd_service_debugd_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UploadSystemdServiceUnitsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_debugd_service_debugd_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UploadSystemdServiceUnitsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_debugd_service_debugd_proto_msgTypes[6].OneofWrappers = []interface{}{ (*FileTransferMessage_Header)(nil), (*FileTransferMessage_Chunk)(nil), } - file_debugd_service_debugd_proto_msgTypes[7].OneofWrappers = []any{} + file_debugd_service_debugd_proto_msgTypes[7].OneofWrappers = []interface{}{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_debugd_service_debugd_proto_rawDesc), len(file_debugd_service_debugd_proto_rawDesc)), + RawDescriptor: file_debugd_service_debugd_proto_rawDesc, NumEnums: 3, NumMessages: 13, NumExtensions: 0, @@ -957,6 +1197,7 @@ func file_debugd_service_debugd_proto_init() { MessageInfos: file_debugd_service_debugd_proto_msgTypes, }.Build() File_debugd_service_debugd_proto = out.File + file_debugd_service_debugd_proto_rawDesc = nil file_debugd_service_debugd_proto_goTypes = nil file_debugd_service_debugd_proto_depIdxs = nil } diff --git a/debugd/service/debugd.proto b/debugd/service/debugd.proto index 77bc94a60..db9c3b896 100644 --- a/debugd/service/debugd.proto +++ b/debugd/service/debugd.proto @@ -58,7 +58,6 @@ message Chunk { message UploadFilesResponse { UploadFilesStatus status = 1; - string error = 
2; } enum UploadFilesStatus { diff --git a/dev-docs/chain-of-trust.jpg b/dev-docs/chain-of-trust.jpg deleted file mode 100644 index 713e4715e..000000000 Binary files a/dev-docs/chain-of-trust.jpg and /dev/null differ diff --git a/dev-docs/conventions.md b/dev-docs/conventions.md index 5f9e9c132..02b08e1db 100644 --- a/dev-docs/conventions.md +++ b/dev-docs/conventions.md @@ -14,10 +14,10 @@ This project also aims to follow the [Go Proverbs](https://go-proverbs.github.io ## Linting This projects uses [golangci-lint](https://golangci-lint.run/) for linting. -You can [install golangci-lint](https://golangci-lint.run/welcome/install/#local-installation) locally, +You can [install golangci-lint](https://golangci-lint.run/usage/install/#linux-and-windows) locally, but there is also a CI action to ensure compliance. -It is also recommended to use golangci-lint (and [gofumpt](https://github.com/mvdan/gofumpt) as formatter) in your IDE, by adding the recommended VS Code Settings or by [configuring it yourself](https://golangci-lint.run/welcome/integrations/) +It is also recommended to use golangci-lint (and [gofumpt](https://github.com/mvdan/gofumpt) as formatter) in your IDE, by adding the recommended VS Code Settings or by [configuring it yourself](https://golangci-lint.run/usage/integrations/#editor-integration) ## Logging diff --git a/dev-docs/howto/bare-metal/README.md b/dev-docs/howto/bare-metal/README.md deleted file mode 100644 index e4df4cd97..000000000 --- a/dev-docs/howto/bare-metal/README.md +++ /dev/null @@ -1,66 +0,0 @@ -# Bare-metal SNP setup for Constellation - -## Prepare Host - -The bare-metal host machine needs to be able to start SEV-SNP VMs. -A thorough explanation can be found here: . - -First checkout the snp-latest branch: - -```bash -git clone https://github.com/AMDESE/AMDSEV.git -cd AMDSEV -git checkout snp-latest -``` - -Then enable TPM2 support by setting `-DTPM2_ENABLE` in the OVMF build command -found in `common.sh`: - -```patch -diff --git a/common.sh b/common.sh -index 9eee947..52bf507 100755 ---- a/common.sh -+++ b/common.sh -@@ -155,7 +155,7 @@ build_install_ovmf() - GCCVERS="GCC5" - fi - -- BUILD_CMD="nice build -q --cmd-len=64436 -DDEBUG_ON_SERIAL_PORT=TRUE -n $(getconf _NPROCESSORS_ONLN) ${GCCVERS:+-t $GCCVERS} -a X64 -p OvmfPkg/OvmfPkgX64.dsc" -+ BUILD_CMD="nice build -q --cmd-len=64436 -DTPM2_ENABLE -DDEBUG_ON_SERIAL_PORT=TRUE -n $(getconf _NPROCESSORS_ONLN) ${GCCVERS:+-t $GCCVERS} -a X64 -p OvmfPkg/OvmfPkgX64.dsc" - - # initialize git repo, or update existing remote to currently configured one - if [ -d ovmf ]; then -``` - -Build and package the binaries. Then install the newly build kernel: - -```bash -./build.sh --package -cd linux -dpkg -i linux-image-6.9.0-rc7-snp-host-05b10142ac6a_6.9.0-rc7-g05b10142ac6a-2_amd64.deb -``` - -Reboot, verify that the right BIOS setting are set as described in - -and select the new kernel in the boot menu. Note that GRUB usually automatically -select the newest installed kernel as default. - -Download a Constellation qemu image, the `constellation-conf.yaml`, and -the `launch-constellation.sh` script in the directory right next to the -`AMDSEV` folder. 
- -```bash -wget https://raw.githubusercontent.com/edgelesssys/constellation/main/dev-docs/howto/bare-metal/launch-constellation.sh -wget https://cdn.confidential.cloud/constellation/v1/ref/main/stream/console/v2.17.0-pre.0.20240516182331-5fb2a2cb89f2/image/csp/qemu/qemu-vtpm/image.raw -wget < link to the constellation CLI provided by Edgeless > -wget < link to the constellation config provided by Edgeless > -``` - -Install and setup [docker](https://docs.docker.com/engine/install/), -install swtpm, dnsmasq and tmux. - -Then simply run: - -```bash -sudo ./launch-constellation.sh -``` diff --git a/dev-docs/howto/bare-metal/launch-constellation.sh b/dev-docs/howto/bare-metal/launch-constellation.sh deleted file mode 100644 index 27fc4f83b..000000000 --- a/dev-docs/howto/bare-metal/launch-constellation.sh +++ /dev/null @@ -1,202 +0,0 @@ -#!/usr/bin/env bash - -set -euo pipefail - -set -x - -function cleanup { - kill -SIGTERM "$(cat "${PWD}"/qemu-dnsmasq-br0.pid)" || true - rm "${PWD}"/qemu-dnsmasq-br0.pid || true - - kill -SIGTERM "$(cat "${PWD}"/swtpm0.pid)" || true - kill -SIGTERM "$(cat "${PWD}"/swtpm1.pid)" || true - - ip l delete br0 || true - ip l delete tap0 || true - ip l delete tap1 || true - - rm -r "${PWD}"/tpm0 || true - rm -r "${PWD}"/tpm1 || true - - rm OVMF_VARS_0.fd || true - rm OVMF_VARS_1.fd || true - - rm dnsmasq.leases || true - rm dnsmasq.log || true - - rm constellation-mastersecret.json || true - rm constellation-admin.conf || true - rm constellation-cluster.log || true - rm constellation-debug.log || true - rm constellation-state.yaml || true - rm -r constellation-upgrade || true - - docker stop metadata-server || true -} - -trap cleanup EXIT - -get_mac() { - printf '52:54:%02X:%02X:%02X:%02X' $((RANDOM % 256)) $((RANDOM % 256)) $((RANDOM % 256)) $((RANDOM % 256)) -} - -mac_0=$(get_mac) -mac_1=$(get_mac) - -# Regarding network setup see: https://bbs.archlinux.org/viewtopic.php?id=207907 - -dd if=/dev/zero of=disk0.img iflag=fullblock bs=1M count=10000 && sync -dd if=/dev/zero of=disk1.img iflag=fullblock bs=1M count=10000 && sync - -DEFAULT_INTERFACE=$(ip r show default | cut -d' ' -f5) - -ip link add name br0 type bridge || true -ip addr add 10.42.0.1/16 dev br0 || true -ip link set br0 up - -dnsmasq \ - --pid-file="${PWD}"/qemu-dnsmasq-br0.pid \ - --interface=br0 \ - --bind-interfaces \ - --log-facility="${PWD}"/dnsmasq.log \ - --dhcp-range=10.42.0.2,10.42.255.254 \ - --dhcp-leasefile="${PWD}"/dnsmasq.leases \ - --dhcp-host="${mac_0}",10.42.1.1,control-plane0 \ - --dhcp-host="${mac_1}",10.42.2.1,worker0 - -password=$(tr -dc 'A-Za-z0-9!?%=' < /dev/urandom | head -c 32) || true -password_hex=$(echo -n "${password}" | xxd -p -u -c 256) -echo "${password_hex}" - -# htpasswd from apache2-utils -password_bcrypt=$(htpasswd -bnBC 10 "" "${password}" | tr -d ':\n') - -docker run \ - -dit \ - --rm \ - --name metadata-server \ - --net=host \ - --mount type=bind,source="$(pwd)"/dnsmasq.leases,target=/dnsmasq.leases \ - ghcr.io/edgelesssys/constellation/qemu-metadata-api:v2.17.0-pre.0.20240603111213-d7ce6af383f2 \ - --dnsmasq-leases /dnsmasq.leases --initsecrethash "${password_bcrypt}" - -cat > ./constellation-state.yaml <<- EOM -version: v1 # Schema version of this state file. -# State of the cluster's cloud resources. These values are retrieved during -infrastructure: - uid: qemu # Unique identifier the cluster's cloud resources are tagged with. - clusterEndpoint: 10.42.1.1 # Endpoint the cluster can be reached at. This is the endpoint that is being used by the CLI. 
- inClusterEndpoint: 10.42.1.1 # The Cluster uses to reach itself. This might differ from the ClusterEndpoint in case e.g., - initSecret: "${password_hex}" # Secret used to authenticate the bootstrapping node. - # List of Subject Alternative Names (SANs) to add to the Kubernetes API server certificate. - apiServerCertSANs: - - 10.42.1.1 - name: mini-qemu # Name used in the cluster's named resources. - ipCidrNode: 10.42.0.0/16 # CIDR range of the cluster's nodes. -# DO NOT EDIT. State of the Constellation Kubernetes cluster. -clusterValues: - clusterID: "" # Unique identifier of the cluster. - ownerID: "" # Unique identifier of the owner of the cluster. - measurementSalt: "" # Salt used to generate the ClusterID on the bootstrapping node. -EOM - -sysctl net.ipv4.ip_forward=1 -sysctl net.ipv6.conf.default.forwarding=1 -sysctl net.ipv6.conf.all.forwarding=1 - -iptables -t nat -C POSTROUTING -o "${DEFAULT_INTERFACE}" -j MASQUERADE || iptables -t nat -I POSTROUTING -o "${DEFAULT_INTERFACE}" -j MASQUERADE -iptables -C FORWARD -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT || iptables -I FORWARD -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT -iptables -P FORWARD ACCEPT - -ip tuntap add dev tap0 mode tap user "${USER}" || true -ip link set tap0 up promisc on -ip link set tap0 master br0 - -iptables -C FORWARD -i tap0 -o "${DEFAULT_INTERFACE}" -j ACCEPT || iptables -I FORWARD -i tap0 -o "${DEFAULT_INTERFACE}" -j ACCEPT - -ip tuntap add dev tap1 mode tap user "${USER}" || true -ip link set tap1 up promisc on -ip link set tap1 master br0 - -iptables -C FORWARD -i tap1 -o "${DEFAULT_INTERFACE}" -j ACCEPT || iptables -I FORWARD -i tap1 -o "${DEFAULT_INTERFACE}" -j ACCEPT - -# -# ovmf -# - -cp AMDSEV/usr/local/share/qemu/OVMF_VARS.fd OVMF_VARS_0.fd -cp AMDSEV/usr/local/share/qemu/OVMF_VARS.fd OVMF_VARS_1.fd - -# -# swtpm -# - -mkdir "${PWD}"/tpm0 || true -swtpm_setup --tpm2 --tpmstate "${PWD}/tpm0" --create-ek-cert --create-platform-cert --allow-signing --overwrite --pcr-banks - --logfile "${PWD}/tpm0/setup.log" -swtpm socket --tpm2 --tpmstate dir="${PWD}/tpm0",mode=0600 --ctrl type=unixio,path="${PWD}/tpm0/swtpm-sock" --log file="${PWD}/tpm0/tpm.log",level=20,truncate --pid file="${PWD}/swtpm0.pid" & - -mkdir "${PWD}"/tpm1 || true -swtpm_setup --tpm2 --tpmstate "${PWD}/tpm1" --create-ek-cert --create-platform-cert --allow-signing --overwrite --pcr-banks - --logfile "${PWD}/tpm1/setup.log" -swtpm socket --tpm2 --tpmstate dir="${PWD}/tpm1",mode=0600 --ctrl type=unixio,path="${PWD}/tpm1/swtpm-sock" --log file="${PWD}/tpm1/tpm.log",level=20,truncate --pid file="${PWD}/swtpm1.pid" & - -tmux new-session -d -s const-sess - -tmux split-window -tmux split-window - -launch_cmd_base_sev="AMDSEV/usr/local/bin/qemu-system-x86_64 \ - -enable-kvm \ - -cpu EPYC-v4 \ - -machine q35,smm=off \ - -smp 4,maxcpus=255 \ - -m 2048M,slots=5,maxmem=$((2048 + 8192))M \ - -no-reboot \ - -bios AMDSEV/usr/local/share/qemu/OVMF_CODE.fd \ - -drive file=./image.raw,if=none,id=disk1,format=raw,readonly=on \ - -device virtio-blk-pci,drive=disk1,id=virtio-disk1,disable-legacy=on,iommu_platform=true,bootindex=1 \ - -machine memory-encryption=sev0,vmport=off \ - -object memory-backend-memfd,id=ram1,size=2048M,share=true,prealloc=false \ - -machine memory-backend=ram1 \ - -object sev-snp-guest,id=sev0,cbitpos=51,reduced-phys-bits=1 \ - -nographic \ - -device virtio-blk-pci,drive=disk2,id=virtio-disk2 \ - -tpmdev emulator,id=tpm0,chardev=chrtpm \ - -device tpm-crb,tpmdev=tpm0" - -# shellcheck disable=2034 
-launch_cmd_base_no_sev="AMDSEV/usr/local/bin/qemu-system-x86_64 \ - -enable-kvm \ - -cpu EPYC-v4 \ - -machine q35 \ - -smp 1,maxcpus=255 \ - -m 2048M,slots=5,maxmem=10240M \ - -no-reboot \ - -drive if=pflash,format=raw,unit=0,file=${PWD}/OVMF_CODE.fd,readonly=true \ - -drive file=./image.raw,if=none,id=disk1,format=raw,readonly=on \ - -device virtio-blk-pci,drive=disk1,id=virtio-disk1,disable-legacy=on,iommu_platform=true,bootindex=1 \ - -nographic \ - -device virtio-blk-pci,drive=disk2,id=virtio-disk2 \ - -tpmdev emulator,id=tpm0,chardev=chrtpm \ - -device tpm-crb,tpmdev=tpm0" - -launch_cmd_0="${launch_cmd_base_sev} \ - -drive if=pflash,format=raw,unit=0,file=${PWD}/OVMF_VARS_0.fd \ - -device virtio-net,netdev=network0,mac=${mac_0} \ - -netdev tap,id=network0,ifname=tap0,script=no,downscript=no \ - -drive file=./disk0.img,id=disk2,if=none,format=raw \ - -chardev socket,id=chrtpm,path=${PWD}/tpm0/swtpm-sock" -launch_cmd_1="${launch_cmd_base_sev} \ - -drive if=pflash,format=raw,unit=0,file=${PWD}/OVMF_VARS_1.fd \ - -device virtio-net,netdev=network0,mac=${mac_1} \ - -netdev tap,id=network0,ifname=tap1,script=no,downscript=no \ - -drive file=./disk1.img,id=disk2,if=none,format=raw \ - -chardev socket,id=chrtpm,path=${PWD}/tpm1/swtpm-sock" - -init_cmd="./constellation apply --skip-phases infrastructure" - -tmux send -t const-sess:0.0 "${launch_cmd_0}" ENTER -sleep 3 -tmux send -t const-sess:0.1 "${launch_cmd_1}" ENTER -tmux send -t const-sess:0.2 "${init_cmd}" ENTER - -tmux a -t const-sess diff --git a/dev-docs/howto/kubeconfigs.md b/dev-docs/howto/kubeconfigs.md deleted file mode 100644 index 0324e9bdc..000000000 --- a/dev-docs/howto/kubeconfigs.md +++ /dev/null @@ -1,88 +0,0 @@ -# How to create kubeconfigs for users - -One of the first things to do after setting up a Constellation cluster is to hand out kubeconfig files to its prospective users. -Adhering to the *principle of least privilege*, it is not advisable to share the admin config with all cluster users. -Instead, users should authenticate individually to the API server, and permissions should be controlled by [RBAC]. - -Constellation users authenticate to the API server with a client TLS certificate, signed by the Kubernetes CA. -The user's identity and group memberships are taken from the certificates common name and organizations, respectively. -Details can be found in the upstream [authn documentation]. - -The [`kubeadm` documentation] describes a process for creating new kubeconfigs, but the instructions requires access to a control-plane node, or at least the Kubernetes CA certificate and key. -While the certificates can be extracted, e.g. by spawning a [node debugger pod], we can take a safer road that only requires `kubectl`. -The example script below creates a new kubeconfig for a user and optional group memberships. -It uses the [Kubernetes certificate API] to obtain a user certificate signed by the cluster CA. 
- -[RBAC]: https://kubernetes.io/docs/reference/access-authn-authz/rbac/ -[authn documentation]: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#users-in-kubernetes -[`kubeadm` documentation]: https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-certs/#kubeconfig-additional-users -[node debugger pod]: https://kubernetes.io/docs/tasks/debug/debug-cluster/kubectl-node-debug/ -[Kubernetes certificate API]: https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/ - -```sh -#!/bin/sh -set -eu - -if [ $# -lt 2 ]; then - echo "Usage: $0 username [groupname...]" >&2 - exit 1 -fi - -user=$1 -shift - -subj="/CN=${user}" -for g in "$@"; do - subj="${subj}/O=$g" -done - -openssl req -newkey rsa:4096 -out ${user}.csr -keyout ${user}.key -nodes -subj "${subj}" - -kubectl apply -f - <${user}.pem -kubectl delete csr ${user} - -kubectl get cm kube-root-ca.crt -o go-template='{{ index .data "ca.crt" }}' >ca.pem -kubectl get cm kubeadm-config -n kube-system -o=jsonpath="{.data.ClusterConfiguration}" >clusterconfig.yaml -cluster=$(yq .clusterName clusterconfig.yaml) -endpoint=$(yq .controlPlaneEndpoint clusterconfig.yaml) - -cat >${user}.conf < -``` -R = Sig-CPU(<launch digest>, <payload>, <auxiliary data>) -``` - -The field `payload` is controlled by the software running inside the CVM. -In the case of a Constellation node, the `payload` is always the public key of the respective Bootstrapper running inside the CVM. -Thus, `R` can be seen as a certificate for that public key issued by the CPU. -Based on this, nodes establish attested TLS (aTLS) connections. -aTLS is used during [cluster creation](#cluster-creation) and when [growing a cluster](#cluster-growth). - -The field `auxiliary data` is populated automatically by the CVM platform and, among other things, includes information like CPU firmware versions. - -Note that this description of `R` is highly abstract. - -### Measurements - -In the ideal case, the underlying CVM platform does not inject any of its own software into a CVM. -In that case, a Constellation node image can contain its own firmware/UEFI. -This allows for the creation of node images for which the launch digest covers all defining parts of a node, including the firmware, the kernel, the kernel command line, and the disk image. -In this case, the launch digest is the only measurement that's required to verify the identity and integrity of a node. - -### Measured Boot as fallback - -However, currently, all supported CVM platforms (AWS, Azure, and GCP) inject custom firmware into CVMs. -Thus, in practice, Constellation relies on conventional [measured boot](https://docs.edgeless.systems/constellation/architecture/images#measured-boot) to reflect the identity and integrity of nodes. - -In measured boot, in general, the software components involved in the boot process of a system are "measured" into the 16 platform configuration registers (PCRs) of a Trusted Platform Module (TPM). -The values of these registers are also called "runtime measurements". -All supported CVM platforms provide TPMs to CVMs. - -With measured boot, Constellation relies on TPM-based remote attestation for nodes. -TPM-based remote attestation is similar to confidential computing-based remote attestation. Instead of the value `R`, the value `R'` is used. - -``` -R' = Sig-TPM(<payload>, <auxiliary data>) -``` - -The field `auxiliary data` is populated automatically by the TPM and most notably contains the 16 PCRs. -Constellation uses the field `payload` as usual and sets it to the public key of the respective CVM's Bootstrapper.
-When verifying `R'`, Constellation compares the 16 PCRs to those given in the attestation config. - -#### Differences between CVM platforms - -Each supported CVM platform populates the 16 PCRs in different ways. Details can be found in the [Constellation documentation](https://docs.edgeless.systems/constellation/architecture/attestation#runtime-measurements). -Sig-TPM itself is also verified differently for each cloud. - -Currently, on AWS and GCP the TPM implementation resides outside the CVM. -On Azure, the TPM implementation is part of the injected firmware and resides inside the CVM. -More information can be found in the [Constellation documentation](https://docs.edgeless.systems/constellation/overview/clouds). - -## Kubernetes bootstrapping on the first node - -The Bootstrapper on the first node downloads and verifies the Kubernetes binaries, using the hashes it received from the CLI. -These binaries include, for example, [kubelet](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/), [kube-apiserver](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/), and [etcd](https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/). -With these, the Bootstrapper creates a single-node Kubernetes cluster. -Etcd is a distributed key-value store that Kubernetes uses to store configuration data for services. -The etcd agent runs on each control-plane node of a cluster. -The agents use mTLS for communication between them. -Etcd uses the Raft protocol (over mTLS) to distribute state between nodes. -All essential configuration data of a cluster is kept in etcd. - -Initially, the Bootstrapper on the first node [writes](https://github.com/edgelesssys/constellation/blob/d65987cb15cf9ebdbbd2975f177937c1acbc90f8/bootstrapper/internal/kubernetes/kubernetes.go#L173) the hashes of the expected Kubernetes binaries to a specific key in etcd. - -Next, the CLI connects to the Kubernetes API server (kube-apiserver) using the kubeconfig file it received from the Bootstrapper. -This results in an mTLS connection between the CLI and the Kubernetes API server. -The CLI uses this connection for two essential operations at the Kubernetes level: - -1. It writes the attestation config to a specific key in etcd. -1. It executes the [hardcoded Helm charts](#cli-root-of-trust), which, most notably, install the three core services KeyService, JoinService, and VerificationService, the [constellation-node-operator](https://github.com/edgelesssys/constellation/tree/main/operators/constellation-node-operator), and a small number of standard services like Cilium and cert-manager. - -The latter causes the first node to download, verify, and run the containers defined in the Helm charts. -The containers that are specific to Constellation are hosted at `ghcr.io/edgelesssys`. - -After this, the Constellation cluster is operational on the first node. - -## Cluster growth - -Additional nodes can now join the cluster - either as control-plane nodes or as worker nodes. -For both, the process for joining the cluster is identical. -First, the Bootstrapper running on a *new node* contacts the JoinService of the existing cluster. -The JoinService verifies the remote-attestation statement of the new node using the attestation config stored in etcd.
-On success, an aTLS connection between the two is created, which is used mainly for the following (see the [interface definition](https://github.com/edgelesssys/constellation/blob/main/joinservice/joinproto/join.proto) for a comprehensive list of exchanged data): - -1. The new node sends a certificate signing request (CSR) for its *node certificate* to the JoinService. -1. The JoinService issues a corresponding certificate with a lifetime of one year and sends it to the new node. -The JoinService uses the signing key of the Kubernetes cluster for this, which is [generated by kubeadm](https://kubernetes.io/docs/setup/best-practices/certificates/). Note that the lifetime of the node certificate is a best practice only, as Constellation relies on the untrusted infrastructure to provide time when validating certificates. -1. The JoinService sends a [kubeadm token](https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-token/) to the new node. -1. The JoinService sends the hashes of the expected Kubernetes binaries to the new node. -1. The JoinService sends the encryption key for the new node's local storage. -This key is generated and managed by the cluster's KeyService. -1. The JoinService sends the certificate of the cluster's control plane to the new node. - -After this, the aTLS connection is closed and the node is marked as "initialized" in the same way as described [above](#cluster-creation). - -The Bootstrapper downloads, verifies, and runs the given Kubernetes binaries. -Further, the Bootstrapper uses the kubeadm token to download the configuration of the cluster from the Kubernetes API server. -The kubeadm token is never used after this. - -The kubelet on the new node uses its own node certificate and the certificate of the cluster's control plane (both of which the new node received from the JoinService) to establish an mTLS connection with the cluster's Kubernetes API server. -Once connected, the new node registers itself as control-plane node or worker node of the cluster. -This process uses the standard Kubernetes mechanisms for adding nodes. - -In Constellation, a virtual private network (VPN) exists between all nodes of a cluster. -This VPN is created with the help of Cilium. -To join this VPN, the new node generates WireGuard credentials for itself and writes the public key to etcd via the mTLS connection with the Kubernetes API server. -It also downloads the public keys of existing nodes from etcd. -Subsequently, the Cilium agents running on other nodes fetch the new node's public key from etcd as well. - -Note that etcd communication between nodes is an exception: This traffic always goes via mTLS based on node certificates. - -## Cluster upgrade - -Whenever a cluster is [upgraded](https://docs.edgeless.systems/constellation/workflows/upgrade), the CLI connects to the Kubernetes API server and, essentially, updates the following data in etcd: - -1. The attestation config -1. The hashes of the expected Kubernetes binaries - -Further, the CLI applies updated Helm charts to update the cluster's services. -Again, these Helm charts are hardcoded in the CLI. -See the [implementation](https://github.com/edgelesssys/constellation/blob/d65987cb15cf9ebdbbd2975f177937c1acbc90f8/cli/internal/cmd/apply.go#L358) of the `apply()` function for a sequence diagram of all steps. -Subsequently, the constellation-node-operator replaces existing nodes with new ones. -New nodes go through the [usual process for joining the cluster](#cluster-growth).
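To make the comparison of runtime measurements described above more concrete, here is a minimal, self-contained Go sketch of how expected PCR values from an attestation config could be checked against the measurements reported by a node. This is an illustration only, not the Constellation implementation; the `Measurement` type and the warn-only semantics are assumptions modeled on the `join-config` shown in the examples below.

```go
package main

import (
	"bytes"
	"encoding/hex"
	"fmt"
)

// Measurement is an expected PCR value as it might appear in an attestation config.
// WarnOnly marks values that are compared but not enforced.
type Measurement struct {
	Expected string // hex-encoded digest
	WarnOnly bool
}

// compareMeasurements checks the PCRs reported by a node against the expected values.
// Enforced mismatches fail verification; warn-only mismatches are only reported.
func compareMeasurements(expected map[uint32]Measurement, reported map[uint32][]byte) ([]string, error) {
	var warnings []string
	for idx, want := range expected {
		wantDigest, err := hex.DecodeString(want.Expected)
		if err != nil {
			return nil, fmt.Errorf("decoding expected PCR[%d]: %w", idx, err)
		}
		got, ok := reported[idx]
		if ok && bytes.Equal(wantDigest, got) {
			continue
		}
		if want.WarnOnly {
			warnings = append(warnings, fmt.Sprintf("PCR[%d] does not match the expected value", idx))
			continue
		}
		return warnings, fmt.Errorf("PCR[%d] does not match the expected value", idx)
	}
	return warnings, nil
}

func main() {
	expected := map[uint32]Measurement{
		15: {Expected: "0000000000000000000000000000000000000000000000000000000000000000"},
	}
	reported := map[uint32][]byte{
		15: make([]byte, 32), // all-zero digest, matches the expected value above
	}
	warnings, err := compareMeasurements(expected, reported)
	fmt.Println(warnings, err)
}
```

In a real cluster, the expected values come from the `attestationConfig` entry of the `join-config` ConfigMap shown in the next section.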
- -## Examples - -This section gives real life examples of key data structures and the corresponding commands to retrieve those. - -### Attestation config - -```bash -kubectl -n kube-system get cm join-config -o json -``` -```json -{ - "apiVersion": "v1", - "binaryData": { - "measurementSalt": "2A4Fzfdr/61XbJvk1PDqzh0R4rVnEujyXudsfgRZzUY=" - }, - "data": { - "attestationConfig": "{\"measurements\":{\"1\":{\"expected\":\"3695dcc55e3aa34027c27793c85c723c697d708c42d1f73bd6fa4f26608a5b24\",\"warnOnly\":true},\"11\":{\"expected\":\"f09cef0d077127fb26bc8d013fc09e13afbb70f0f734ced98f46666544998efe\",\"warnOnly\":true},\"12\":{\"expected\":\"0000000000000000000000000000000000000000000000000000000000000000\",\"warnOnly\":true},\"13\":{\"expected\":\"0000000000000000000000000000000000000000000000000000000000000000\",\"warnOnly\":true},\"14\":{\"expected\":\"0000000000000000000000000000000000000000000000000000000000000000\",\"warnOnly\":true},\"15\":{\"expected\":\"0000000000000000000000000000000000000000000000000000000000000000\",\"warnOnly\":true},\"2\":{\"expected\":\"3d458cfe55cc03ea1f443f1562beec8df51c75e14a9fcf9a7234a13f198e7969\",\"warnOnly\":true},\"3\":{\"expected\":\"3d458cfe55cc03ea1f443f1562beec8df51c75e14a9fcf9a7234a13f198e7969\",\"warnOnly\":true},\"4\":{\"expected\":\"e5020193148fbad0dbaf618fb3ef15665c72ff87a54e24b2d8f5bdf9719bb50b\",\"warnOnly\":true},\"6\":{\"expected\":\"3d458cfe55cc03ea1f443f1562beec8df51c75e14a9fcf9a7234a13f198e7969\",\"warnOnly\":true},\"8\":{\"expected\":\"0000000000000000000000000000000000000000000000000000000000000000\",\"warnOnly\":true},\"9\":{\"expected\":\"37ef16fd0ae8d2fb3b1914f0b8ff046e765b57fec6739d2ebf1fd4d182071437\",\"warnOnly\":true}},\"bootloaderVersion\":\"latest\",\"teeVersion\":\"latest\",\"snpVersion\":\"latest\",\"microcodeVersion\":\"latest\",\"amdRootKey\":\"-----BEGIN 
CERTIFICATE-----\\nMIIGYzCCBBKgAwIBAgIDAQAAMEYGCSqGSIb3DQEBCjA5oA8wDQYJYIZIAWUDBAIC\\nBQChHDAaBgkqhkiG9w0BAQgwDQYJYIZIAWUDBAICBQCiAwIBMKMDAgEBMHsxFDAS\\nBgNVBAsMC0VuZ2luZWVyaW5nMQswCQYDVQQGEwJVUzEUMBIGA1UEBwwLU2FudGEg\\nQ2xhcmExCzAJBgNVBAgMAkNBMR8wHQYDVQQKDBZBZHZhbmNlZCBNaWNybyBEZXZp\\nY2VzMRIwEAYDVQQDDAlBUkstTWlsYW4wHhcNMjAxMDIyMTcyMzA1WhcNNDUxMDIy\\nMTcyMzA1WjB7MRQwEgYDVQQLDAtFbmdpbmVlcmluZzELMAkGA1UEBhMCVVMxFDAS\\nBgNVBAcMC1NhbnRhIENsYXJhMQswCQYDVQQIDAJDQTEfMB0GA1UECgwWQWR2YW5j\\nZWQgTWljcm8gRGV2aWNlczESMBAGA1UEAwwJQVJLLU1pbGFuMIICIjANBgkqhkiG\\n9w0BAQEFAAOCAg8AMIICCgKCAgEA0Ld52RJOdeiJlqK2JdsVmD7FktuotWwX1fNg\\nW41XY9Xz1HEhSUmhLz9Cu9DHRlvgJSNxbeYYsnJfvyjx1MfU0V5tkKiU1EesNFta\\n1kTA0szNisdYc9isqk7mXT5+KfGRbfc4V/9zRIcE8jlHN61S1ju8X93+6dxDUrG2\\nSzxqJ4BhqyYmUDruPXJSX4vUc01P7j98MpqOS95rORdGHeI52Naz5m2B+O+vjsC0\\n60d37jY9LFeuOP4Meri8qgfi2S5kKqg/aF6aPtuAZQVR7u3KFYXP59XmJgtcog05\\ngmI0T/OitLhuzVvpZcLph0odh/1IPXqx3+MnjD97A7fXpqGd/y8KxX7jksTEzAOg\\nbKAeam3lm+3yKIcTYMlsRMXPcjNbIvmsBykD//xSniusuHBkgnlENEWx1UcbQQrs\\n+gVDkuVPhsnzIRNgYvM48Y+7LGiJYnrmE8xcrexekBxrva2V9TJQqnN3Q53kt5vi\\nQi3+gCfmkwC0F0tirIZbLkXPrPwzZ0M9eNxhIySb2npJfgnqz55I0u33wh4r0ZNQ\\neTGfw03MBUtyuzGesGkcw+loqMaq1qR4tjGbPYxCvpCq7+OgpCCoMNit2uLo9M18\\nfHz10lOMT8nWAUvRZFzteXCm+7PHdYPlmQwUw3LvenJ/ILXoQPHfbkH0CyPfhl1j\\nWhJFZasCAwEAAaN+MHwwDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBSFrBrRQ/fI\\nrFXUxR1BSKvVeErUUzAPBgNVHRMBAf8EBTADAQH/MDoGA1UdHwQzMDEwL6AtoCuG\\nKWh0dHBzOi8va2RzaW50Zi5hbWQuY29tL3ZjZWsvdjEvTWlsYW4vY3JsMEYGCSqG\\nSIb3DQEBCjA5oA8wDQYJYIZIAWUDBAICBQChHDAaBgkqhkiG9w0BAQgwDQYJYIZI\\nAWUDBAICBQCiAwIBMKMDAgEBA4ICAQC6m0kDp6zv4Ojfgy+zleehsx6ol0ocgVel\\nETobpx+EuCsqVFRPK1jZ1sp/lyd9+0fQ0r66n7kagRk4Ca39g66WGTJMeJdqYriw\\nSTjjDCKVPSesWXYPVAyDhmP5n2v+BYipZWhpvqpaiO+EGK5IBP+578QeW/sSokrK\\ndHaLAxG2LhZxj9aF73fqC7OAJZ5aPonw4RE299FVarh1Tx2eT3wSgkDgutCTB1Yq\\nzT5DuwvAe+co2CIVIzMDamYuSFjPN0BCgojl7V+bTou7dMsqIu/TW/rPCX9/EUcp\\nKGKqPQ3P+N9r1hjEFY1plBg93t53OOo49GNI+V1zvXPLI6xIFVsh+mto2RtgEX/e\\npmMKTNN6psW88qg7c1hTWtN6MbRuQ0vm+O+/2tKBF2h8THb94OvvHHoFDpbCELlq\\nHnIYhxy0YKXGyaW1NjfULxrrmxVW4wcn5E8GddmvNa6yYm8scJagEi13mhGu4Jqh\\n3QU3sf8iUSUr09xQDwHtOQUVIqx4maBZPBtSMf+qUDtjXSSq8lfWcd8bLr9mdsUn\\nJZJ0+tuPMKmBnSH860llKk+VpVQsgqbzDIvOLvD6W1Umq25boxCYJ+TuBoa4s+HH\\nCViAvgT9kf/rBq1d+ivj6skkHxuzcxbk1xv6ZGxrteJxVH7KlX7YRdZ6eARKwLe4\\nAFZEAwoKCQ==\\n-----END CERTIFICATE-----\\n\",\"amdSigningKey\":\"\"}" - }, - "kind": "ConfigMap", - "metadata": { - "creationTimestamp": "2024-09-25T11:11:50Z", - "name": "join-config", - "namespace": "kube-system", - "resourceVersion": "387", - "uid": "fdd0d5eb-cf58-4608-99c9-eede08895615" - } -} -``` -### Hashes of Kubernetes binaries -```bash -kubectl -n kube-system get cm k8s-components-sha256-7b73c7675df78e5753b6e0fc86a9982127fd16141837599d5ce16df6bfe6a2a0 -o json -``` -```json -{ - "apiVersion": "v1", - "data": { - "cluster-version": "v1.29.8", - "components": 
"[{\"url\":\"https://github.com/containernetworking/plugins/releases/download/v1.4.0/cni-plugins-linux-amd64-v1.4.0.tgz\",\"hash\":\"sha256:c2485ddb3ffc176578ae30ae58137f0b88e50f7c7f2af7d53a569276b2949a33\",\"install_path\":\"/opt/cni/bin\",\"extract\":true},{\"url\":\"https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.29.0/crictl-v1.29.0-linux-amd64.tar.gz\",\"hash\":\"sha256:d16a1ffb3938f5a19d5c8f45d363bd091ef89c0bc4d44ad16b933eede32fdcbb\",\"install_path\":\"/run/state/bin\",\"extract\":true},{\"url\":\"https://storage.googleapis.com/kubernetes-release/release/v1.29.8/bin/linux/amd64/kubelet\",\"hash\":\"sha256:df6e130928403af8b4f49f1197e26f2873a147cd0e23aa6597a26c982c652ae0\",\"install_path\":\"/run/state/bin/kubelet\"},{\"url\":\"https://storage.googleapis.com/kubernetes-release/release/v1.29.8/bin/linux/amd64/kubeadm\",\"hash\":\"sha256:fe054355e0ae8dc35d868a3d3bc408ccdff0969c20bf7a231ae9b71484e41be3\",\"install_path\":\"/run/state/bin/kubeadm\"},{\"url\":\"https://storage.googleapis.com/kubernetes-release/release/v1.29.8/bin/linux/amd64/kubectl\",\"hash\":\"sha256:038454e0d79748aab41668f44ca6e4ac8affd1895a94f592b9739a0ae2a5f06a\",\"install_path\":\"/run/state/bin/kubectl\"},{\"url\":\"data:application/json;base64,W3sib3AiOiJyZXBsYWNlIiwicGF0aCI6Ii9zcGVjL2NvbnRhaW5lcnMvMC9pbWFnZSIsInZhbHVlIjoicmVnaXN0cnkuazhzLmlvL2t1YmUtYXBpc2VydmVyOnYxLjI5LjhAc2hhMjU2OjZmNzJmYTkyNmM5YjA1ZTEwNjI5ZmUxYTA5MmZkMjhkY2Q2NWI0ZmRmZDBjYzdiZDU1Zjg1YTU3YTZiYTFmYTUifV0=\",\"install_path\":\"/opt/kubernetes/patches/kube-apiserver+json.json\"},{\"url\":\"data:application/json;base64,W3sib3AiOiJyZXBsYWNlIiwicGF0aCI6Ii9zcGVjL2NvbnRhaW5lcnMvMC9pbWFnZSIsInZhbHVlIjoicmVnaXN0cnkuazhzLmlvL2t1YmUtY29udHJvbGxlci1tYW5hZ2VyOnYxLjI5LjhAc2hhMjU2OjZmMjdkNjNkZWQyMDYxNGM2ODU1NGI0NzdjZDdhNzhlZGE3OGE0OThhOTJiZmU4OTM1Y2Y5NjRjYTViNzRkMGIifV0=\",\"install_path\":\"/opt/kubernetes/patches/kube-controller-manager+json.json\"},{\"url\":\"data:application/json;base64,W3sib3AiOiJyZXBsYWNlIiwicGF0aCI6Ii9zcGVjL2NvbnRhaW5lcnMvMC9pbWFnZSIsInZhbHVlIjoicmVnaXN0cnkuazhzLmlvL2t1YmUtc2NoZWR1bGVyOnYxLjI5LjhAc2hhMjU2OmRhNzRhNjY2NzVkOTVlMzllYzI1ZGE1ZTcwNzI5ZGE3NDZkMGZhMGIxNWVlMGRhODcyYWM5ODA1MTliYzI4YmQifV0=\",\"install_path\":\"/opt/kubernetes/patches/kube-scheduler+json.json\"},{\"url\":\"data:application/json;base64,W3sib3AiOiJyZXBsYWNlIiwicGF0aCI6Ii9zcGVjL2NvbnRhaW5lcnMvMC9pbWFnZSIsInZhbHVlIjoicmVnaXN0cnkuazhzLmlvL2V0Y2Q6My41LjEyLTBAc2hhMjU2OjQ0YThlMjRkY2JiYTM0NzBlZTFmZWUyMWQ1ZTg4ZDEyOGM5MzZlOWI1NWQ0YmM1MWZiZWY4MDg2ZjhlZDEyM2IifV0=\",\"install_path\":\"/opt/kubernetes/patches/etcd+json.json\"}]" - }, - "immutable": true, - "kind": "ConfigMap", - "metadata": { - "creationTimestamp": "2024-09-25T11:11:50Z", - "name": "k8s-components-sha256-7b73c7675df78e5753b6e0fc86a9982127fd16141837599d5ce16df6bfe6a2a0", - "namespace": "kube-system", - "resourceVersion": "356", - "uid": "6389c186-3bc8-4470-8af5-f6fed1addd69" - } -} -``` \ No newline at end of file diff --git a/dev-docs/workflows/attestationconfigapi.md b/dev-docs/workflows/attestationconfigapi.md index 4881497ce..5da8eda35 100644 --- a/dev-docs/workflows/attestationconfigapi.md +++ b/dev-docs/workflows/attestationconfigapi.md @@ -8,10 +8,10 @@ This estimate might make manual intervention necessary when a global rollout did ### Manually delete a version ``` -COSIGN_PASSWORD=$CPW COSIGN_PRIVATE_KEY="$(cat $PATH_TO_KEY)" AWS_ACCESS_KEY_ID=$ID AWS_ACCESS_KEY=$KEY bazel run //internal/api/attestationconfigapi/cli -- delete azure-sev-snp attestation-report 2025-01-18-09-15 
+COSIGN_PASSWORD=$CPW COSIGN_PRIVATE_KEY="$(cat $PATH_TO_KEY)" AWS_ACCESS_KEY_ID=$ID AWS_ACCESS_KEY=$KEY bazel run //internal/api/attestationconfigapi/cli delete -- --version 2023-09-02-12-52 ``` ### Manually upload a version ``` -COSIGN_PASSWORD=$CPW COSIGN_PRIVATE_KEY="$(cat $PATH_TO_KEY)" AWS_ACCESS_KEY_ID=$ID AWS_ACCESS_KEY=$KEY bazel run //internal/api/attestationconfigapi/cli -- upload azure-sev-snp attestation-report 2025-01-18-09-15 --force +COSIGN_PASSWORD=$CPW COSIGN_PRIVATE_KEY="$(cat $PATH_TO_KEY)" AWS_ACCESS_KEY_ID=$ID AWS_ACCESS_KEY=$KEY bazel run //internal/api/attestationconfigapi/cli -- --force --version 2023-09-02-12-52 --maa-claims-path "${path}" ``` diff --git a/dev-docs/workflows/bazel.md b/dev-docs/workflows/bazel.md index 8dc38b7ea..71fda6bc5 100644 --- a/dev-docs/workflows/bazel.md +++ b/dev-docs/workflows/bazel.md @@ -3,13 +3,6 @@ Bazel is the primary build system for this project. It is used to build all Go code and will be used to build all artifacts in the future. Still, we aim to keep the codebase compatible with `go build` and `go test` as well. Whenever Go code is changed, you will have to run `bazel run //:tidy` to regenerate the Bazel build files for Go code. -Additionally, you need to update `MODULE.bazel`, together with `MODULE.bazel.lock`: - -``` -# if the steps below fail, try to recreate the lockfile from scratch by deleting it -bazel mod deps --lockfile_mode=update -bazel mod tidy -``` ## Bazel commands @@ -53,6 +46,17 @@ Also note that some errors shown in `check` (non-silent mode) by `golicenses_che * `bazel query //subfolder` - list all targets in a subfolder * `bazel cquery --output=files //subfolder:target` - get location of a build artifact +### (Optional) Remote caching and execution + +We use BuildBuddy for remote caching (and maybe remote execution in the future). To use it, you need to join the BuildBuddy organization and get an API key. Then, you can write it to `~/.bazelrc`: + +```sh +build --remote_header=x-buildbuddy-api-key= +``` + +To use the remote cache, build the project with `bazel build --config remote_cache //path/to:target`. +You can also copy the `remote_cache` config from `.bazelrc` to your `~/.bazelrc` and remove the `remote_cache` prefix to make it the default. + ## Setup ### VS Code integration diff --git a/dev-docs/workflows/bump-go-version.md b/dev-docs/workflows/bump-go-version.md index f2736179b..4d0d23de2 100644 --- a/dev-docs/workflows/bump-go-version.md +++ b/dev-docs/workflows/bump-go-version.md @@ -1,41 +1,10 @@ # Bump Go version - `govulncheck` from the bazel `check` target will fail if our code is vulnerable, which is often the case when a patch version was released with security fixes. ## Steps -Replace `"1.xx.x"` with the new version in [MODULE.bazel](/MODULE.bazel): +Replace "1.xx.x" with the new version in [WORKSPACE.bazel](/WORKSPACE.bazel): ```starlark -go_sdk = use_extension("@io_bazel_rules_go//go:extensions.bzl", "go_sdk") -go_sdk.download( - name = "go_sdk", - patches = ["//3rdparty/bazel/org_golang:go_tls_max_handshake_size.patch"], - version = "1.xx.x", <--- Replace this one - ~~~~~~~~ -) - -``` - -Replace `go-version: "1.xx.x"` with the new version in all GitHub actions/workflows, our go.mod files and Containerfiles. 
-You can use the following command to find replace all instances of `go-version: "1.xx.x"` in the `.github` directory: - -```bash -OLD_VERSION="1.xx.x" -NEW_VERSION="1.xx.y" -find .github -type f -exec sed -i "s/go-version: \"${OLD_VERSION}\"/go-version: \"${NEW_VERSION}\"/g" {} \; -sed -i "s/go ${OLD_VERSION}/go ${NEW_VERSION}/g" go.mod -sed -i "s/go ${OLD_VERSION}/go ${NEW_VERSION}/g" hack/tools/go.mod -sed -i "s/${OLD_VERSION}/${NEW_VERSION}/g" go.work -sed -i "s/GO_VER=${OLD_VERSION}/GO_VER=${NEW_VERSION}/g" 3rdparty/gcp-guest-agent/Dockerfile -``` - -Or manually: - -```yaml -- name: Setup Go environment - uses: actions/setup-go@v5 - with: - go-version: "1.xx.x" <--- Replace this one - ~~~~~~~~ +go_register_toolchains(version = "1.xx.x") ``` diff --git a/dev-docs/workflows/github-actions.md b/dev-docs/workflows/github-actions.md index 89f9f5d3c..ef97ed332 100644 --- a/dev-docs/workflows/github-actions.md +++ b/dev-docs/workflows/github-actions.md @@ -24,7 +24,7 @@ Here are some examples for test suites you might want to run. Values for `sonobu * `--mode certified-conformance` * For K8s conformance certification test suite -Check [Sonobuoy docs](https://sonobuoy.io/docs/v0.57.1/e2eplugin/) for more examples. +Check [Sonobuoy docs](https://sonobuoy.io/docs/latest/e2eplugin/) for more examples. When using `--mode` be aware that `--e2e-focus` and `e2e-skip` will be overwritten. [Check in the source code](https://github.com/vmware-tanzu/sonobuoy/blob/e709787426316423a4821927b1749d5bcc90cb8c/cmd/sonobuoy/app/modes.go#L130) what the different modes do. diff --git a/dev-docs/workflows/marketplace-publishing.md b/dev-docs/workflows/marketplace-publishing.md deleted file mode 100644 index aa0513b2f..000000000 --- a/dev-docs/workflows/marketplace-publishing.md +++ /dev/null @@ -1,33 +0,0 @@ -# Publishing Marketplace Images - -Constellation release images need to be manually published to AWS and Azure marketplaces due to the lack of automation features. -On GCP, marketplace image publishing is automated and takes place on release. - -This document explains how to perform the uploading on AWS and Azure. - -## AWS - -1. Log in to the [AWS marketplace management portal](https://aws.amazon.com/marketplace/management/) with your regular developer AWS account. -2. Select "Products -> Server -> Constellation" in the top menu. -3. Select "Versions" in the main menu and press "Add version". -4. Fill in the form. - 1. Enter the semantic version of the release (i.e. `vX.Y.Z`) as "Version title". - 2. Set the version tag in "Release notes" to the same version. - 3. For the "Amazon Machine Image (AMI) ID", enter the AMI ID of the release (SEV-SNP) image. This can be found in the regular - [AWS console](https://us-east-1.console.aws.amazon.com/ec2/home?region=us-east-1#Images:visibility=owned-by-me;search=:constellation-v;v=3;$case=tags:false%5C,client:false;$regex=tags:false%5C,client:false;sort=desc:creationDate). - 4. For "IAM access role ARN", enter `arn:aws:iam::795746500882:role/constellation-marketplace-ingest`. -5. Leave the other fields as they are and press "Add version". -6. Wait for the [request](https://aws.amazon.com/marketplace/management/requests) to be processed and available before publishing the release. - -## Azure - -1. Log in to the [Microsoft partner center](https://partner.microsoft.com/en-us/dashboard/home) with your regular developer Microsoft account. -2. Select "Marketplace offers -> Constellation -> Constellation Node" in the main menu. -3. 
Select "Technical configuration" in the sidebar on the left. -4. Select "Add VM Image". - 1. For the "Version number", enter the semantic version of the release without the `v` prefix. If the release version is `vX.Y.Z`, enter `X.Y.Z`. - 2. Press "Add a gallery image" and select the corresponding "Constellation_CVM" image version in the menu. - 3. Press "Save VM image". -5. **IMPORTANT**: Hit **Save draft**. Do **NOT** hit "Review and publish" directly. -6. **After** saving the draft, hit "Review and publish". -7. Go back to the [offer's home page](https://partner.microsoft.com/en-us/dashboard/commercial-marketplace/offers/a53ac90b-06f7-4a20-a845-8607ca352e61/overview) and wait for the process to complete before publishing the release. diff --git a/dev-docs/workflows/qemu.md b/dev-docs/workflows/qemu.md index dd327d31b..3f294cc8b 100644 --- a/dev-docs/workflows/qemu.md +++ b/dev-docs/workflows/qemu.md @@ -20,7 +20,7 @@ Follow the steps in our [libvirt readme](../../nix/container/README.md) if you w ### Install required packages -[General reference](https://documentation.ubuntu.com/server/how-to/virtualisation/libvirt/) +[General reference](https://ubuntu.com/server/docs/virtualization-libvirt) ```shell-session sudo apt install qemu-kvm libvirt-daemon-system xsltproc diff --git a/dev-docs/workflows/release.md b/dev-docs/workflows/release.md index 619d28744..c7af9bc0f 100644 --- a/dev-docs/workflows/release.md +++ b/dev-docs/workflows/release.md @@ -46,8 +46,6 @@ Releases should be performed using [the automated release pipeline](https://gith 6. look over the autogenerated draft release. When fixing the changelog, prioritize updating the PR title/labels/description and regenerating the changelog over fixing things in the final changelog. The changelog should be primarily aimed at users. Rule of thumb: first part of the sentence should describe what changed for the user, second part can describe what has been changed to achieve this. 7. in the GitHub release UI, make sure the tag to create on release is set to `$ver`, and the target commit is set to the temporary release branch. 8. publish. -9. follow [post release steps](#post-release-steps). - ### Minor release @@ -80,16 +78,20 @@ Releases should be performed using [the automated release pipeline](https://gith 8. set the Target to `tmp/${ver}` 9. in the GitHub release UI, make sure the tag to create on release is set to `$ver`, and the target commit is set to the temporary release branch. 10. publish. -11. follow [post release steps](#post-release-steps). ## Post release steps 1. Publish the [provider release](https://github.com/edgelesssys/terraform-provider-constellation/releases) -2. Merge the automated post release PR -3. Publish the [AWS and Azure marketplace images](./marketplace-publishing.md). -4. Close fixed "known issues" -5. Move open issues and PRs from this release's closed milestone to next milestone -6. Reset `UpgradeRequiresIAMMigration` in [`iamupgrade.go`](https://github.com/edgelesssys/constellation/blob/a88a731576184e3c5ee8527741c4a0cdaa4e9b24/cli/internal/cloudcmd/iamupgrade.go#L23). +2. Merge the versioned docs PR +3. Close fixed "known issues" +4. Milestones management + 1. Create a new milestone for the next release + 2. Add the next release manager and an approximate release date to the milestone description + 3. Close the milestone for the release + 4. Move open issues and PRs from closed milestone to next milestone +5. 
If the release is a minor version release, bump the pre-release version in the `version.txt` file. +6. Update the `fromVersion` in `e2e-test-release.yml` and `e2e-test-weekly.yaml` to the newly released version. To check the current values, run: `grep "fromVersion: \[.*\]" -R .github`. +7. Reset `UpgradeRequiresIAMMigration` in [`iamupgrade.go`](https://github.com/edgelesssys/constellation/blob/a88a731576184e3c5ee8527741c4a0cdaa4e9b24/cli/internal/cloudcmd/iamupgrade.go#L23). ## Troubleshooting: Pipeline cleanup diff --git a/dev-docs/workflows/security-patch.md b/dev-docs/workflows/security-patch.md deleted file mode 100644 index 468c9b526..000000000 --- a/dev-docs/workflows/security-patch.md +++ /dev/null @@ -1,62 +0,0 @@ -# Security Patch Workflow - -This document describes how to patch vulnerabilities in Constellation. - -## Guiding Principles - -* Constellation vulnerabilities and security patches must be shared on need-to-know basis. -* A vulnerability is only fixed if a patch exists for all supported versions. -* Affected users must be informed about vulnerabilities affecting their setups. -* Vulnerabilities in Constellation should be fixed as quickly as possible. - -## Vulnerability Report - -Someone found a vulnerability in Constellation. -If they followed [SECURITY.md](/SECURITY.md), a Github Security Advisory (GHSA) might already exist. -Otherwise, now is the time to [create a draft](https://github.com/edgelesssys/constellation/security/advisories/new). -Make sure that the GHSA includes a problem statement that gives sufficient context and add domain experts as collaborators. - -## Mitigation - -Investigate possible mitigations for the vulnerability that don't require a patch release. -Such mitigations could include additional firewall settings, manual cluster configuration changes, etc. -Add all reasonable mitigation instructions to the GHSA. - -If the vulnerability has already been disclosed publicly, the GHSA should also be disclosed at this stage. -Add an ETA for a patch release and proceed with [disclosure steps](#disclosing-the-ghsa). - -## Fix - -The first step towards fixing the vulnerability is to assess the amount of work required. -Use this estimate to propose a target release date and inform the product manager about impact and timeline. -The product manager will notify customers of the upcoming patch release. - -Sometimes a fix can be developed quickly for `main`, but correctly backporting it takes more time. -It may also happen that a proposed fix needs substantial work before merging. -In order to avoid premature disclosure of the vulnerability, while still allowing for collaboration, we use the GHSA's temporary repository. - -1. On the drafted GHSA, create a temporary repository to work on a fix. -1. Develop a fix on a local branch, targeting `main`. -1. Manually run static checks (unit tests, linters, etc.). -1. Push the branch to the temporary fork and open a PR. - This is necessary because the fork can't run Github Actions. -1. Solicit and incorporate feedback on the PR. -1. Manually test a fixed version, possibly including upgrade tests. - -When the PR is ready, cherry-pick its commits to your local version of the release branch. -Repeat the steps above, but target the PR at the corresponding release branch. - -Once PRs are ready for all necessary patches, hit the merge button on the GHSA. -This will merge all PRs, but the GHSA will stay in draft mode for now. - -## Disclosing the GHSA - -The following steps need to be performed by a repository admin. - -1. 
Ensure that the GHSA text is in shape for publication. - In particular, look out for any empty sections and placeholder text. -1. Fill in the `Affected Versions` and `Patched Versions` fields. -1. Check that the severity setting is accurate. -1. Credit external collaborators, e.g. by @-mention. -1. Hit the `Publish Advisory` button. -1. Notify the product manager to share patch and advisory with customers. diff --git a/dev-docs/workflows/terraform-provider.md b/dev-docs/workflows/terraform-provider.md index 3dff8e290..7f255710f 100644 --- a/dev-docs/workflows/terraform-provider.md +++ b/dev-docs/workflows/terraform-provider.md @@ -12,7 +12,7 @@ The [`devbuild` target](./build-develop-deploy.md), will create a `terraform` di with the provider binary and some utility files in the dedicated local Terraform registry directory. ```bash -bazel run //:devbuild +bazel run //:devbuild' ``` > [!IMPORTANT] when making changes on the provider without a commit, subsequent applies will fail due to the changed binary hash. To solve this, in your Terraform directory run: diff --git a/dev-docs/workflows/upgrade-kubernetes.md b/dev-docs/workflows/upgrade-kubernetes.md index 99df23055..809a837d5 100644 --- a/dev-docs/workflows/upgrade-kubernetes.md +++ b/dev-docs/workflows/upgrade-kubernetes.md @@ -27,21 +27,6 @@ curl -qL https://mcr.microsoft.com/v2/oss/kubernetes/azure-cloud-node-manager/ta Normally renovate will handle the upgrading of Kubernetes dependencies. -## Update e2e tests - -Run the following script to update the k8s versions used in the e2e workflows, adjusting the versions to what you're upgrading to. - -```sh -next=v1.33 -current=v1.32 -old=v1.31 -oldold=v1.30 -sed -i -e "s/$current/$next/g" -e "s/$old/$current/g" -e "s/$oldold/$old/g" \ - .github/workflows/e2e-test-daily.yml \ - .github/workflows/e2e-test-weekly.yml \ - .github/workflows/e2e-test-release.yml -``` - ## Test the new Kubernetes version - Setup a Constellation cluster using the new image with the new bootstrapper binary and check if Kubernetes is deployed successfully. 
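One way to check the last step is to confirm that every node reports the new kubelet version. The following standalone Go snippet is only a sketch (it isn't part of the repository and assumes the admin kubeconfig written by `constellation apply`, here called `constellation-admin.conf`); it uses the standard client-go API to print each node's kubelet version:

```go
package main

import (
	"context"
	"flag"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	kubeconfig := flag.String("kubeconfig", "constellation-admin.conf", "path to the admin kubeconfig")
	flag.Parse()

	// Build a client from the admin kubeconfig.
	config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// Print the kubelet version of every node.
	nodes, err := client.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, node := range nodes.Items {
		fmt.Printf("%s\t%s\n", node.Name, node.Status.NodeInfo.KubeletVersion)
	}
}
```

All nodes should report the new Kubernetes version once the rollout has completed.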
diff --git a/disk-mapper/cmd/main.go b/disk-mapper/cmd/main.go index 8d4e8eee7..f20bf9cfa 100644 --- a/disk-mapper/cmd/main.go +++ b/disk-mapper/cmd/main.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package main diff --git a/disk-mapper/internal/diskencryption/diskencryption.go b/disk-mapper/internal/diskencryption/diskencryption.go index c642bb158..6bed71bfe 100644 --- a/disk-mapper/internal/diskencryption/diskencryption.go +++ b/disk-mapper/internal/diskencryption/diskencryption.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* diff --git a/disk-mapper/internal/recoveryserver/BUILD.bazel b/disk-mapper/internal/recoveryserver/BUILD.bazel index fc1c24a53..966b93afd 100644 --- a/disk-mapper/internal/recoveryserver/BUILD.bazel +++ b/disk-mapper/internal/recoveryserver/BUILD.bazel @@ -14,7 +14,7 @@ go_library( "//internal/grpc/grpclog", "//internal/kms/kms", "//internal/logger", - "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//:go_default_library", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//status", ], diff --git a/disk-mapper/internal/recoveryserver/recoveryserver.go b/disk-mapper/internal/recoveryserver/recoveryserver.go index 5852b859b..5234d2e63 100644 --- a/disk-mapper/internal/recoveryserver/recoveryserver.go +++ b/disk-mapper/internal/recoveryserver/recoveryserver.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* @@ -59,7 +59,7 @@ func New(issuer atls.Issuer, factory kmsFactory, log *slog.Logger) *RecoveryServ grpcServer := grpc.NewServer( grpc.Creds(atlscredentials.New(issuer, nil)), - logger.GetServerStreamInterceptor(logger.GRPCLogger(log)), + logger.GetServerStreamInterceptor(log.WithGroup("gRPC")), ) recoverproto.RegisterAPIServer(grpcServer, server) diff --git a/disk-mapper/internal/recoveryserver/recoveryserver_test.go b/disk-mapper/internal/recoveryserver/recoveryserver_test.go index 5676de7f0..cf4e6c043 100644 --- a/disk-mapper/internal/recoveryserver/recoveryserver_test.go +++ b/disk-mapper/internal/recoveryserver/recoveryserver_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package recoveryserver @@ -40,7 +40,7 @@ func TestServe(t *testing.T) { server := New(atls.NewFakeIssuer(variant.Dummy{}), newStubKMS(nil, nil), log) dialer := testdialer.NewBufconnDialer() listener := dialer.GetListener("192.0.2.1:1234") - ctx, cancel := context.WithCancel(t.Context()) + ctx, cancel := context.WithCancel(context.Background()) var wg sync.WaitGroup // Serve method returns when context is canceled @@ -62,7 +62,7 @@ func TestServe(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - _, _, err := server.Serve(t.Context(), listener, uuid) + _, _, err := server.Serve(context.Background(), listener, uuid) assert.NoError(err) }() time.Sleep(100 * time.Millisecond) @@ -70,7 +70,7 @@ func TestServe(t *testing.T) { wg.Wait() // Serve method returns an error when serving is unsuccessful - _, _, err := server.Serve(t.Context(), listener, uuid) + _, _, err := server.Serve(context.Background(), listener, uuid) assert.Error(err) } @@ -104,7 +104,7 @@ func TestRecover(t *testing.T) { assert := assert.New(t) require := require.New(t) - ctx := t.Context() + ctx := context.Background() serverUUID := "uuid" server := 
New(atls.NewFakeIssuer(variant.Dummy{}), tc.factory, logger.NewTest(t)) netDialer := testdialer.NewBufconnDialer() @@ -123,7 +123,7 @@ func TestRecover(t *testing.T) { diskKey, measurementSecret, serveErr = server.Serve(serveCtx, listener, serverUUID) }() - conn, err := dialer.New(nil, nil, netDialer).Dial("192.0.2.1:1234") + conn, err := dialer.New(nil, nil, netDialer).Dial(ctx, "192.0.2.1:1234") require.NoError(err) defer conn.Close() diff --git a/disk-mapper/internal/rejoinclient/BUILD.bazel b/disk-mapper/internal/rejoinclient/BUILD.bazel index 77238f692..01f0c26e5 100644 --- a/disk-mapper/internal/rejoinclient/BUILD.bazel +++ b/disk-mapper/internal/rejoinclient/BUILD.bazel @@ -12,7 +12,7 @@ go_library( "//internal/role", "//joinservice/joinproto", "@io_k8s_utils//clock", - "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//:go_default_library", ], ) @@ -31,7 +31,7 @@ go_test( "//joinservice/joinproto", "@com_github_stretchr_testify//assert", "@io_k8s_utils//clock/testing", - "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//:go_default_library", "@org_uber_go_goleak//:goleak", ], ) diff --git a/disk-mapper/internal/rejoinclient/rejoinclient.go b/disk-mapper/internal/rejoinclient/rejoinclient.go index bbd511971..bedb01535 100644 --- a/disk-mapper/internal/rejoinclient/rejoinclient.go +++ b/disk-mapper/internal/rejoinclient/rejoinclient.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* @@ -123,7 +123,7 @@ func (c *RejoinClient) requestRejoinTicket(endpoint string) (*joinproto.IssueRej ctx, cancel := c.timeoutCtx() defer cancel() - conn, err := c.dialer.Dial(endpoint) + conn, err := c.dialer.Dial(ctx, endpoint) if err != nil { return nil, err } @@ -185,7 +185,7 @@ func (c *RejoinClient) timeoutCtx() (context.Context, context.CancelFunc) { } type grpcDialer interface { - Dial(target string) (*grpc.ClientConn, error) + Dial(ctx context.Context, target string) (*grpc.ClientConn, error) } type metadataAPI interface { diff --git a/disk-mapper/internal/rejoinclient/rejoinclient_test.go b/disk-mapper/internal/rejoinclient/rejoinclient_test.go index 51e3a6bd1..18bf15df1 100644 --- a/disk-mapper/internal/rejoinclient/rejoinclient_test.go +++ b/disk-mapper/internal/rejoinclient/rejoinclient_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package rejoinclient @@ -71,7 +71,7 @@ func TestStartCancel(t *testing.T) { go rejoinServer.Serve(listener) defer rejoinServer.GracefulStop() - ctx, cancel := context.WithCancel(t.Context()) + ctx, cancel := context.WithCancel(context.Background()) var wg sync.WaitGroup wg.Add(1) @@ -294,7 +294,7 @@ func TestStart(t *testing.T) { client := New(dialer, tc.nodeInfo, meta, logger.NewTest(t)) - passphrase, secret := client.Start(t.Context(), "uuid") + passphrase, secret := client.Start(context.Background(), "uuid") assert.Equal(diskKey, passphrase) assert.Equal(measurementSecret, secret) }) diff --git a/disk-mapper/internal/setup/interface.go b/disk-mapper/internal/setup/interface.go index 6f4e02c86..50bd008a7 100644 --- a/disk-mapper/internal/setup/interface.go +++ b/disk-mapper/internal/setup/interface.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package setup diff --git a/disk-mapper/internal/setup/mount_cross.go b/disk-mapper/internal/setup/mount_cross.go index 
1c8015ee8..271a467d0 100644 --- a/disk-mapper/internal/setup/mount_cross.go +++ b/disk-mapper/internal/setup/mount_cross.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package setup diff --git a/disk-mapper/internal/setup/mount_linux.go b/disk-mapper/internal/setup/mount_linux.go index f0ba7dff0..d3ee2d229 100644 --- a/disk-mapper/internal/setup/mount_linux.go +++ b/disk-mapper/internal/setup/mount_linux.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package setup diff --git a/disk-mapper/internal/setup/setup.go b/disk-mapper/internal/setup/setup.go index 47b82a348..38b591451 100644 --- a/disk-mapper/internal/setup/setup.go +++ b/disk-mapper/internal/setup/setup.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* @@ -76,7 +76,7 @@ func New(log *slog.Logger, csp string, diskPath string, fs afero.Afero, // PrepareExistingDisk requests and waits for a decryption key to remap the encrypted state disk. // Once the disk is mapped, the function taints the node as initialized by updating it's PCRs. -func (s *Manager) PrepareExistingDisk(recoverer RecoveryDoer) error { +func (s *Manager) PrepareExistingDisk(recover RecoveryDoer) error { uuid, err := s.mapper.DiskUUID() if err != nil { return err @@ -84,7 +84,7 @@ func (s *Manager) PrepareExistingDisk(recoverer RecoveryDoer) error { s.log.With(slog.String("uuid", uuid)).Info("Preparing existing state disk") endpoint := net.JoinHostPort("0.0.0.0", strconv.Itoa(constants.RecoveryPort)) - passphrase, measurementSecret, err := recoverer.Do(uuid, endpoint) + passphrase, measurementSecret, err := recover.Do(uuid, endpoint) if err != nil { return fmt.Errorf("failed to perform recovery: %w", err) } diff --git a/disk-mapper/internal/setup/setup_test.go b/disk-mapper/internal/setup/setup_test.go index a21416101..1678b6bbf 100644 --- a/disk-mapper/internal/setup/setup_test.go +++ b/disk-mapper/internal/setup/setup_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package setup diff --git a/disk-mapper/internal/systemd/systemd.go b/disk-mapper/internal/systemd/systemd.go index 7520c39fe..843f26c7b 100644 --- a/disk-mapper/internal/systemd/systemd.go +++ b/disk-mapper/internal/systemd/systemd.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package systemd configures systemd units for encrypted volumes. 
diff --git a/disk-mapper/internal/systemd/systemd_test.go b/disk-mapper/internal/systemd/systemd_test.go index 7accd2f58..a85e52eba 100644 --- a/disk-mapper/internal/systemd/systemd_test.go +++ b/disk-mapper/internal/systemd/systemd_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package systemd diff --git a/disk-mapper/internal/test/BUILD.bazel b/disk-mapper/internal/test/BUILD.bazel index 38e3ac89c..8d39f1a03 100644 --- a/disk-mapper/internal/test/BUILD.bazel +++ b/disk-mapper/internal/test/BUILD.bazel @@ -29,7 +29,7 @@ go_test( "@com_github_martinjungblut_go_cryptsetup//:go-cryptsetup", "@com_github_stretchr_testify//assert", "@com_github_stretchr_testify//require", - "@io_bazel_rules_go//go/runfiles", + "@io_bazel_rules_go//go/runfiles:go_default_library", "@org_uber_go_goleak//:goleak", ], "@io_bazel_rules_go//go/platform:linux": [ @@ -39,7 +39,7 @@ go_test( "@com_github_martinjungblut_go_cryptsetup//:go-cryptsetup", "@com_github_stretchr_testify//assert", "@com_github_stretchr_testify//require", - "@io_bazel_rules_go//go/runfiles", + "@io_bazel_rules_go//go/runfiles:go_default_library", "@org_uber_go_goleak//:goleak", ], "//conditions:default": [], diff --git a/disk-mapper/internal/test/benchmark_test.go b/disk-mapper/internal/test/benchmark_test.go index 419b79608..8581fb8a3 100644 --- a/disk-mapper/internal/test/benchmark_test.go +++ b/disk-mapper/internal/test/benchmark_test.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package integration diff --git a/disk-mapper/internal/test/integration_test.go b/disk-mapper/internal/test/integration_test.go index 364c97088..cc865c256 100644 --- a/disk-mapper/internal/test/integration_test.go +++ b/disk-mapper/internal/test/integration_test.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package integration @@ -37,7 +37,7 @@ const ( var diskPath = flag.String("disk", "", "Path to the disk to use for the benchmark") -var toolsEnvs = []string{"DD", "RM"} +var toolsEnvs []string = []string{"DD", "RM"} // addToolsToPATH is used to update the PATH to contain necessary tool binaries for // coreutils. diff --git a/disk-mapper/recoverproto/recover.pb.go b/disk-mapper/recoverproto/recover.pb.go index cf62209b3..2a22120de 100644 --- a/disk-mapper/recoverproto/recover.pb.go +++ b/disk-mapper/recoverproto/recover.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.6 -// protoc v5.29.1 +// protoc-gen-go v1.33.0 +// protoc v4.22.1 // source: disk-mapper/recoverproto/recover.proto package recoverproto @@ -15,7 +15,6 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" - unsafe "unsafe" ) const ( @@ -26,18 +25,21 @@ const ( ) type RecoverMessage struct { - state protoimpl.MessageState `protogen:"open.v1"` - KmsUri string `protobuf:"bytes,3,opt,name=kms_uri,json=kmsUri,proto3" json:"kms_uri,omitempty"` - StorageUri string `protobuf:"bytes,4,opt,name=storage_uri,json=storageUri,proto3" json:"storage_uri,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + KmsUri string `protobuf:"bytes,3,opt,name=kms_uri,json=kmsUri,proto3" json:"kms_uri,omitempty"` + StorageUri string `protobuf:"bytes,4,opt,name=storage_uri,json=storageUri,proto3" json:"storage_uri,omitempty"` } func (x *RecoverMessage) Reset() { *x = RecoverMessage{} - mi := &file_disk_mapper_recoverproto_recover_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_disk_mapper_recoverproto_recover_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *RecoverMessage) String() string { @@ -48,7 +50,7 @@ func (*RecoverMessage) ProtoMessage() {} func (x *RecoverMessage) ProtoReflect() protoreflect.Message { mi := &file_disk_mapper_recoverproto_recover_proto_msgTypes[0] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -78,16 +80,18 @@ func (x *RecoverMessage) GetStorageUri() string { } type RecoverResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *RecoverResponse) Reset() { *x = RecoverResponse{} - mi := &file_disk_mapper_recoverproto_recover_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_disk_mapper_recoverproto_recover_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *RecoverResponse) String() string { @@ -98,7 +102,7 @@ func (*RecoverResponse) ProtoMessage() {} func (x *RecoverResponse) ProtoReflect() protoreflect.Message { mi := &file_disk_mapper_recoverproto_recover_proto_msgTypes[1] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -115,31 +119,43 @@ func (*RecoverResponse) Descriptor() ([]byte, []int) { var File_disk_mapper_recoverproto_recover_proto protoreflect.FileDescriptor -const file_disk_mapper_recoverproto_recover_proto_rawDesc = "" + - "\n" + - "&disk-mapper/recoverproto/recover.proto\x12\frecoverproto\"J\n" + - "\x0eRecoverMessage\x12\x17\n" + - "\akms_uri\x18\x03 \x01(\tR\x06kmsUri\x12\x1f\n" + - "\vstorage_uri\x18\x04 \x01(\tR\n" + - "storageUri\"\x11\n" + - "\x0fRecoverResponse2O\n" + - "\x03API\x12H\n" + - "\aRecover\x12\x1c.recoverproto.RecoverMessage\x1a\x1d.recoverproto.RecoverResponse\"\x00BBZ@github.com/edgelesssys/constellation/v2/disk-mapper/recoverprotob\x06proto3" +var 
file_disk_mapper_recoverproto_recover_proto_rawDesc = []byte{ + 0x0a, 0x26, 0x64, 0x69, 0x73, 0x6b, 0x2d, 0x6d, 0x61, 0x70, 0x70, 0x65, 0x72, 0x2f, 0x72, 0x65, + 0x63, 0x6f, 0x76, 0x65, 0x72, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x72, 0x65, 0x63, 0x6f, 0x76, + 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x72, 0x65, 0x63, 0x6f, 0x76, 0x65, + 0x72, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x4a, 0x0a, 0x0e, 0x52, 0x65, 0x63, 0x6f, 0x76, 0x65, + 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f, + 0x75, 0x72, 0x69, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x55, 0x72, + 0x69, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x75, 0x72, 0x69, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x55, + 0x72, 0x69, 0x22, 0x11, 0x0a, 0x0f, 0x52, 0x65, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x4f, 0x0a, 0x03, 0x41, 0x50, 0x49, 0x12, 0x48, 0x0a, 0x07, + 0x52, 0x65, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x12, 0x1c, 0x2e, 0x72, 0x65, 0x63, 0x6f, 0x76, 0x65, + 0x72, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1d, 0x2e, 0x72, 0x65, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x42, 0x5a, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x64, 0x67, 0x65, 0x6c, 0x65, 0x73, 0x73, 0x73, 0x79, 0x73, + 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x65, 0x6c, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x76, + 0x32, 0x2f, 0x64, 0x69, 0x73, 0x6b, 0x2d, 0x6d, 0x61, 0x70, 0x70, 0x65, 0x72, 0x2f, 0x72, 0x65, + 0x63, 0x6f, 0x76, 0x65, 0x72, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} var ( file_disk_mapper_recoverproto_recover_proto_rawDescOnce sync.Once - file_disk_mapper_recoverproto_recover_proto_rawDescData []byte + file_disk_mapper_recoverproto_recover_proto_rawDescData = file_disk_mapper_recoverproto_recover_proto_rawDesc ) func file_disk_mapper_recoverproto_recover_proto_rawDescGZIP() []byte { file_disk_mapper_recoverproto_recover_proto_rawDescOnce.Do(func() { - file_disk_mapper_recoverproto_recover_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_disk_mapper_recoverproto_recover_proto_rawDesc), len(file_disk_mapper_recoverproto_recover_proto_rawDesc))) + file_disk_mapper_recoverproto_recover_proto_rawDescData = protoimpl.X.CompressGZIP(file_disk_mapper_recoverproto_recover_proto_rawDescData) }) return file_disk_mapper_recoverproto_recover_proto_rawDescData } var file_disk_mapper_recoverproto_recover_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_disk_mapper_recoverproto_recover_proto_goTypes = []any{ +var file_disk_mapper_recoverproto_recover_proto_goTypes = []interface{}{ (*RecoverMessage)(nil), // 0: recoverproto.RecoverMessage (*RecoverResponse)(nil), // 1: recoverproto.RecoverResponse } @@ -158,11 +174,37 @@ func file_disk_mapper_recoverproto_recover_proto_init() { if File_disk_mapper_recoverproto_recover_proto != nil { return } + if !protoimpl.UnsafeEnabled { + file_disk_mapper_recoverproto_recover_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RecoverMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + 
default: + return nil + } + } + file_disk_mapper_recoverproto_recover_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RecoverResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_disk_mapper_recoverproto_recover_proto_rawDesc), len(file_disk_mapper_recoverproto_recover_proto_rawDesc)), + RawDescriptor: file_disk_mapper_recoverproto_recover_proto_rawDesc, NumEnums: 0, NumMessages: 2, NumExtensions: 0, @@ -173,6 +215,7 @@ func file_disk_mapper_recoverproto_recover_proto_init() { MessageInfos: file_disk_mapper_recoverproto_recover_proto_msgTypes, }.Build() File_disk_mapper_recoverproto_recover_proto = out.File + file_disk_mapper_recoverproto_recover_proto_rawDesc = nil file_disk_mapper_recoverproto_recover_proto_goTypes = nil file_disk_mapper_recoverproto_recover_proto_depIdxs = nil } diff --git a/docs/.gitignore b/docs/.gitignore index ef904e99f..b2db0247a 100644 --- a/docs/.gitignore +++ b/docs/.gitignore @@ -1,3 +1,4 @@ node_modules +package-lock.json .docusaurus build/ \ No newline at end of file diff --git a/docs/README.md b/docs/README.md index 4814729e9..dea229064 100644 --- a/docs/README.md +++ b/docs/README.md @@ -8,7 +8,7 @@ During edits you can preview your changes using the [`docusaurus`](https://docus ```sh # requires node >=16.14 -npm ci # Install pinned dependencies +npm install npm run build npm run serve ``` diff --git a/docs/docs/_media/concept-constellation.svg b/docs/docs/_media/concept-constellation.svg index 30d32bf6d..caa7f847d 100644 --- a/docs/docs/_media/concept-constellation.svg +++ b/docs/docs/_media/concept-constellation.svg @@ -1,14 +1,35 @@ + id="path123" /> diff --git a/docs/docs/_media/concept-managed.svg b/docs/docs/_media/concept-managed.svg index 5645a608f..718412aad 100644 --- a/docs/docs/_media/concept-managed.svg +++ b/docs/docs/_media/concept-managed.svg @@ -1,14 +1,35 @@ + id="g226"> diff --git a/docs/docs/_media/product-overview-dark.png b/docs/docs/_media/product-overview-dark.png new file mode 100644 index 000000000..4aab5f8bd Binary files /dev/null and b/docs/docs/_media/product-overview-dark.png differ diff --git a/docs/docs/_media/product-overview.png b/docs/docs/_media/product-overview.png new file mode 100644 index 000000000..c44e633cd Binary files /dev/null and b/docs/docs/_media/product-overview.png differ diff --git a/docs/docs/_media/tcb.svg b/docs/docs/_media/tcb.svg index e5bcb5b95..f692ffd0e 100644 --- a/docs/docs/_media/tcb.svg +++ b/docs/docs/_media/tcb.svg @@ -1,14 +1,35 @@ + x="-3" + y="-3" + width="3045" + height="400" + id="rect31" /> diff --git a/docs/docs/architecture/attestation.md b/docs/docs/architecture/attestation.md index 9bd157460..572a8511f 100644 --- a/docs/docs/architecture/attestation.md +++ b/docs/docs/architecture/attestation.md @@ -78,15 +78,15 @@ The idea is that Constellation nodes should have verifiable integrity from the C The solution is a verifiable boot chain and an integrity-protected runtime environment. Constellation uses measured boot within CVMs, measuring each component in the boot process before executing it. -Outside of CC, this is usually implemented via TPMs. +Outside of CC, it's usually implemented via TPMs. 
CVM technologies differ in how they implement runtime measurements, but the general concepts are similar to those of a TPM. For simplicity, TPM terminology like *PCR* is used in the following. -When a Constellation node image boots inside a CVM, measured boot is used for all stages and components of the boot chain. +When a Constellation node image boots inside a CVM, it uses measured boot for all stages and components of the boot chain. This process goes up to the root filesystem. -The root filesystem is mounted read-only with integrity protection. +The root filesystem is mounted read-only with integrity protection, guaranteeing forward integrity. For the details on the image and boot stages see the [image architecture](../architecture/images.md) documentation. -Any changes to the image will inevitably also change the corresponding PCR values. +Any changes to the image will inevitably also change the measured boot's PCR values. To create a node attestation statement, the Constellation image obtains a CVM attestation statement from the hardware. This includes the runtime measurements and thereby binds the measured boot results to the CVM hardware measurement. @@ -121,40 +121,8 @@ Constellation allows to specify in the config which measurements should be enfor Enforcing non-reproducible measurements controlled by the cloud provider means that changes in these values require manual updates to the cluster's config. By default, Constellation only enforces measurements that are stable values produced by the infrastructure or by Constellation directly. - - - -Constellation uses the [vTPM](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html) (NitroTPM) feature of the [AWS Nitro System](http://aws.amazon.com/ec2/nitro/) on AWS for runtime measurements. - -The vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. -The VMs are attested by obtaining signed PCR values over the VM's boot configuration from the TPM and comparing them to a known, good state (measured boot). - -The following table lists all PCR values of the vTPM and the measured components. -It also lists what components of the boot chain did the measurements and if the value is reproducible and verifiable. -The latter means that the value can be generated offline and compared to the one in the vTPM. 
- -| PCR | Components | Measured by | Reproducible and verifiable | -| ----------- | ---------------------------------------------------------------- | -------------------------------------- | --------------------------- | -| 0 | Firmware | AWS | No | -| 1 | Firmware | AWS | No | -| 2 | Firmware | AWS | No | -| 3 | Firmware | AWS | No | -| 4 | Constellation Bootloader, Kernel, initramfs, Kernel command line | AWS, Constellation Bootloader | Yes | -| 5 | Firmware | AWS | No | -| 6 | Firmware | AWS | No | -| 7 | Secure Boot Policy | AWS, Constellation Bootloader | No | -| 8 | - | - | - | -| 9 | initramfs, Kernel command line | Linux Kernel | Yes | -| 10 | User space | Linux IMA | No[^1] | -| 11 | Unified Kernel Image components | Constellation Bootloader | Yes | -| 12 | Reserved | (User space, Constellation Bootloader) | Yes | -| 13 | Reserved | (Constellation Bootloader) | Yes | -| 14 | Secure Boot State | Constellation Bootloader | No | -| 15 | ClusterID | Constellation Bootstrapper | Yes | -| 16–23 | Unused | - | - | - - - + + Constellation uses the [vTPM](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch#vtpm) feature of Azure CVMs for runtime measurements. This vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. @@ -184,8 +152,8 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + Constellation uses the [vTPM](https://cloud.google.com/compute/confidential-vm/docs/about-cvm) feature of CVMs on GCP for runtime measurements. Note that this vTPM doesn't run inside the hardware-protected CVM context, but is emulated by the hypervisor. @@ -217,8 +185,40 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + + +Constellation uses the [vTPM](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html) (NitroTPM) feature of the [AWS Nitro System](http://aws.amazon.com/ec2/nitro/) on AWS for runtime measurements. + +The vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. +The VMs are attested by obtaining signed PCR values over the VM's boot configuration from the TPM and comparing them to a known, good state (measured boot). + +The following table lists all PCR values of the vTPM and the measured components. +It also lists what components of the boot chain did the measurements and if the value is reproducible and verifiable. +The latter means that the value can be generated offline and compared to the one in the vTPM. 
+ +| PCR | Components | Measured by | Reproducible and verifiable | +| ----------- | ---------------------------------------------------------------- | -------------------------------------- | --------------------------- | +| 0 | Firmware | AWS | No | +| 1 | Firmware | AWS | No | +| 2 | Firmware | AWS | No | +| 3 | Firmware | AWS | No | +| 4 | Constellation Bootloader, Kernel, initramfs, Kernel command line | AWS, Constellation Bootloader | Yes | +| 5 | Firmware | AWS | No | +| 6 | Firmware | AWS | No | +| 7 | Secure Boot Policy | AWS, Constellation Bootloader | No | +| 8 | - | - | - | +| 9 | initramfs, Kernel command line | Linux Kernel | Yes | +| 10 | User space | Linux IMA | No[^1] | +| 11 | Unified Kernel Image components | Constellation Bootloader | Yes | +| 12 | Reserved | (User space, Constellation Bootloader) | Yes | +| 13 | Reserved | (Constellation Bootloader) | Yes | +| 14 | Secure Boot State | Constellation Bootloader | No | +| 15 | ClusterID | Constellation Bootstrapper | Yes | +| 16–23 | Unused | - | - | + + + Constellation uses a hypervisor-based vTPM for runtime measurements. @@ -249,38 +249,16 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + ### CVM verification To verify the integrity of the received attestation statement, a chain of trust from the CVM technology to the interface providing the statement has to be established. For verification of the CVM technology, Constellation may expose additional options in its config file. - - - -On AWS, AMD SEV-SNP is used to provide runtime encryption to the VMs. -An SEV-SNP attestation report is used to establish trust in the VM. -You may customize certain parameters for verification of the attestation statement using the Constellation config file. - -* TCB versions - - You can set the minimum version numbers of components in the SEV-SNP TCB. - Use the latest versions to enforce that only machines with the most recent firmware updates are allowed to join the cluster. - Alternatively, you can set a lower minimum version to allow slightly out-of-date machines to still be able to join the cluster. - -* AMD Root Key Certificate - - This certificate is the root of trust for verifying the SEV-SNP certificate chain. - -* AMD Signing Key Certificate - - This is the intermediate certificate for verifying the SEV-SNP report's signature. - If it's not specified, the CLI fetches it from the AMD key distribution server. - - - + + On Azure, AMD SEV-SNP is used to provide runtime encryption to the VMs. An SEV-SNP attestation report is used to establish trust in the vTPM running inside the VM. @@ -302,10 +280,17 @@ You may customize certain parameters for verification of the attestation stateme More explicitly, it controls the verification of the `IDKeyDigest` value in the SEV-SNP attestation report. You can provide a list of accepted key digests and specify a policy on how this list is compared against the reported `IDKeyDigest`. - - + + -On GCP, AMD SEV-SNP is used to provide runtime encryption to the VMs. +On GCP, AMD SEV-ES is used to provide runtime encryption to the VMs. +The hypervisor-based vTPM is used to establish trust in the VM via [runtime measurements](#runtime-measurements). +There is no additional configuration available for GCP. + + + + +On AWS, AMD SEV-SNP is used to provide runtime encryption to the VMs. An SEV-SNP attestation report is used to establish trust in the VM. 
You may customize certain parameters for verification of the attestation statement using the Constellation config file. @@ -324,15 +309,15 @@ You may customize certain parameters for verification of the attestation stateme This is the intermediate certificate for verifying the SEV-SNP report's signature. If it's not specified, the CLI fetches it from the AMD key distribution server. - - + + On STACKIT, AMD SEV-ES is used to provide runtime encryption to the VMs. The hypervisor-based vTPM is used to establish trust in the VM via [runtime measurements](#runtime-measurements). There is no additional configuration available for STACKIT. - - + + ## Cluster attestation @@ -356,52 +341,32 @@ When an initialized node tries to join another cluster, its measurements inevita The [*VerificationService*](microservices.md#verificationservice) provides an endpoint for obtaining its hardware-based remote attestation statement, which includes the runtime measurements. A user can [verify](../workflows/verify-cluster.md) this statement and compare the measurements against the configured ground truth and, thus, verify the identity and integrity of all Constellation components and the cluster configuration. Subsequently, the user knows that the entire cluster is in the expected state and is trustworthy. -## Putting it all together +## Chain of trust -This section puts the aforementioned concepts together and illustrate how trust into a Constellation cluster is established and maintained. +So far, this page described how an entire Constellation cluster can be verified using hardware attestation capabilities and runtime measurements. +The last missing link is how the ground truth in the form of runtime measurements can be securely distributed to the verifying party. -### CLI and node images +The build process of Constellation images also creates the ground truth runtime measurements. +With every release, Edgeless Systems publishes signed runtime measurements. -It all starts with the CLI executable. The CLI is signed by Edgeless Systems. To ensure non-repudiability for CLI releases, Edgeless Systems publishes corresponding signatures to the public ledger of the [sigstore project](https://www.sigstore.dev/). There's a [step-by-step guide](../workflows/verify-cli.md) on how to verify CLI signatures based on sigstore. +The CLI executable is also signed by Edgeless Systems. +You can [verify its signature](../workflows/verify-cli.md). -The CLI contains the latest runtime measurements of the Constellation node image for all supported cloud platforms. In case a different version of the node image is to be used, the corresponding runtime measurements can be fetched using the CLI's [fetch-measurements command](../reference/cli.md#constellation-config-fetch-measurements). This command downloads the runtime measurements and the corresponding signature from cdn.confidential.cloud. See for example the following files corresponding to node image v2.16.3: +The CLI contains the public key required to verify signed runtime measurements from Edgeless Systems. +When a cluster is [created](../workflows/create.md) or [upgraded](../workflows/upgrade.md), the CLI automatically verifies the measurements for the selected image. 
-* [Measurements](https://cdn.confidential.cloud/constellation/v2/ref/-/stream/stable/v2.16.3/image/measurements.json) -* [Signature](https://cdn.confidential.cloud/constellation/v2/ref/-/stream/stable/v2.16.3/image/measurements.json.sig) - -The CLI contains the long-term public key of Edgeless Systems to verify the signature of downloaded runtime measurements. - -### Cluster creation - -When a cluster is [created](../workflows/create.md), the CLI automatically verifies the runtime measurements of the *first node* using remote attestation. Based on this, the CLI and the first node set up a temporary TLS connection. This [aTLS](#attested-tls-atls) connection is used for two things: - -1. The CLI sends the [master secret](../architecture/keys.md#master-secret) of the to-be-created cluster to the CLI. The master secret is generated by the first node. -2. The first node sends a [kubeconfig file](https://www.redhat.com/sysadmin/kubeconfig) with Kubernetes credentials to the CLI. - -After this, the aTLS connection is closed and the first node bootstraps the Kubernetes cluster. All subsequent interactions between the CLI and the cluster go via the [Kubernetes API](https://kubernetes.io/docs/concepts/overview/kubernetes-api/) server running inside the cluster. The CLI (and other tools like kubectl) use the credentials referenced by the kubeconfig file to authenticate themselves towards the Kubernetes API server and to establish a mTLS connection. - -The CLI connects to the Kubernetes API to write the runtime measurements for the applicable node image to etcd. The JoinService uses these runtime measurements to verify all nodes that join the cluster subsequently. - -### Chain of trust - -In summary, there's a chain of trust based on cryptographic signatures that goes from the user to the cluster via the CLI. This is illustrated in the following diagram. +Thus, there's a chain of trust based on cryptographic signatures, which goes from CLI to runtime measurements to images. This is illustrated in the following diagram. ```mermaid flowchart LR - A[User]-- "verifies" -->B[CLI] - B[CLI]-- "verifies" -->C([Runtime measurements]) - D[Edgeless Systems]-- "signs" -->B[CLI] - D[Edgeless Systems]-- "signs" -->C([Runtime measurements]) - B[CLI]-- "verifies (remote attestation)" -->E[First node] - E[First node]-- "verifies (remote attestation)" -->F[Other nodes] - C([Runtime measurements]) -.-> E[First node] - C([Runtime measurements]) -.-> F[Other nodes] + A[Edgeless]-- "signs (cosign)" -->B[CLI] + C[User]-- "verifies (cosign)" -->B[CLI] + B[CLI]-- "contains" -->D["Public Key"] + A[Edgeless]-- "signs" -->E["Runtime measurements"] + D["Public key"]-- "verifies" -->E["Runtime measurements"] + E["Runtime measurements"]-- "verify" -->F["Constellation cluster"] ``` -### Upgrades - -Whenever a cluster is [upgraded](../workflows/upgrade.md) to a new version of the node image, the CLI sends the corresponding runtime measurements via the Kubernetes API server. The new runtime measurements are stored in etcd within the cluster and replace any previous runtime measurements. The new runtime measurements are then used automatically by the JoinService for the verification of new nodes. - ## References [^1]: Linux IMA produces runtime measurements of user-space binaries. 
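
The attestation.md changes above describe signed runtime measurements that the CLI fetches and verifies automatically. For readers who want to check a measurements file by hand, here is a minimal sketch using cosign's `verify-blob` command. The CDN URL pattern and file names follow the example links referenced in the removed section; the local file name `cosign.pub` for Edgeless Systems' signing public key and the chosen image version are assumptions for illustration only.

```bash
# Sketch: manually verify signed runtime measurements for one image version.
# Assumes the CDN layout shown above and a local copy of the signing public key (cosign.pub).
VERSION="v2.16.3"   # example image version; adjust to the image you actually use
BASE="https://cdn.confidential.cloud/constellation/v2/ref/-/stream/stable/${VERSION}/image"

# Download the measurements file and its detached signature.
curl -fsSLO "${BASE}/measurements.json"
curl -fsSLO "${BASE}/measurements.json.sig"

# cosign verify-blob checks the detached signature against the provided public key.
cosign verify-blob \
  --key cosign.pub \
  --signature measurements.json.sig \
  measurements.json
```

If verification succeeds, the measurements in `measurements.json` can be compared against the values reported by a cluster's VerificationService.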
diff --git a/docs/docs/architecture/keys.md b/docs/docs/architecture/keys.md index 49821cd0b..f2c8c3fba 100644 --- a/docs/docs/architecture/keys.md +++ b/docs/docs/architecture/keys.md @@ -42,6 +42,7 @@ Each node creates its own [Curve25519](http://cr.yp.to/ecdh.html) encryption key A node uses another node's public key to decrypt and encrypt traffic from and to Cilium-managed endpoints running on that node. Connections are always encrypted peer-to-peer using [ChaCha20](http://cr.yp.to/chacha.html) with [Poly1305](http://cr.yp.to/mac.html). WireGuard implements [forward secrecy with key rotation every 2 minutes](https://lists.zx2c4.com/pipermail/wireguard/2017-December/002141.html). +Cilium supports [key rotation](https://docs.cilium.io/en/stable/security/network/encryption-ipsec/#key-rotation) for the long-term node keys via Kubernetes secrets. ## Storage encryption @@ -104,7 +105,7 @@ Initially, it will support the following KMSs: * [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/#product-overview) * [KMIP-compatible KMS](https://www.oasis-open.org/committees/tc_home.php?wg_abbrev=kmip) -Storing the keys in Cloud KMS of AWS, Azure, or GCP binds the key usage to the particular cloud identity access management (IAM). +Storing the keys in Cloud KMS of AWS, GCP, or Azure binds the key usage to the particular cloud identity access management (IAM). In the future, Constellation will support remote attestation-based access policies for Cloud KMS once available. Note that using a Cloud KMS limits the isolation and protection to the guarantees of the particular offering. diff --git a/docs/docs/architecture/versions.md b/docs/docs/architecture/versions.md index 9c9aebc52..fbdda5a57 100644 --- a/docs/docs/architecture/versions.md +++ b/docs/docs/architecture/versions.md @@ -16,6 +16,6 @@ Subsequent Constellation releases drop support for the oldest (and deprecated) K The following Kubernetes versions are currently supported: -* v1.30.14 -* v1.31.11 -* v1.32.7 +* v1.27.9 +* v1.28.5 +* v1.29.0 diff --git a/docs/docs/getting-started/first-steps-local.md b/docs/docs/getting-started/first-steps-local.md index 98f0302de..052d29eae 100644 --- a/docs/docs/getting-started/first-steps-local.md +++ b/docs/docs/getting-started/first-steps-local.md @@ -45,8 +45,8 @@ sudo iptables -P FORWARD ACCEPT ## Create a cluster - - + + With the `constellation mini` command, you can deploy and test Constellation locally. This mode is called MiniConstellation. Conceptually, MiniConstellation is similar to [MicroK8s](https://microk8s.io/), [K3s](https://k3s.io/), and [minikube](https://minikube.sigs.k8s.io/docs/). @@ -74,8 +74,8 @@ constellation mini up This will configure your current directory as the [workspace](../architecture/orchestration.md#workspaces) for this cluster. All `constellation` commands concerning this cluster need to be issued from this directory. - - + + With the QEMU provider, you can create a local Constellation cluster as if it were in the cloud. The provider uses [QEMU](https://www.qemu.org/) to create multiple VMs for the cluster nodes, which interact with each other. @@ -145,8 +145,8 @@ attaching persistent storage, or autoscaling aren't available. 
export KUBECONFIG="$PWD/constellation-admin.conf" ``` - - + + ## Connect to the cluster @@ -199,8 +199,8 @@ worker-0 Ready 32s v1.24.6 ## Terminate your cluster - - + + Once you are done, you can clean up the created resources using the following command: @@ -211,8 +211,8 @@ constellation mini down This will destroy your cluster and clean up your workspace. The VM image and cluster configuration file (`constellation-conf.yaml`) will be kept and may be reused to create new clusters. - - + + Once you are done, you can clean up the created resources using the following command: @@ -240,8 +240,8 @@ Your Constellation cluster was terminated successfully. This will destroy your cluster and clean up your workspace. The VM image and cluster configuration file (`constellation-conf.yaml`) will be kept and may be reused to create new clusters. - - + + ## Troubleshooting diff --git a/docs/docs/getting-started/first-steps.md b/docs/docs/getting-started/first-steps.md index fb8437a06..716c24c03 100644 --- a/docs/docs/getting-started/first-steps.md +++ b/docs/docs/getting-started/first-steps.md @@ -13,43 +13,84 @@ If you encounter any problem with the following steps, make sure to use the [lat ## Create a cluster -1. Create the [configuration file](../workflows/config.md) and state file for your cloud provider. If you are following the steps of this guide, there is no need to edit the file. +1. Create the [configuration file](../workflows/config.md) and state file for your cloud provider. - - + - ```bash - constellation config generate aws - ``` - - - + ```bash constellation config generate azure ``` - - + + + ```bash constellation config generate gcp ``` - - + + + + + ```bash + constellation config generate aws + ``` + + + + ```bash constellation config generate stackit ``` - - + + + 2. Create your [IAM configuration](../workflows/config.md#creating-an-iam-configuration). - - + + + + + ```bash + constellation iam create azure --region=westus --resourceGroup=constellTest --servicePrincipal=spTest --update-config + ``` + + This command creates IAM configuration on the Azure region `westus` creating a new resource group `constellTest` and a new service principal `spTest`. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. + + CVMs are available in several Azure regions. Constellation OS images are currently replicated to the following: + + * `germanywestcentral` + * `westus` + * `eastus` + * `northeurope` + * `westeurope` + * `southeastasia` + + If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+Azure+image+region:+xx-xxxx-x). + + You can find a list of all [regions in Azure's documentation](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=virtual-machines®ions=all). + + + + + + ```bash + constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west2-a --serviceAccountID=constell-test --update-config + ``` + + This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west2-a` creating a new service account `constell-test`. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. + + Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. 
You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `C2D` or `N2D`. + + + + ```bash constellation iam create aws --zone=us-east-2a --prefix=constellTest --update-config @@ -76,55 +117,21 @@ If you encounter any problem with the following steps, make sure to use the [lat You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). - - + - ```bash - constellation iam create azure --subscriptionID 00000000-0000-0000-0000-000000000000 --region=westus --resourceGroup=constellTest --servicePrincipal=spTest --update-config - ``` - - This command creates IAM configuration on the Azure region `westus` creating a new resource group `constellTest` and a new service principal `spTest`. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. - - CVMs are available in several Azure regions. Constellation OS images are currently replicated to the following: - - * `germanywestcentral` - * `westus` - * `eastus` - * `northeurope` - * `westeurope` - * `southeastasia` - - If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+Azure+image+region:+xx-xxxx-x). - - You can find a list of all [regions in Azure's documentation](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=virtual-machines®ions=all). - - - - - ```bash - constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --prefix=constell-test --update-config - ``` - - This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. - - Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `C2D` or `N2D`. - - - + To use Constellation on STACKIT, the cluster will use the User Access Token (UAT) that's generated [during the install step](./install.md). After creating the accounts, fill in the STACKIT details in `constellation-conf.yaml` under `provider.openstack`: - * `stackitProjectID`: STACKIT project id (can be found after login on the [STACKIT portal](https://portal.stackit.cloud)) + - `projectID`: OpenStack project id (can be found in `clouds.yaml` or `openrc` file of UAT) + - `projectName`: OpenStack project name (can be found in `clouds.yaml` or `openrc` file of UAT) + - `stackitProjectID`: STACKIT project id (can be found after login on ) + - `username`: username of the UAT + - `password`: password of the UAT - :::caution - - `stackitProjectID` refers to the ID of your STACKIT project. The STACKIT portal also shows the OpenStack ID that's associated with your project in some places. Make sure you insert the STACKIT project ID in the `constellation-conf.yaml` file. It's of the format `XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX`. 
- - ::: - - - + + :::tip To learn about all options you have for managing IAM resources and Constellation configuration, see the [Configuration workflow](../workflows/config.md). diff --git a/docs/docs/getting-started/install.md b/docs/docs/getting-started/install.md index f072407d8..9d35c912b 100644 --- a/docs/docs/getting-started/install.md +++ b/docs/docs/getting-started/install.md @@ -9,7 +9,7 @@ Make sure the following requirements are met: * Your machine is running Linux, macOS, or Windows * You have admin rights on your machine * [kubectl](https://kubernetes.io/docs/tasks/tools/) is installed -* Your CSP is Amazon Web Services (AWS), Microsoft Azure, Google Cloud Platform (GCP), or STACKIT +* Your CSP is Microsoft Azure, Google Cloud Platform (GCP), Amazon Web Services (AWS), or STACKIT ## Install the Constellation CLI @@ -22,8 +22,8 @@ If you prefer to use Terraform, you can alternatively use the [Terraform provide The CLI executable is available at [GitHub](https://github.com/edgelesssys/constellation/releases). Install it with the following commands: - - + + 1. Download the CLI: @@ -39,8 +39,8 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-linux-amd64 /usr/local/bin/constellation ``` - - + + 1. Download the CLI: @@ -56,9 +56,9 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-linux-arm64 /usr/local/bin/constellation ``` - + - + 1. Download the CLI: @@ -74,9 +74,9 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-darwin-arm64 /usr/local/bin/constellation ``` - + - + 1. Download the CLI: @@ -92,9 +92,9 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-darwin-amd64 /usr/local/bin/constellation ``` - + - + 1. Download the CLI: @@ -115,8 +115,8 @@ Invoke-WebRequest -OutFile ./constellation.exe -Uri 'https://github.com/edgeless 5. Click `New` 6. Enter the path to the folder containing the binary you want on your PATH: `C:\Program Files\Constellation\bin` - - + + :::tip The CLI supports autocompletion for various shells. To set it up, run `constellation completion` and follow the given steps. @@ -132,63 +132,12 @@ If you don't have a cloud subscription, you can also set up a [local Constellati ### Required permissions - - - -To set up a Constellation cluster, you need to perform two tasks that require permissions: create the infrastructure and create roles for cluster nodes. Both of these actions can be performed by different users, e.g., an administrator to create roles and a DevOps engineer to create the infrastructure. 
- -To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "ec2:DescribeAccountAttributes", - "iam:AddRoleToInstanceProfile", - "iam:AttachRolePolicy", - "iam:CreateInstanceProfile", - "iam:CreatePolicy", - "iam:CreateRole", - "iam:DeleteInstanceProfile", - "iam:DeletePolicy", - "iam:DeletePolicyVersion", - "iam:DeleteRole", - "iam:DetachRolePolicy", - "iam:GetInstanceProfile", - "iam:GetPolicy", - "iam:GetPolicyVersion", - "iam:GetRole", - "iam:ListAttachedRolePolicies", - "iam:ListInstanceProfilesForRole", - "iam:ListPolicyVersions", - "iam:ListRolePolicies", - "iam:PassRole", - "iam:RemoveRoleFromInstanceProfile", - "sts:GetCallerIdentity" - ], - "Resource": "*" - } - ] -} -``` - -The built-in `AdministratorAccess` policy is a superset of these permissions. - -To [create a Constellation cluster](../workflows/create.md), see the permissions of [main.tf](https://github.com/edgelesssys/constellation/blob/main/terraform/infrastructure/iam/aws/main.tf). - -The built-in `PowerUserAccess` policy is a superset of these permissions. - -Follow Amazon's guide on [understanding](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) and [managing policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html). - - - + + The following [resource providers need to be registered](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/resource-providers-and-types#register-resource-provider) in your subscription: -* `Microsoft.Attestation` +* `Microsoft.Attestation` \[2] * `Microsoft.Compute` * `Microsoft.Insights` * `Microsoft.ManagedIdentity` @@ -208,7 +157,7 @@ The built-in `Owner` role is a superset of these permissions. To [create a Constellation cluster](../workflows/create.md), you need the following permissions: -* `Microsoft.Attestation/attestationProviders/*` +* `Microsoft.Attestation/attestationProviders/*` \[2] * `Microsoft.Compute/virtualMachineScaleSets/*` * `Microsoft.Insights/components/*` * `Microsoft.ManagedIdentity/userAssignedIdentities/*` @@ -226,17 +175,16 @@ Follow Microsoft's guide on [understanding](https://learn.microsoft.com/en-us/az 1: You can omit `*/register/Action` if the resource providers mentioned above are already registered and the `ARM_SKIP_PROVIDER_REGISTRATION` environment variable is set to `true` when creating the IAM configuration. - - +2: You can omit `Microsoft.Attestation/attestationProviders/*` and the registration of `Microsoft.Attestation` if `EnforceIDKeyDigest` isn't set to `MAAFallback` in the [config file](../workflows/config.md#configure-your-cluster). + + + Create a new project for Constellation or use an existing one. Enable the [Compute Engine API](https://console.cloud.google.com/apis/library/compute.googleapis.com) on it. 
To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: -* `iam.roles.create` -* `iam.roles.delete` -* `iam.roles.get` * `iam.serviceAccountKeys.create` * `iam.serviceAccountKeys.delete` * `iam.serviceAccountKeys.get` @@ -263,11 +211,6 @@ To [create a Constellation cluster](../workflows/create.md), you need the follow * `compute.firewalls.delete` * `compute.firewalls.get` * `compute.firewalls.update` -* `compute.forwardingRules.create` -* `compute.forwardingRules.delete` -* `compute.forwardingRules.get` -* `compute.forwardingRules.setLabels` -* `compute.forwardingRules.list` * `compute.globalAddresses.create` * `compute.globalAddresses.delete` * `compute.globalAddresses.get` @@ -320,16 +263,67 @@ Together, the built-in roles `roles/editor`, `roles/compute.instanceAdmin` and ` Follow Google's guide on [understanding](https://cloud.google.com/iam/docs/understanding-roles) and [assigning roles](https://cloud.google.com/iam/docs/granting-changing-revoking-access). - - + + + +To set up a Constellation cluster, you need to perform two tasks that require permissions: create the infrastructure and create roles for cluster nodes. Both of these actions can be performed by different users, e.g., an administrator to create roles and a DevOps engineer to create the infrastructure. + +To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:DescribeAccountAttributes", + "iam:AddRoleToInstanceProfile", + "iam:AttachRolePolicy", + "iam:CreateInstanceProfile", + "iam:CreatePolicy", + "iam:CreateRole", + "iam:DeleteInstanceProfile", + "iam:DeletePolicy", + "iam:DeletePolicyVersion", + "iam:DeleteRole", + "iam:DetachRolePolicy", + "iam:GetInstanceProfile", + "iam:GetPolicy", + "iam:GetPolicyVersion", + "iam:GetRole", + "iam:ListAttachedRolePolicies", + "iam:ListInstanceProfilesForRole", + "iam:ListPolicyVersions", + "iam:ListRolePolicies", + "iam:PassRole", + "iam:RemoveRoleFromInstanceProfile", + "sts:GetCallerIdentity" + ], + "Resource": "*" + } + ] +} +``` + +The built-in `AdministratorAccess` policy is a superset of these permissions. + +To [create a Constellation cluster](../workflows/create.md), see the permissions of [main.tf](https://github.com/edgelesssys/constellation/blob/main/terraform/infrastructure/iam/aws/main.tf). + +The built-in `PowerUserAccess` policy is a superset of these permissions. + +Follow Amazon's guide on [understanding](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) and [managing policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html). + + + Constellation on STACKIT requires a User Access Token (UAT) for the OpenStack API and a STACKIT service account. The UAT already has all required permissions by default. The STACKIT service account needs the `editor` role to create STACKIT LoadBalancers. Look at the [STACKIT documentation](https://docs.stackit.cloud/stackit/en/getting-started-in-service-accounts-134415831.html) on how to create the service account and assign the role. - - + + ### Authentication @@ -339,25 +333,8 @@ You need to authenticate with your CSP. The following lists the required steps f The steps for a *testing* environment are simpler. However, they may expose secrets to the CSP. If in doubt, follow the *production* steps. 
::: - - - -**Testing** - -You can use the [AWS CloudShell](https://console.aws.amazon.com/cloudshell/home). Make sure you are [authorized to use it](https://docs.aws.amazon.com/cloudshell/latest/userguide/sec-auth-with-identities.html). - -**Production** - -Use the latest version of the [AWS CLI](https://aws.amazon.com/cli/) on a trusted machine: - -```bash -aws configure -``` - -Options and first steps are described in the [AWS CLI documentation](https://docs.aws.amazon.com/cli/index.html). - - - + + **Testing** @@ -373,8 +350,8 @@ az login Other options are described in Azure's [authentication guide](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli). - - + + **Testing** @@ -397,18 +374,30 @@ Use one of the following options on a trusted machine: Follow [Google's guide](https://cloud.google.com/docs/authentication/production#manually) for setting up your credentials. - - + + + +**Testing** + +You can use the [AWS CloudShell](https://console.aws.amazon.com/cloudshell/home). Make sure you are [authorized to use it](https://docs.aws.amazon.com/cloudshell/latest/userguide/sec-auth-with-identities.html). + +**Production** + +Use the latest version of the [AWS CLI](https://aws.amazon.com/cli/) on a trusted machine: + +```bash +aws configure +``` + +Options and first steps are described in the [AWS CLI documentation](https://docs.aws.amazon.com/cli/index.html). + + + You need to authenticate with the infrastructure API (OpenStack) and create a service account (STACKIT API). 1. [Follow the STACKIT documentation](https://docs.stackit.cloud/stackit/en/step-1-generating-of-user-access-token-11763726.html) for obtaining a User Access Token (UAT) to use the infrastructure API -2. Create a configuration file with the credentials from the User Access Token under: - * Linux: `~/.config/openstack/clouds.yaml` - * macOS: `/Users//Library/Application Support/openstack/clouds.yaml` or `/etc/openstack/clouds.yaml` - * Windows: `%AppData%\openstack\clouds.yaml` - - +2. Create a configuration file under `~/.config/openstack/clouds.yaml` (`%AppData%\openstack\clouds.yaml` on Windows) with the credentials from the User Access Token ```yaml clouds: stackit: @@ -416,31 +405,23 @@ You need to authenticate with the infrastructure API (OpenStack) and create a se auth_url: https://keystone.api.iaas.eu01.stackit.cloud/v3 username: REPLACE_WITH_UAT_USERNAME password: REPLACE_WITH_UAT_PASSWORD - project_id: REPLACE_WITH_OPENSTACK_PROJECT_ID + project_id: REPLACE_WITH_STACKIT_PROJECT_ID project_name: REPLACE_WITH_STACKIT_PROJECT_NAME user_domain_name: portal_mvp project_domain_name: portal_mvp region_name: RegionOne identity_api_version: 3 ``` - -:::caution - -`project_id` refers to the ID of your OpenStack project. The STACKIT portal also shows the STACKIT ID that's associated with your project in some places. Make sure you insert the OpenStack project ID in the `clouds.yaml` file. - -::: - 3. [Follow the STACKIT documentation](https://docs.stackit.cloud/stackit/en/getting-started-in-service-accounts-134415831.html) for creating a service account and an access token 4. Assign the `editor` role to the service account by [following the documentation](https://docs.stackit.cloud/stackit/en/getting-started-in-service-accounts-134415831.html) 5. 
Create a configuration file under `~/.stackit/credentials.json` (`%USERPROFILE%\.stackit\credentials.json` on Windows) - ```json {"STACKIT_SERVICE_ACCOUNT_TOKEN":"REPLACE_WITH_TOKEN"} ``` - + - + ## Next steps diff --git a/docs/docs/getting-started/marketplaces.md b/docs/docs/getting-started/marketplaces.md index a6763a42a..b16d796d2 100644 --- a/docs/docs/getting-started/marketplaces.md +++ b/docs/docs/getting-started/marketplaces.md @@ -4,8 +4,8 @@ Constellation is available through the Marketplaces of AWS, Azure, GCP, and STAC This document explains how to run Constellation with the dynamically billed cloud marketplace images. - - + + To use Constellation's marketplace images, ensure that you are subscribed to the [marketplace offering](https://aws.amazon.com/marketplace/pp/prodview-2mbn65nv57oys) through the web portal. @@ -15,8 +15,8 @@ Then, enable the use of marketplace images in your Constellation `constellation- yq eval -i ".provider.aws.useMarketplaceImage = true" constellation-conf.yaml ``` - - + + Constellation has a private marketplace plan. Please [contact us](https://www.edgeless.systems/enterprise-support/) to gain access. @@ -32,8 +32,8 @@ Then, enable the use of marketplace images in your Constellation `constellation- yq eval -i ".provider.azure.useMarketplaceImage = true" constellation-conf.yaml ``` - - + + To use a marketplace image, ensure that the account is entitled to use marketplace images by Edgeless Systems by accepting the terms through the [web portal](https://console.cloud.google.com/marketplace/vm/config/edgeless-systems-public/constellation). @@ -43,13 +43,13 @@ Then, enable the use of marketplace images in your Constellation `constellation- yq eval -i ".provider.gcp.useMarketplaceImage = true" constellation-conf.yaml ``` - - + + On STACKIT, the selected Constellation image is always a marketplace image. You can find more information on the STACKIT portal. - - + + Ensure that the cluster uses an official release image version (i.e., `.image=vX.Y.Z` in the `constellation-conf.yaml` file). diff --git a/docs/docs/overview/clouds.md b/docs/docs/overview/clouds.md index b2695d28e..b2de81e4b 100644 --- a/docs/docs/overview/clouds.md +++ b/docs/docs/overview/clouds.md @@ -12,46 +12,46 @@ For Constellation, the ideal environment provides the following: (1) is a functional must-have. (2)--(5) are required for remote attestation that fully keeps the infrastructure/cloud out. Constellation can work without them or with approximations, but won't protect against certain privileged attackers anymore. -The following table summarizes the state of features for different infrastructures. +The following table summarizes the state of features for different infrastructures as of June 2023. -| **Feature** | **AWS** | **Azure** | **GCP** | **STACKIT** | **OpenStack (Yoga)** | -|-----------------------------------|---------|-----------|---------|--------------|----------------------| -| **1. Custom images** | Yes | Yes | Yes | Yes | Yes | -| **2. SEV-SNP or TDX** | Yes | Yes | Yes | No | Depends on kernel/HV | -| **3. Raw guest attestation** | Yes | Yes | Yes | No | Depends on kernel/HV | -| **4. Reviewable firmware** | Yes | No* | No | No | Depends on kernel/HV | -| **5. Confidential measured boot** | No | Yes | No | No | Depends on kernel/HV | - -## Amazon Web Services (AWS) - -Amazon EC2 [supports AMD SEV-SNP](https://aws.amazon.com/de/about-aws/whats-new/2023/04/amazon-ec2-amd-sev-snp/). -Regarding (3), AWS provides direct access to attestation statements. 
-However, regarding (5), attestation is partially based on the [NitroTPM](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html) for [measured boot](../architecture/attestation.md#measured-boot), which is a vTPM managed by the Nitro hypervisor. -Hence, the hypervisor is currently part of Constellation's TCB. -Regarding (4), the [firmware is open source](https://github.com/aws/uefi) and can be reproducibly built. +| **Feature** | **Azure** | **GCP** | **AWS** | **STACKIT** | **OpenStack (Yoga)** | +|-----------------------------------|-----------|---------|---------|--------------|----------------------| +| **1. Custom images** | Yes | Yes | Yes | Yes | Yes | +| **2. SEV-SNP or TDX** | Yes | Yes | Yes | No | Depends on kernel/HV | +| **3. Raw guest attestation** | Yes | Yes | Yes | No | Depends on kernel/HV | +| **4. Reviewable firmware** | No | No | Yes | No | Depends on kernel/HV | +| **5. Confidential measured boot** | Yes | No | No | No | Depends on kernel/HV | ## Microsoft Azure With its [CVM offering](https://docs.microsoft.com/en-us/azure/confidential-computing/confidential-vm-overview), Azure provides the best foundations for Constellation. -Regarding (3), Azure provides direct access to attestation statements. +Regarding (3), Azure provides direct access to remote-attestation statements. The firmware runs in an isolated domain inside the CVM and exposes a vTPM (5), but it's closed source (4). On SEV-SNP, Azure uses VM Privilege Level (VMPL) isolation for the separation of firmware and the rest of the VM; on TDX, they use TD partitioning. This firmware is signed by Azure. -The signature is reflected in the attestation statements of CVMs. +The signature is reflected in the remote-attestation statements of CVMs. Thus, the Azure closed-source firmware becomes part of Constellation's trusted computing base (TCB). -\* Recently, [Azure announced the open source paravisor OpenHCL](https://techcommunity.microsoft.com/blog/windowsosplatform/openhcl-the-new-open-source-paravisor/4273172). It's the foundation for fully open source and verifiable CVM firmware. Once Azure provides their CVM firmware with reproducible builds based on OpenHCL, (4) switches from *No* to *Yes*. Constellation will support OpenHCL based firmware on Azure in the future. - ## Google Cloud Platform (GCP) -The [CVMs Generally Available in GCP](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview#technologies) are based on AMD SEV-ES or SEV-SNP. -Regarding (3), with their SEV-SNP offering Google provides direct access to attestation statements. +The [CVMs Generally Available in GCP](https://cloud.google.com/compute/confidential-vm/docs/create-confidential-vm-instance) are based on AMD SEV but don't have SNP features enabled. +CVMs with SEV-SNP enabled are currently in [public preview](https://cloud.google.com/blog/products/identity-security/rsa-snp-vm-more-confidential). Regarding (3), with their SEV-SNP offering Google provides direct access to remote-attestation statements. However, regarding (5), attestation is partially based on the [Shielded VM vTPM](https://cloud.google.com/compute/shielded-vm/docs/shielded-vm#vtpm) for [measured boot](../architecture/attestation.md#measured-boot), which is a vTPM managed by Google's hypervisor. Hence, the hypervisor is currently part of Constellation's TCB. Regarding (4), the CVMs still include closed-source firmware. 
-[TDX on Google](https://cloud.google.com/blog/products/identity-security/confidential-vms-on-intel-cpus-your-datas-new-intelligent-defense) is in public preview. -With it, Constellation would have a similar TCB and attestation flow as with the current SEV-SNP offering. + +In the past, Intel and Google have [collaborated](https://cloud.google.com/blog/products/identity-security/rsa-google-intel-confidential-computing-more-secure) to enhance the security of TDX. +Recently, Google has announced a [private preview for TDX](https://cloud.google.com/blog/products/identity-security/confidential-vms-on-intel-cpus-your-datas-new-intelligent-defense?hl=en). +With TDX on Google, Constellation has a similar TCB and attestation flow as with the current SEV-SNP offering. + +## Amazon Web Services (AWS) + +Amazon EC2 [supports AMD SEV-SNP](https://aws.amazon.com/de/about-aws/whats-new/2023/04/amazon-ec2-amd-sev-snp/). +Regarding (3), AWS provides direct access to remote-attestation statements. +However, regarding (5), attestation is partially based on the [NitroTPM](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html) for [measured boot](../architecture/attestation.md#measured-boot), which is a vTPM managed by the Nitro hypervisor. +Hence, the hypervisor is currently part of Constellation's TCB. +Regarding (4), the [firmware is open source](https://github.com/aws/uefi) and can be reproducibly built. ## STACKIT diff --git a/docs/docs/overview/confidential-kubernetes.md b/docs/docs/overview/confidential-kubernetes.md index bff8c3322..ca20df4de 100644 --- a/docs/docs/overview/confidential-kubernetes.md +++ b/docs/docs/overview/confidential-kubernetes.md @@ -23,9 +23,9 @@ With the above, Constellation wraps an entire cluster into one coherent and veri ![Confidential Kubernetes](../_media/concept-constellation.svg) -## Comparison: Managed Kubernetes with CVMs +## Contrast: Managed Kubernetes with CVMs -In comparison, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. Consequently, this approach leaves a large attack surface, as is depicted in the following. +In contrast, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. 
For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. Consequently, this approach leaves a large attack surface, as is depicted in the following. ![Concept: Managed Kubernetes plus CVMs](../_media/concept-managed.svg) diff --git a/docs/docs/overview/license.md b/docs/docs/overview/license.md index 98a9cbf94..34122c025 100644 --- a/docs/docs/overview/license.md +++ b/docs/docs/overview/license.md @@ -1,12 +1,30 @@ # License -Constellation is available under the [Business Source License 1.1](https://github.com/edgelesssys/constellation/blob/main/LICENSE). +## Source code -You may use it free of charge for non-production use ("Community License"). +Constellation's source code is available on [GitHub](https://github.com/edgelesssys/constellation) under the [GNU Affero General Public License v3.0](https://github.com/edgelesssys/constellation/blob/main/LICENSE). + +## Binaries + +Edgeless Systems provides ready-to-use and [signed](../architecture/attestation.md#chain-of-trust) binaries of Constellation. This includes the CLI and the [node images](../architecture/images.md). + +These binaries may be used free of charge within the bounds of Constellation's [**Community License**](#community-license). An [**Enterprise License**](#enterprise-license) can be purchased from Edgeless Systems. + +The Constellation CLI displays relevant license information when you initialize your cluster. You are responsible for staying within the bounds of your respective license. Constellation doesn't enforce any limits so as not to endanger your cluster's availability. + +## Terraform provider + +Edgeless Systems provides a [Terraform provider](https://github.com/edgelesssys/terraform-provider-constellation/releases), which may be used free of charge within the bounds of Constellation's [**Community License**](#community-license). An [**Enterprise License**](#enterprise-license) can be purchased from Edgeless Systems. + +You are responsible for staying within the bounds of your respective license. Constellation doesn't enforce any limits so as not to endanger your cluster's availability. + +## Community License + +You are free to use the Constellation binaries provided by Edgeless Systems to create services for internal consumption, evaluation purposes, or non-commercial use. You must not use the Constellation binaries to provide commercial hosted services to third parties. Edgeless Systems gives no warranties and offers no support. ## Enterprise License -Enterprise Licenses permit production use and come with support and additional features. Find out more at the [product website](https://www.edgeless.systems/products/constellation/). +Enterprise Licenses don't have the above limitations and come with support and additional features. Find out more at the [product website](https://www.edgeless.systems/products/constellation/). Once you have received your Enterprise License file, place it in your [Constellation workspace](../architecture/orchestration.md#workspaces) in a file named `constellation.license`. diff --git a/docs/docs/overview/performance/compute.md b/docs/docs/overview/performance/compute.md deleted file mode 100644 index 88dd4b1b2..000000000 --- a/docs/docs/overview/performance/compute.md +++ /dev/null @@ -1,11 +0,0 @@ -# Impact of runtime encryption on compute performance - -All nodes in a Constellation cluster are executed inside Confidential VMs (CVMs). 
Consequently, the performance of Constellation is inherently linked to the performance of these CVMs. - -## AMD and Azure benchmarking - -AMD and Azure have collectively released a [performance benchmark](https://community.amd.com/t5/business/microsoft-azure-confidential-computing-powered-by-3rd-gen-epyc/ba-p/497796) for CVMs that utilize 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. This benchmark, which included a variety of mostly compute-intensive tests such as SPEC CPU 2017 and CoreMark, demonstrated that CVMs experience only minor performance degradation (ranging from 2% to 8%) when compared to standard VMs. Such results are indicative of the performance that can be expected from compute-intensive workloads running with Constellation on Azure. - -## AMD and Google benchmarking - -Similarly, AMD and Google have jointly released a [performance benchmark](https://www.amd.com/system/files/documents/3rd-gen-epyc-gcp-c2d-conf-compute-perf-brief.pdf) for CVMs employing 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. With high-performance computing workloads such as WRF, NAMD, Ansys CFS, and Ansys LS_DYNA, they observed analogous findings, with only minor performance degradation (between 2% and 4%) compared to standard VMs. These outcomes are reflective of the performance that can be expected for compute-intensive workloads running with Constellation on GCP. diff --git a/docs/docs/overview/performance/performance.md b/docs/docs/overview/performance/performance.md index 59bf86602..7f22a693e 100644 --- a/docs/docs/overview/performance/performance.md +++ b/docs/docs/overview/performance/performance.md @@ -1,10 +1,18 @@ # Performance analysis of Constellation -This section provides a comprehensive examination of the performance characteristics of Constellation. +This section provides a comprehensive examination of the performance characteristics of Constellation, encompassing various aspects, including runtime encryption, I/O benchmarks, and real-world applications. -## Runtime encryption +## Impact of runtime encryption on performance -Runtime encryption affects compute performance. [Benchmarks by Azure and Google](compute.md) show that the performance degradation of Confidential VMs (CVMs) is small, ranging from 2% to 8% for compute-intensive workloads. +All nodes in a Constellation cluster are executed inside Confidential VMs (CVMs). Consequently, the performance of Constellation is inherently linked to the performance of these CVMs. + +### AMD and Azure benchmarking + +AMD and Azure have collectively released a [performance benchmark](https://community.amd.com/t5/business/microsoft-azure-confidential-computing-powered-by-3rd-gen-epyc/ba-p/497796) for CVMs that utilize 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. This benchmark, which included a variety of mostly compute-intensive tests such as SPEC CPU 2017 and CoreMark, demonstrated that CVMs experience only minor performance degradation (ranging from 2% to 8%) when compared to standard VMs. Such results are indicative of the performance that can be expected from compute-intensive workloads running with Constellation on Azure. + +### AMD and Google benchmarking + +Similarly, AMD and Google have jointly released a [performance benchmark](https://www.amd.com/system/files/documents/3rd-gen-epyc-gcp-c2d-conf-compute-perf-brief.pdf) for CVMs employing 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. 
With high-performance computing workloads such as WRF, NAMD, Ansys CFS, and Ansys LS_DYNA, they observed analogous findings, with only minor performance degradation (between 2% and 4%) compared to standard VMs. These outcomes are reflective of the performance that can be expected for compute-intensive workloads running with Constellation on GCP. ## I/O performance benchmarks diff --git a/docs/docs/overview/product.md b/docs/docs/overview/product.md index 4b5d90706..8e8ee6950 100644 --- a/docs/docs/overview/product.md +++ b/docs/docs/overview/product.md @@ -6,7 +6,7 @@ From a security perspective, Constellation implements the [Confidential Kubernet From an operational perspective, Constellation provides the following key features: -* **Native support for different clouds**: Constellation works on Amazon Web Services (AWS), Microsoft Azure, Google Cloud Platform (GCP), and STACKIT. Support for OpenStack-based environments is coming with a future release. Constellation securely interfaces with the cloud infrastructure to provide [cluster autoscaling](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), [dynamic persistent volumes](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/), and [service load balancing](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). +* **Native support for different clouds**: Constellation works on Microsoft Azure, Google Cloud Platform (GCP), Amazon Web Services (AWS), and STACKIT. Support for OpenStack-based environments is coming with a future release. Constellation securely interfaces with the cloud infrastructure to provide [cluster autoscaling](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), [dynamic persistent volumes](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/), and [service load balancing](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). * **High availability**: Constellation uses a [multi-master architecture](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/) with a [stacked etcd topology](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/ha-topology/#stacked-etcd-topology) to ensure high availability. * **Integrated Day-2 operations**: Constellation lets you securely [upgrade](../workflows/upgrade.md) your cluster to a new release. It also lets you securely [recover](../workflows/recovery.md) a failed cluster. Both with a single command. * **Support for Terraform**: Constellation includes a [Terraform provider](../workflows/terraform-provider.md) that lets you manage the full lifecycle of your cluster via Terraform. diff --git a/docs/docs/reference/cli.md b/docs/docs/reference/cli.md index c5d7c652b..52391f3d1 100644 --- a/docs/docs/reference/cli.md +++ b/docs/docs/reference/cli.md @@ -39,7 +39,6 @@ Commands: * [apply](#constellation-iam-upgrade-apply): Apply an upgrade to an IAM profile * [version](#constellation-version): Display version of this CLI * [init](#constellation-init): Initialize the Constellation cluster -* [ssh](#constellation-ssh): Generate a certificate for emergency SSH access ## constellation config @@ -79,10 +78,9 @@ constellation config generate {aws|azure|gcp|openstack|qemu|stackit} [flags] ### Options ``` - -a, --attestation string attestation variant to use {aws-sev-snp|aws-nitro-tpm|azure-sev-snp|azure-tdx|azure-trustedlaunch|gcp-sev-snp|gcp-sev-es|qemu-vtpm}. 
If not specified, the default for the cloud provider is used + -a, --attestation string attestation variant to use {aws-sev-snp|aws-nitro-tpm|azure-sev-snp|azure-tdx|azure-trustedlaunch|gcp-sev-es|qemu-vtpm}. If not specified, the default for the cloud provider is used -h, --help help for generate - -k, --kubernetes string Kubernetes version to use in format MAJOR.MINOR (default "v1.31") - -t, --tags strings additional tags for created resources given a list of key=value + -k, --kubernetes string Kubernetes version to use in format MAJOR.MINOR (default "v1.28") ``` ### Options inherited from parent commands @@ -656,7 +654,6 @@ constellation iam create azure [flags] --region string region the resources will be created in, e.g., westus (required) --resourceGroup string name prefix of the two resource groups your cluster / IAM resources will be created in (required) --servicePrincipal string name of the service principal that will be created (required) - --subscriptionID string subscription ID of the Azure account. Required if the 'ARM_SUBSCRIPTION_ID' environment variable is not set ``` ### Options inherited from parent commands @@ -685,13 +682,13 @@ constellation iam create gcp [flags] ### Options ``` - -h, --help help for gcp - --prefix string Prefix for the service account ID and VM ID that will be created (required) - Must be letters, digits, or hyphens. - --projectID string ID of the GCP project the configuration will be created in (required) - Find it on the welcome screen of your project: https://console.cloud.google.com/welcome - --zone string GCP zone the cluster will be deployed in (required) - Find a list of available zones here: https://cloud.google.com/compute/docs/regions-zones#available + -h, --help help for gcp + --projectID string ID of the GCP project the configuration will be created in (required) + Find it on the welcome screen of your project: https://console.cloud.google.com/welcome + --serviceAccountID string ID for the service account that will be created (required) + Must be 6 to 30 lowercase letters, digits, or hyphens. + --zone string GCP zone the cluster will be deployed in (required) + Find a list of available zones here: https://cloud.google.com/compute/docs/regions-zones#available ``` ### Options inherited from parent commands @@ -843,31 +840,3 @@ constellation init [flags] -C, --workspace string path to the Constellation workspace ``` -## constellation ssh - -Generate a certificate for emergency SSH access - -### Synopsis - -Generate a certificate for emergency SSH access to your SSH-enabled constellation cluster. - -``` -constellation ssh [flags] -``` - -### Options - -``` - -h, --help help for ssh - --key string the path to an existing SSH public key -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - diff --git a/docs/docs/reference/migration.md b/docs/docs/reference/migration.md index eb55d650b..36680eef6 100644 --- a/docs/docs/reference/migration.md +++ b/docs/docs/reference/migration.md @@ -3,93 +3,39 @@ This document describes breaking changes and migrations between Constellation releases. Use [`constellation config migrate`](./cli.md#constellation-config-migrate) to automatically update an old config file to a new format. 
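A minimal sketch of that automatic migration, assuming the new CLI is installed and the old `constellation-conf.yaml` sits in the current workspace:

```bash
# Rewrite the existing config file in place to the format expected by this CLI version
constellation config migrate

# If the config lives in another workspace, point the inherited -C/--workspace flag at it (path is an example)
constellation config migrate -C /path/to/constellation-workspace
```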
-## Migrations to v2.23.0 +## Migrating from Azure's service principal authentication to managed identity authentication -### GCP - -GCP will require the additional permission `compute.forwardingRules.list`. Please update your IAM roles using `constellation iam upgrade apply`. - -## Migrations to v2.19.1 - -### Azure - -* During the upgrade, security rules are migrated and the old ones need to be cleaned up manually by the user. The below script shows how to delete them through the Azure CLI: - -```bash -#!/usr/bin/env bash -name="" # the name provided in the config -uid="" # the cluster id can be retrieved via `yq '.infrastructure.uid' constellation-state.yaml` -resource_group="" # the RG can be retrieved via `yq '.provider.azure.resourceGroup' constellation-conf.yaml` - -rules=( - "kubernetes" - "bootstrapper" - "verify" - "recovery" - "join" - "debugd" - "konnectivity" -) - -for rule in "${rules[@]}"; do - echo "Deleting rule: ${rule}" - az network nsg rule delete \ - --resource-group "${resource_group}" \ - --nsg-name "${name}-${uid}" \ - --name "${rule}" -done - -echo "All specified rules have been deleted." -``` - -## Migrating from CLI versions before 2.21.1 - -### AWS - -* AWS clusters that use `LoadBalancer` resources require more IAM permissions. Please upgrade your IAM roles using `constellation iam upgrade apply`. This will show necessary changes and apply them, if desired. - -## Migrating from CLI versions before 2.19.0 - -### Azure - -* To allow seamless upgrades on Azure when Kubernetes services of type `LoadBalancer` are deployed, the target - load balancer in which the `cloud-controller-manager` creates load balancing rules was changed. Instead of using the load balancer - created and maintained by the CLI's Terraform code, the `cloud-controller-manager` now creates its own load balancer in Azure. - If your Constellation has services of type `LoadBalancer`, please remove them before the upgrade and re-apply them - afterward. - -## Migrating from CLI versions before 2.18.0 - -* The `provider.azure.appClientID` and `provider.azure.appClientSecret` fields are no longer supported and should be removed. -* To keep using an existing UAMI, add the `Owner` permission with the scope of your `resourceGroup`. -* Otherwise, simply [create new Constellation IAM credentials](../workflows/config.md#creating-an-iam-configuration) and use the created UAMI. -* To migrate the authentication for an existing cluster on Azure to an UAMI with the necessary permissions: +- The `provider.azure.appClientID` and `provider.azure.appClientSecret` fields are no longer supported and should be removed. +- To keep using an existing UAMI, add the `Owner` permission with the scope of your `resourceGroup`. +- Otherwise, simply [create new Constellation IAM credentials](../workflows/config.md#creating-an-iam-configuration) and use the created UAMI. +- To migrate the authentication for an existing cluster on Azure to an UAMI with the necessary permissions: 1. Remove the `aadClientId` and `aadClientSecret` from the azureconfig secret. 2. Set `useManagedIdentityExtension` to `true` and use the `userAssignedIdentity` from the Constellation config for the value of `userAssignedIdentityID`. 3. Restart the CSI driver, cloud controller manager, cluster autoscaler, and Constellation operator pods. + ## Migrating from CLI versions before 2.10 -* AWS cluster upgrades require additional IAM permissions for the newly introduced `aws-load-balancer-controller`. Please upgrade your IAM roles using `iam upgrade apply`. 
This will show necessary changes and apply them, if desired. -* The global `nodeGroups` field was added. -* The fields `instanceType`, `stateDiskSizeGB`, and `stateDiskType` for each cloud provider are now part of the configuration of individual node groups. -* The `constellation create` command no longer uses the flags `--control-plane-count` and `--worker-count`. Instead, the initial node count is configured per node group in the `nodeGroups` field. +- AWS cluster upgrades require additional IAM permissions for the newly introduced `aws-load-balancer-controller`. Please upgrade your IAM roles using `iam upgrade apply`. This will show necessary changes and apply them, if desired. +- The global `nodeGroups` field was added. +- The fields `instanceType`, `stateDiskSizeGB`, and `stateDiskType` for each cloud provider are now part of the configuration of individual node groups. +- The `constellation create` command no longer uses the flags `--control-plane-count` and `--worker-count`. Instead, the initial node count is configured per node group in the `nodeGroups` field. ## Migrating from CLI versions before 2.9 -* The `provider.azure.appClientID` and `provider.azure.clientSecretValue` fields were removed to enforce migration to managed identity authentication +- The `provider.azure.appClientID` and `provider.azure.clientSecretValue` fields were removed to enforce migration to managed identity authentication ## Migrating from CLI versions before 2.8 -* The `measurements` field for each cloud service provider was replaced with a global `attestation` field. -* The `confidentialVM`, `idKeyDigest`, and `enforceIdKeyDigest` fields for the Azure cloud service provider were removed in favor of using the global `attestation` field. -* The optional global field `attestationVariant` was replaced by the now required `attestation` field. +- The `measurements` field for each cloud service provider was replaced with a global `attestation` field. +- The `confidentialVM`, `idKeyDigest`, and `enforceIdKeyDigest` fields for the Azure cloud service provider were removed in favor of using the global `attestation` field. +- The optional global field `attestationVariant` was replaced by the now required `attestation` field. ## Migrating from CLI versions before 2.3 -* The `sshUsers` field was deprecated in v2.2 and has been removed from the configuration in v2.3. +- The `sshUsers` field was deprecated in v2.2 and has been removed from the configuration in v2.3. As an alternative for SSH, check the workflow section [Connect to nodes](../workflows/troubleshooting.md#node-shell-access). -* The `image` field for each cloud service provider has been replaced with a global `image` field. Use the following mapping to migrate your configuration: +- The `image` field for each cloud service provider has been replaced with a global `image` field. Use the following mapping to migrate your configuration:

@@ -119,11 +65,10 @@ echo "All specified rules have been deleted." | GCP | `projects/constellation-images/global/images/constellation-v2-2-0` | `v2.2.0` | | GCP | `projects/constellation-images/global/images/constellation-v2-1-0` | `v2.1.0` | | GCP | `projects/constellation-images/global/images/constellation-v2-0-0` | `v2.0.0` | -
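A rough sketch of applying that mapping with `yq`; the old field path `.provider.gcp.image` and the target version `v2.2.0` are illustrative, so pick the row from the table that matches your current image:

```bash
# Drop the old per-CSP image reference and set the new global image field instead (paths are assumptions)
yq -i 'del(.provider.gcp.image)' constellation-conf.yaml
yq -i '.image = "v2.2.0"' constellation-conf.yaml
```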
-* The `enforcedMeasurements` field has been removed and merged with the `measurements` field. - * To migrate your config containing a new image (`v2.3` or greater), remove the old `measurements` and `enforcedMeasurements` entries from your config and run `constellation fetch-measurements` - * To migrate your config containing an image older than `v2.3`, remove the `enforcedMeasurements` entry and replace the entries in `measurements` as shown in the example below: +- The `enforcedMeasurements` field has been removed and merged with the `measurements` field. + - To migrate your config containing a new image (`v2.3` or greater), remove the old `measurements` and `enforcedMeasurements` entries from your config and run `constellation fetch-measurements` + - To migrate your config containing an image older than `v2.3`, remove the `enforcedMeasurements` entry and replace the entries in `measurements` as shown in the example below: ```diff measurements: diff --git a/docs/docs/workflows/config.md b/docs/docs/workflows/config.md index 7868ff1be..7e8933466 100644 --- a/docs/docs/workflows/config.md +++ b/docs/docs/workflows/config.md @@ -4,7 +4,7 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + --- @@ -14,44 +14,56 @@ Before you can create your cluster, you need to configure the identity and acces You can generate a configuration file for your CSP by using the following CLI command: - - - -```bash -constellation config generate aws -``` - - - + + ```bash constellation config generate azure ``` - - + + ```bash constellation config generate gcp ``` - - + + + +```bash +constellation config generate aws +``` + + + ```bash constellation config generate stackit ``` - - + + This creates the file `constellation-conf.yaml` in the current directory. ## Choosing a VM type Constellation supports the following VM types: - - + + + +By default, Constellation uses `Standard_DC4as_v5` CVMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file. For CVMs, any VM type with a minimum of 4 vCPUs from the [DCasv5 & DCadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series) or [ECasv5 & ECadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/ecasv5-ecadsv5-series) families is supported. + +You can also run `constellation config instance-types` to get the list of all supported options. + + + + +By default, Constellation uses `n2d-standard-4` VMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file. Supported are all machines with a minimum of 4 vCPUs from the [C2D](https://cloud.google.com/compute/docs/compute-optimized-machines#c2d_machine_types) or [N2D](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines) family. You can run `constellation config instance-types` to get the list of all supported options. + + + By default, Constellation uses `m6a.xlarge` VMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file. @@ -62,20 +74,8 @@ If you are using the attestation variant `awsNitroTPM`, you can choose any of th The Constellation CLI can also print the supported instance types with: `constellation config instance-types`. 
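A quick sketch for inspecting and switching the VM type from the command line; the node-group name `worker_default` and the type `m6a.2xlarge` are assumptions, so check the `nodeGroups` section of your config for the actual names:

```bash
# List the instance types supported for the configured CSP and attestation variant
constellation config instance-types

# Point a node group at a different supported type (group name and type are examples)
yq -i '.nodeGroups.worker_default.instanceType = "m6a.2xlarge"' constellation-conf.yaml
```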
- - - -By default, Constellation uses `Standard_DC4as_v5` CVMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file. For CVMs, any VM type with a minimum of 4 vCPUs from the [DCasv5 & DCadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series) or [ECasv5 & ECadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/ecasv5-ecadsv5-series) families is supported. - -You can also run `constellation config instance-types` to get the list of all supported options. - - - - -By default, Constellation uses `n2d-standard-4` VMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file. Supported are all machines with a minimum of 4 vCPUs from the [C2D](https://cloud.google.com/compute/docs/compute-optimized-machines#c2d_machine_types) or [N2D](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines) family. You can run `constellation config instance-types` to get the list of all supported options. - - - + + By default, Constellation uses `m1a.4cd` VMs (4 vCPUs, 30 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file. @@ -93,8 +93,8 @@ You can choose any of the SEV-enabled instance types. You can find a list of all The Constellation CLI can also print the supported instance types with: `constellation config instance-types`. - - + + Fill the desired VM type into the `instanceType` fields in the `constellation-conf.yml` file. @@ -135,7 +135,7 @@ This configuration creates an additional node group `high_cpu` with a larger ins You can use the field `zone` to specify what availability zone nodes of the group are placed in. On Azure, this field is empty by default and nodes are automatically spread across availability zones. -STACKIT currently offers SEV-enabled CPUs in the `eu01-1`, `eu01-2`, and `eu01-3` zones. +STACKIT currently offers SEV-enabled CPUs in the `eu01-1`, `eu01-2` and `eu01-3` zone. Consult the documentation of your cloud provider for more information: * [AWS](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/) @@ -153,8 +153,49 @@ See also Constellation's [Kubernetes support policy](../architecture/versions.md You can create an IAM configuration for your cluster automatically using the `constellation iam create` command. If you already have a Constellation configuration file, you can add the `--update-config` flag to the command. This writes the needed IAM fields into your configuration. Furthermore, the flag updates the zone/region of the configuration if it hasn't been set yet. - - + + + +You must be authenticated with the [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). + +```bash +constellation iam create azure --region=westus --resourceGroup=constellTest --servicePrincipal=spTest +``` + +This command creates IAM configuration on the Azure region `westus` creating a new resource group `constellTest` and a new service principal `spTest`. + +CVMs are available in several Azure regions. 
Constellation OS images are currently replicated to the following: + +* `germanywestcentral` +* `westus` +* `eastus` +* `northeurope` +* `westeurope` +* `southeastasia` + +If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+Azure+image+region:+xx-xxxx-x). + +You can find a list of all [regions in Azure's documentation](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=virtual-machines®ions=all). + +Paste the output into the corresponding fields of the `constellation-conf.yaml` file. + + + + +You must be authenticated with the [GCP CLI](https://cloud.google.com/sdk/gcloud) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). + +```bash +constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west2-a --serviceAccountID=constell-test +``` + +This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west2-a` creating a new service account `constell-test`. + +Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `N2D`. + +Paste the output into the corresponding fields of the `constellation-conf.yaml` file. + + + You must be authenticated with the [AWS CLI](https://aws.amazon.com/en/cli/) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). @@ -178,94 +219,21 @@ You can find a list of all [regions in AWS's documentation](https://docs.aws.ama Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - - -You must be authenticated with the [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). - -```bash -constellation iam create azure --subscriptionID 00000000-0000-0000-0000-000000000000 --region=westus --resourceGroup=constellTest --servicePrincipal=spTest -``` - -This command creates IAM configuration on the Azure region `westus` creating a new resource group `constellTest` and a new service principal `spTest`. - -CVMs are available in several Azure regions. Constellation OS images are currently replicated to the following: - -* `germanywestcentral` -* `westus` -* `eastus` -* `northeurope` -* `westeurope` -* `southeastasia` - -If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+Azure+image+region:+xx-xxxx-x). - -You can find a list of all [regions in Azure's documentation](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=virtual-machines®ions=all). - -Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - - - -You must be authenticated with the [GCP CLI](https://cloud.google.com/sdk/gcloud) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). 
- -```bash -constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --prefix=constell-test -``` - -This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. - -Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `N2D`. - -Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - - + + STACKIT requires manual creation and configuration of service accounts. Look at the [first steps](../getting-started/first-steps.md) for more information. - - + +
Alternatively, you can manually create the IAM configuration on your CSP. The following describes the configuration fields and how you obtain the required information or create the required resources. - - - -* **region**: The name of your chosen AWS data center region, e.g., `us-east-2`. - - Constellation OS images are currently replicated to the following regions: - * `eu-central-1` - * `eu-west-1` - * `eu-west-3` - * `us-east-2` - * `ap-south-1` - - If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+AWS+image+region:+xx-xxxx-x). - - You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). - -* **zone**: The name of your chosen AWS data center availability zone, e.g., `us-east-2a`. - - Learn more about [availability zones in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-availability-zones). - -* **iamProfileControlPlane**: The name of an IAM instance profile attached to all control-plane nodes. - - You can create the resource with [Terraform](https://www.terraform.io/). For that, use the [provided Terraform script](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam) to generate the necessary profile. The profile name will be provided as Terraform output value: `control_plane_instance_profile_name`. - - Alternatively, you can create the AWS profile with a tool of your choice. Use the JSON policy in [main.tf](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam/main.tf) in the resource `aws_iam_policy.control_plane_policy`. - -* **iamProfileWorkerNodes**: The name of an IAM instance profile attached to all worker nodes. - - You can create the resource with [Terraform](https://www.terraform.io/). For that, use the [provided Terraform script](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam) to generate the necessary profile. The profile name will be provided as Terraform output value: `worker_nodes_instance_profile_name`. - - Alternatively, you can create the AWS profile with a tool of your choice. Use the JSON policy in [main.tf](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam/main.tf) in the resource `aws_iam_policy.worker_node_policy`. - - - + + * **subscription**: The UUID of your Azure subscription, e.g., `8b8bd01f-efd9-4113-9bd1-c82137c32da7`. @@ -301,18 +269,19 @@ The following describes the configuration fields and how you obtain the required The user-assigned identity is used by instances of the cluster to access other cloud resources. For more information about managed identities refer to [Azure's documentation](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/how-manage-user-assigned-managed-identities). - - + + + * **project**: The ID of your GCP project, e.g., `constellation-129857`. You can find it on the [welcome screen of your GCP project](https://console.cloud.google.com/welcome). For more information refer to [Google's documentation](https://support.google.com/googleapi/answer/7014113). -* **region**: The GCP region you want to deploy your cluster in, e.g., `us-central1`. +* **region**: The GCP region you want to deploy your cluster in, e.g., `us-west1`. 
You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). -* **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-central1-a`. +* **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-west1-a`. You can find a [list of all zones in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). @@ -326,13 +295,47 @@ The following describes the configuration fields and how you obtain the required Afterward, create and download a new JSON key for this service account. Place the downloaded file in your Constellation workspace, and set the config parameter to the filename, e.g., `constellation-129857-15343dba46cb.json`. - - + + + + +* **region**: The name of your chosen AWS data center region, e.g., `us-east-2`. + + Constellation OS images are currently replicated to the following regions: + * `eu-central-1` + * `eu-west-1` + * `eu-west-3` + * `us-east-2` + * `ap-south-1` + + If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+AWS+image+region:+xx-xxxx-x). + + You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). + +* **zone**: The name of your chosen AWS data center availability zone, e.g., `us-east-2a`. + + Learn more about [availability zones in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-availability-zones). + +* **iamProfileControlPlane**: The name of an IAM instance profile attached to all control-plane nodes. + + You can create the resource with [Terraform](https://www.terraform.io/). For that, use the [provided Terraform script](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam) to generate the necessary profile. The profile name will be provided as Terraform output value: `control_plane_instance_profile_name`. + + Alternatively, you can create the AWS profile with a tool of your choice. Use the JSON policy in [main.tf](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam/main.tf) in the resource `aws_iam_policy.control_plane_policy`. + +* **iamProfileWorkerNodes**: The name of an IAM instance profile attached to all worker nodes. + + You can create the resource with [Terraform](https://www.terraform.io/). For that, use the [provided Terraform script](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam) to generate the necessary profile. The profile name will be provided as Terraform output value: `worker_nodes_instance_profile_name`. + + Alternatively, you can create the AWS profile with a tool of your choice. Use the JSON policy in [main.tf](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam/main.tf) in the resource `aws_iam_policy.worker_node_policy`. + + + + STACKIT requires manual creation and configuration of service accounts. Look at the [first steps](../getting-started/first-steps.md) for more information. - - + +
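For the two AWS instance profiles, a sketch of generating them with the Terraform script referenced above (the `release/v2.2` ref is taken from the linked path; adjust it to the release you're on):

```bash
# Fetch the referenced Terraform definitions and create both instance profiles
git clone --branch release/v2.2 https://github.com/edgelesssys/constellation.git
cd constellation/hack/terraform/aws/iam
terraform init
terraform apply

# Profile names to paste into iamProfileControlPlane and iamProfileWorkerNodes
terraform output control_plane_instance_profile_name
terraform output worker_nodes_instance_profile_name
```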
Now that you've configured your CSP, you can [create your cluster](./create.md). diff --git a/docs/docs/workflows/create.md b/docs/docs/workflows/create.md index 6074ebb16..54bc9dcbc 100644 --- a/docs/docs/workflows/create.md +++ b/docs/docs/workflows/create.md @@ -4,7 +4,7 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + --- @@ -27,8 +27,8 @@ If you don't have a cloud subscription, you can also set up a [local Constellati Before you create the cluster, make sure to have a [valid configuration file](./config.md). - - + + ```bash constellation apply @@ -36,8 +36,8 @@ constellation apply `apply` stores the state of your cluster's cloud resources in a [`constellation-terraform`](../architecture/orchestration.md#cluster-creation-process) directory in your workspace. - - + + Self-managed infrastructure allows for more flexibility in the setup, by separating the infrastructure setup from the Constellation cluster management. This provides flexibility in DevOps and can meet potential regulatory requirements. @@ -56,7 +56,7 @@ management tooling of your choice. You need to keep the essential functionality :::info - On Azure, a manual update to the MAA provider's policy is necessary. + On Azure, if the enforcement policy is set to `MAAFallback` in `constellation-config.yaml`, a manual update to the MAA provider's policy is necessary. You can apply the update with the following command after creating the infrastructure, with `` being the URL of the MAA provider (i.e., `$(terraform output attestation_url | jq -r)`, when using the minimal Terraform configuration). ```bash @@ -77,8 +77,8 @@ With the required cloud resources set up, continue with initializing your cluste constellation apply --skip-phases=infrastructure ``` - - + + Finally, configure `kubectl` for your cluster: diff --git a/docs/docs/workflows/lb.md b/docs/docs/workflows/lb.md index 868e61076..11e403237 100644 --- a/docs/docs/workflows/lb.md +++ b/docs/docs/workflows/lb.md @@ -4,25 +4,12 @@ Constellation integrates the native load balancers of each CSP. Therefore, to ex ## Internet-facing LB service on AWS -To expose your application service externally you might want to use a Kubernetes Service of type `LoadBalancer`. On AWS, load-balancing is achieved through the [AWS Load Balancer Controller](https://kubernetes-sigs.github.io/aws-load-balancer-controller) as in the managed EKS. +To expose your application service externally you might want to use a Kubernetes Service of type `LoadBalancer`. On AWS, load-balancing is achieved through the [AWS Load Balancing Controller](https://kubernetes-sigs.github.io/aws-load-balancer-controller) as in the managed EKS. -Since recent versions, the controller deploy an internal LB by default requiring to set an annotation `service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing` to have an internet-facing LB. For more details, see the [official docs](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.7/guide/service/nlb/). +Since recent versions, the controller deploy an internal LB by default requiring to set an annotation `service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing` to have an internet-facing LB. For more details, see the [official docs](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.2/guide/service/nlb/). 
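For illustration, a minimal Service that requests an internet-facing load balancer through that annotation; the name, selector, and ports are placeholders:

```bash
# Expose pods labeled app=my-app through an internet-facing load balancer (names and ports are examples)
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Service
metadata:
  name: my-app
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
spec:
  type: LoadBalancer
  selector:
    app: my-app
  ports:
    - port: 80
      targetPort: 8080
EOF
```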
For general information on LB with AWS see [Network load balancing on Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/network-load-balancing.html). :::caution Before terminating the cluster, all LB backed services should be deleted, so that the controller can cleanup the related resources. ::: - -## Ingress on AWS - -The AWS Load Balancer Controller also provisions `Ingress` resources of class `alb`. -AWS Application Load Balancers (ALBs) can be configured with a [`target-type`](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.7/guide/ingress/annotations/#target-type). -The target type `ip` requires using the EKS container network solution, which makes it incompatible with Constellation. -If a service can be exposed on a `NodePort`, the target type `instance` can be used. - -See [Application load balancing on Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html) for more information. - -:::caution -Ingress handlers backed by AWS ALBs reside outside the Constellation cluster, so they shouldn't be handling sensitive traffic! -::: diff --git a/docs/docs/workflows/recovery.md b/docs/docs/workflows/recovery.md index 592ae247b..9bbb32652 100644 --- a/docs/docs/workflows/recovery.md +++ b/docs/docs/workflows/recovery.md @@ -16,39 +16,8 @@ You can check the health status of the nodes via the cloud service provider (CSP Constellation provides logging information on the boot process and status via serial console output. In the following, you'll find detailed descriptions for identifying clusters stuck in recovery for each CSP. - - - -First, open the AWS console to view all Auto Scaling Groups (ASGs) in the region of your cluster. Select the ASG of the control plane `--control-plane` and check that enough members are in a *Running* state. - -Second, check the boot logs of these *Instances*. In the ASG's *Instance management* view, select each desired instance. In the upper right corner, select **Action > Monitor and troubleshoot > Get system log**. - -In the serial console output, search for `Waiting for decryption key`. -Similar output to the following means your node was restarted and needs to decrypt the [state disk](../architecture/images.md#state-disk): - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","caller":"cmd/main.go:55","msg":"Starting disk-mapper","version":"2.0.0","cloudProvider":"gcp"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"setupManager","caller":"setup/setup.go:72","msg":"Preparing existing state disk"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:65","msg":"Starting RejoinClient"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"recoveryServer","caller":"recoveryserver/server.go:59","msg":"Starting RecoveryServer"} -``` - -The node will then try to connect to the [*JoinService*](../architecture/microservices.md#joinservice) and obtain the decryption key. 
-If this fails due to an unhealthy control plane, you will see log messages similar to the following: - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:77","msg":"Received list with JoinService endpoints","endpoints":["192.168.178.4:30090","192.168.178.2:30090"]} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.4:30090"} -{"level":"WARN","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.4:30090: connect: connection refused\"","endpoint":"192.168.178.4:30090"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.2:30090"} -{"level":"WARN","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.2:30090: i/o timeout\"","endpoint":"192.168.178.2:30090"} -{"level":"ERROR","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:110","msg":"Failed to rejoin on all endpoints"} -``` - -This means that you have to recover the node manually. - - - + + In the Azure portal, find the cluster's resource group. Inside the resource group, open the control plane *Virtual machine scale set* `constellation-scale-set-controlplanes-`. @@ -82,8 +51,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. - - + + First, check that the control plane *Instance Group* has enough members in a *Ready* state. In the GCP Console, go to **Instance Groups** and check the group for the cluster's control plane `-control-plane-`. @@ -118,8 +87,39 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. - - + + + +First, open the AWS console to view all Auto Scaling Groups (ASGs) in the region of your cluster. Select the ASG of the control plane `--control-plane` and check that enough members are in a *Running* state. + +Second, check the boot logs of these *Instances*. In the ASG's *Instance management* view, select each desired instance. In the upper right corner, select **Action > Monitor and troubleshoot > Get system log**. + +In the serial console output, search for `Waiting for decryption key`. 
+Similar output to the following means your node was restarted and needs to decrypt the [state disk](../architecture/images.md#state-disk): + +```json +{"level":"INFO","ts":"2022-09-08T10:21:53Z","caller":"cmd/main.go:55","msg":"Starting disk-mapper","version":"2.0.0","cloudProvider":"gcp"} +{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"setupManager","caller":"setup/setup.go:72","msg":"Preparing existing state disk"} +{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:65","msg":"Starting RejoinClient"} +{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"recoveryServer","caller":"recoveryserver/server.go:59","msg":"Starting RecoveryServer"} +``` + +The node will then try to connect to the [*JoinService*](../architecture/microservices.md#joinservice) and obtain the decryption key. +If this fails due to an unhealthy control plane, you will see log messages similar to the following: + +```json +{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:77","msg":"Received list with JoinService endpoints","endpoints":["192.168.178.4:30090","192.168.178.2:30090"]} +{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.4:30090"} +{"level":"WARN","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.4:30090: connect: connection refused\"","endpoint":"192.168.178.4:30090"} +{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.2:30090"} +{"level":"WARN","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.2:30090: i/o timeout\"","endpoint":"192.168.178.2:30090"} +{"level":"ERROR","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:110","msg":"Failed to rejoin on all endpoints"} +``` + +This means that you have to recover the node manually. + + + First, open the STACKIT portal to view all servers in your project. Select individual control plane nodes `--control-plane--` and check that enough members are in a *Running* state. @@ -149,8 +149,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. - - + + ## Recover a cluster @@ -162,7 +162,7 @@ Recovering a cluster requires the following parameters: A cluster can be recovered like this: ```bash -$ constellation recover +$ constellation recover --master-secret constellation-mastersecret.json Pushed recovery key. Pushed recovery key. Pushed recovery key. diff --git a/docs/docs/workflows/reproducible-builds.md b/docs/docs/workflows/reproducible-builds.md deleted file mode 100644 index e3bc46095..000000000 --- a/docs/docs/workflows/reproducible-builds.md +++ /dev/null @@ -1,63 +0,0 @@ -# Reproduce released artifacts - -Constellation has first-class support for [reproducible builds](https://reproducible-builds.org). 
-Reproducing the released artifacts is an alternative to [signature verification](verify-cli.md) that doesn't require trusting Edgeless Systems' release process. -The following sections describe how to rebuild an artifact and how Constellation ensures that the rebuild reproduces the artifacts bit-by-bit. - -## Build environment prerequisites - -The build systems used by Constellation - [Bazel](https://bazel.build/) and [Nix](https://nixos.org) - are designed for deterministic, reproducible builds. -These two dependencies should be the only prerequisites for a successful build. -However, it can't be ruled out completely that peculiarities of the host affect the build result. -Thus, we recommend the following host setup for best results: - -1. A Linux operating system not older than v5.4. -2. The GNU C library not older than v2.31 (avoid `musl`). -3. GNU `coreutils` not older than v8.30 (avoid `busybox`). -4. An `ext4` filesystem for building. -5. AppArmor turned off. - -This is given, for example, on an Ubuntu 22.04 system, which is also used for reproducibility tests. - -:::note - -To avoid any backwards-compatibility issues, the host software versions should also not be much newer than the Constellation release. - -::: - -## Run the build - -The following instructions outline qualitatively how to reproduce a build. -Constellation implements these instructions in the [Reproducible Builds workflow](https://github.com/edgelesssys/constellation/actions/workflows/reproducible-builds.yml), which continuously tests for reproducibility. -The workflow is a good place to look up specific version numbers and build steps. - -1. Check out the Constellation repository at the tag corresponding to the release. - - ```bash - git clone https://github.com/edgelesssys/constellation.git - cd constellation - git checkout v2.20.0 - ``` - -2. [Install the Bazel release](https://bazel.build/install) specified in `.bazelversion`. -3. [Install Nix](https://nixos.org/download/) (any recent version should do). -4. Run the build with `bazel build $target` for one of the following targets of interest: - - ```data - //cli:cli_enterprise_darwin_amd64 - //cli:cli_enterprise_darwin_arm64 - //cli:cli_enterprise_linux_amd64 - //cli:cli_enterprise_linux_arm64 - //cli:cli_enterprise_windows_amd64 - ``` - -5. Compare the build result with the downloaded release artifact. - - - -## Feedback - -Reproduction failures often indicate a bug in the build system or in the build definitions. -Therefore, we're interested in any reproducibility issues you might encounter. -[Start a bug report](https://github.com/edgelesssys/constellation/issues/new/choose) and describe the details of your build environment. -Make sure to include your result binary or a [`diffoscope`](https://diffoscope.org/) report, if possible. 
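As a hedged sketch of the final comparison step for the Linux amd64 CLI: the release asset name follows the project's usual download URL and the Bazel output path is an assumption, so adjust both to the target and release you actually built:

```bash
# Download the released binary and compare it against the local Bazel build output (paths are assumptions)
curl -fsSLO https://github.com/edgelesssys/constellation/releases/download/v2.20.0/constellation-linux-amd64
sha256sum constellation-linux-amd64 bazel-bin/cli/cli_enterprise_linux_amd64

# diffoscope produces a detailed report of any deviation between the two binaries
diffoscope constellation-linux-amd64 bazel-bin/cli/cli_enterprise_linux_amd64
```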
diff --git a/docs/docs/workflows/sbom.md b/docs/docs/workflows/sbom.md index 6c1702dee..9ef6eb65c 100644 --- a/docs/docs/workflows/sbom.md +++ b/docs/docs/workflows/sbom.md @@ -1,6 +1,6 @@ # Consume software bill of materials (SBOMs) - + --- @@ -11,15 +11,13 @@ SBOMs for Constellation are generated using [Syft](https://github.com/anchore/sy :::note The public key for Edgeless Systems' long-term code-signing key is: - ``` -----BEGIN PUBLIC KEY----- MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEf8F1hpmwE+YCFXzjGtaQcrL6XZVT JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== -----END PUBLIC KEY----- ``` - -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). +The public key is also available for download at https://edgeless.systems/es.pub and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). Make sure the key is available in a file named `cosign.pub` to execute the following examples. ::: @@ -40,7 +38,7 @@ cosign verify-blob --key cosign.pub --signature constellation.spdx.sbom.sig cons ### Container Images -SBOMs for container images are [attached to the image using Cosign](https://docs.sigstore.dev/cosign/signing/other_types/#sboms-software-bill-of-materials) and uploaded to the same registry. +SBOMs for container images are [attached to the image using Cosign](https://docs.sigstore.dev/signing/other_types#sboms-software-bill-of-materials) and uploaded to the same registry. As a consumer, use cosign to download and verify the SBOM: diff --git a/docs/docs/workflows/scale.md b/docs/docs/workflows/scale.md index 28f19e3f1..06898ad0c 100644 --- a/docs/docs/workflows/scale.md +++ b/docs/docs/workflows/scale.md @@ -51,36 +51,30 @@ kubectl -n kube-system get nodes Alternatively, you can manually scale your cluster up or down: - - - -1. Go to Auto Scaling Groups and select the worker ASG to scale up. -2. Click **Edit** -3. Set the new (increased) **Desired capacity** and **Update**. - - - + + 1. Find your Constellation resource group. 2. Select the `scale-set-workers`. 3. Go to **settings** and **scaling**. 4. Set the new **instance count** and **save**. - - + + 1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). 2. **Edit** the **worker** instance group. 3. Set the new **number of instances** and **save**. - - + + -Dynamic cluster scaling isn't yet supported for STACKIT. -Support will be introduced in one of the upcoming releases. +1. Go to Auto Scaling Groups and select the worker ASG to scale up. +2. Click **Edit** +3. Set the new (increased) **Desired capacity** and **Update**. - - + + ## Control-plane node scaling @@ -88,35 +82,30 @@ Control-plane nodes can **only be scaled manually and only scaled up**! To increase the number of control-plane nodes, follow these steps: - - + -1. Go to Auto Scaling Groups and select the control-plane ASG to scale up. -2. Click **Edit** -3. Set the new (increased) **Desired capacity** and **Update**. - - - + 1. Find your Constellation resource group. 2. Select the `scale-set-controlplanes`. 3. Go to **settings** and **scaling**. 4. Set the new (increased) **instance count** and **save**. - - + + 1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). 2. **Edit** the **control-plane** instance group. 3. Set the new (increased) **number of instances** and **save**. 
- - + + -Dynamic cluster scaling isn't yet supported for STACKIT. -Support will be introduced in one of the upcoming releases. +1. Go to Auto Scaling Groups and select the control-plane ASG to scale up. +2. Click **Edit** +3. Set the new (increased) **Desired capacity** and **Update**. - - + + If you scale down the number of control-planes nodes, the removed nodes won't be able to exit the `etcd` cluster correctly. This will endanger the quorum that's required to run a stable Kubernetes control plane. diff --git a/docs/docs/workflows/storage.md b/docs/docs/workflows/storage.md index a5c52be90..1cae8dacc 100644 --- a/docs/docs/workflows/storage.md +++ b/docs/docs/workflows/storage.md @@ -21,37 +21,37 @@ For more details see [encrypted persistent storage](../architecture/encrypted-st Constellation supports the following drivers, which offer node-level encryption and optional integrity protection. - - - -**Constellation CSI driver for AWS Elastic Block Store** -Mount [Elastic Block Store](https://aws.amazon.com/ebs/) storage volumes into your Constellation cluster. -Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-aws-ebs-csi-driver) for more information. - - - + + **Constellation CSI driver for Azure Disk**: Mount Azure [Disk Storage](https://azure.microsoft.com/en-us/services/storage/disks/#overview) into your Constellation cluster. See the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-azuredisk-csi-driver) for more information. Since Azure Disks are mounted as `ReadWriteOnce`, they're only available to a single pod. - - + + **Constellation CSI driver for GCP Persistent Disk**: Mount [Persistent Disk](https://cloud.google.com/persistent-disk) block storage into your Constellation cluster. Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-gcp-compute-persistent-disk-csi-driver) for more information. - - + + + +**Constellation CSI driver for AWS Elastic Block Store** +Mount [Elastic Block Store](https://aws.amazon.com/ebs/) storage volumes into your Constellation cluster. +Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-aws-ebs-csi-driver) for more information. + + + **Constellation CSI driver for STACKIT / OpenStack Cinder** Mount [Cinder](https://docs.openstack.org/cinder/latest/) block storage volumes into your Constellation cluster. Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-cloud-provider-openstack) for more information. - - + + Note that in case the options above aren't a suitable solution for you, Constellation is compatible with all other CSI-based storage options. For example, you can use [AWS EFS](https://docs.aws.amazon.com/en_en/eks/latest/userguide/efs-csi.html), [Azure Files](https://docs.microsoft.com/en-us/azure/storage/files/storage-files-introduction), or [GCP Filestore](https://cloud.google.com/filestore) with Constellation out of the box. Constellation is just not providing transparent encryption on the node level for these storage types yet. 
@@ -60,37 +60,8 @@ Note that in case the options above aren't a suitable solution for you, Constell The Constellation CLI automatically installs Constellation's CSI driver for the selected CSP in your cluster. If you don't need a CSI driver or wish to deploy your own, you can disable the automatic installation by setting `deployCSIDriver` to `false` in your Constellation config file. - - - -AWS comes with two storage classes by default. - -* `encrypted-rwo` - * Uses [SSDs of `gp3` type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html) - * ext-4 filesystem - * Encryption of all data written to disk -* `integrity-encrypted-rwo` - * Uses [SSDs of `gp3` type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html) - * ext-4 filesystem - * Encryption of all data written to disk - * Integrity protection of data written to disk - -For more information on encryption algorithms and key sizes, refer to [cryptographic algorithms](../architecture/encrypted-storage.md#cryptographic-algorithms). - -:::info - -The default storage class is set to `encrypted-rwo` for performance reasons. -If you want integrity-protected storage, set the `storageClassName` parameter of your persistent volume claim to `integrity-encrypted-rwo`. - -Alternatively, you can create your own storage class with integrity protection enabled by adding `csi.storage.k8s.io/fstype: ext4-integrity` to the class `parameters`. -Or use another filesystem by specifying another file system type with the suffix `-integrity`, e.g., `csi.storage.k8s.io/fstype: xfs-integrity`. - -Note that volume expansion isn't supported for integrity-protected disks. - -::: - - - + + Azure comes with two storage classes by default. @@ -118,8 +89,8 @@ Note that volume expansion isn't supported for integrity-protected disks. ::: - - + + GCP comes with two storage classes by default. @@ -147,8 +118,37 @@ Note that volume expansion isn't supported for integrity-protected disks. ::: - - + + + +AWS comes with two storage classes by default. + +* `encrypted-rwo` + * Uses [SSDs of `gp3` type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html) + * ext-4 filesystem + * Encryption of all data written to disk +* `integrity-encrypted-rwo` + * Uses [SSDs of `gp3` type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html) + * ext-4 filesystem + * Encryption of all data written to disk + * Integrity protection of data written to disk + +For more information on encryption algorithms and key sizes, refer to [cryptographic algorithms](../architecture/encrypted-storage.md#cryptographic-algorithms). + +:::info + +The default storage class is set to `encrypted-rwo` for performance reasons. +If you want integrity-protected storage, set the `storageClassName` parameter of your persistent volume claim to `integrity-encrypted-rwo`. + +Alternatively, you can create your own storage class with integrity protection enabled by adding `csi.storage.k8s.io/fstype: ext4-integrity` to the class `parameters`. +Or use another filesystem by specifying another file system type with the suffix `-integrity`, e.g., `csi.storage.k8s.io/fstype: xfs-integrity`. + +Note that volume expansion isn't supported for integrity-protected disks. + +::: + + + STACKIT comes with two storage classes by default. @@ -176,8 +176,8 @@ Note that volume expansion isn't supported for integrity-protected disks. ::: - - + + 1. 
Create a [persistent volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) diff --git a/docs/docs/workflows/terminate.md b/docs/docs/workflows/terminate.md index 2c45bebe3..58c274bdd 100644 --- a/docs/docs/workflows/terminate.md +++ b/docs/docs/workflows/terminate.md @@ -4,7 +4,7 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + --- @@ -16,8 +16,8 @@ All ephemeral storage and state of your cluster will be lost. Make sure any data ::: - - + + Terminate the cluster by running: ```bash @@ -40,8 +40,8 @@ resources manually. Just run the `terminate` command again afterward to continue ::: - - + + Terminate the cluster by running: ```bash @@ -56,5 +56,5 @@ rm constellation-state.yaml constellation-admin.conf Only the `constellation-mastersecret.json` and the configuration file remain. - - + + diff --git a/docs/docs/workflows/terraform-provider.md b/docs/docs/workflows/terraform-provider.md index c7a795d3f..78e63f326 100644 --- a/docs/docs/workflows/terraform-provider.md +++ b/docs/docs/workflows/terraform-provider.md @@ -21,22 +21,11 @@ This example shows how to set up a Constellation cluster with the reference IAM 2. Use one of the [example configurations for using the Constellation Terraform provider](https://github.com/edgelesssys/constellation/tree/main/terraform-provider-constellation/examples/full) or create a `main.tf` file and fill it with the resources you want to create. The [Constellation Terraform provider documentation](https://registry.terraform.io/providers/edgelesssys/constellation/latest) offers thorough documentation on the resources and their attributes. 3. Initialize and apply the Terraform configuration. - + - - Initialize the providers and apply the configuration. - - ```bash - terraform init - terraform apply - ``` - - Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. - - - -:::info -On SEV-SNP, you need to manually patch the policy of the MAA provider before creating the Constellation cluster, as this feature isn't available in Azure's Terraform provider yet. The Constellation CLI provides a utility for patching, but you can also do it manually. + + When creating a cluster on Azure, you need to manually patch the policy of the MAA provider before creating the Constellation cluster, as this feature isn't available in Azure's Terraform provider yet. The Constellation CLI provides a utility for patching, but you + can also do it manually. ```bash terraform init @@ -46,7 +35,9 @@ On SEV-SNP, you need to manually patch the policy of the MAA provider before cre terraform apply -target constellation_cluster.azure_example # adjust resource path if not using the example configuration ``` - Use the following policy if manually performing the patch. + Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. + + Use the following policy if manually performing the patch. ``` version= 1.0; @@ -66,20 +57,8 @@ On SEV-SNP, you need to manually patch the policy of the MAA provider before cre }; ``` -::: - - Initialize the providers and apply the configuration. 
- - ```bash - terraform init - terraform apply - ``` - - Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. - - - - + + Initialize the providers and apply the configuration. ```bash @@ -88,8 +67,8 @@ On SEV-SNP, you need to manually patch the policy of the MAA provider before cre ``` Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. - - + + Initialize the providers and apply the configuration. ```bash @@ -98,9 +77,19 @@ On SEV-SNP, you need to manually patch the policy of the MAA provider before cre ``` Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. - + + + Initialize the providers and apply the configuration. - + ```bash + terraform init + terraform apply + ``` + + Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. + + + 4. Connect to the cluster. ```bash diff --git a/docs/docs/workflows/troubleshooting.md b/docs/docs/workflows/troubleshooting.md index 7ed26ae7f..3c952dddc 100644 --- a/docs/docs/workflows/troubleshooting.md +++ b/docs/docs/workflows/troubleshooting.md @@ -40,24 +40,6 @@ Or alternatively, for `terminate`: ARM_SKIP_PROVIDER_REGISTRATION=true constellation terminate ``` -### Azure: Can't update attestation policy - -On Azure, you may receive the following error when running `apply` from within an Azure environment, e.g., an Azure VM: - -```shell-session -An error occurred: patching policies: updating attestation policy: unexpected status code: 403 Forbidden -``` - -The problem occurs because the Azure SDK we use internally attempts to [authenticate towards the Azure API with the managed identity of your current environment instead of the Azure CLI token](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DefaultAzureCredential). - -We decided not to deviate from this behavior and comply with the ordering of credentials. - -A solution is to add the [required permissions](../getting-started/install.md#required-permissions) to the managed identity of your environment. For example, the managed identity of your Azure VM, instead of the account that you've authenticated with in the Azure CLI. - -If your setup requires a change in the ordering of credentials, please open an issue and explain your desired behavior. - - - ### Nodes fail to join with error `untrusted measurement value` This error indicates that a node's [attestation statement](../architecture/attestation.md) contains measurements that don't match the trusted values expected by the [JoinService](../architecture/microservices.md#joinservice). 
@@ -149,54 +131,3 @@ Debugging via a shell on a node is [directly supported by Kubernetes](https://ku
```bash
kubectl delete pod node-debugger-constell-worker-xksa0-000000-bjthj
```
-
-### Emergency SSH access
-
-Emergency SSH access to nodes can be useful to diagnose issues or download important data even if the Kubernetes API isn't reachable anymore.
-
-1. Enter the `constellation-terraform` directory in your Constellation workspace and enable emergency SSH access to the cluster:
-
- ```bash
- cd constellation-terraform
- echo "emergency_ssh = true" >> ./terraform.tfvars
- terraform apply
- ```
-
-2. Sign an existing SSH key with your master secret:
-
- ```bash
- cd ../ # go back to your Constellation workspace
- constellation ssh --key your_public_key.pub
- ```
-
- A certificate is written to `constellation_cert.pub`.
-
- The certificate is valid for 24 hours and enables you to access your Constellation nodes using
- [certificate based authentication](https://en.wikibooks.org/wiki/OpenSSH/Cookbook/Certificate-based_Authentication).
-
-3. Now you can connect to any Constellation node using your certificate and your private key.
-
- ```bash
- ssh -o CertificateFile=constellation_cert.pub -o UserKnownHostsFile=./known_hosts -i root@
- ```
-
- Normally, you don't have access to the Constellation nodes since they reside in a private network.
- To access those nodes anyways, you can use your Constellation load balancer as a proxy jump host.
- For this, use something along the following SSH client configuration:
-
- ```text
- Host
- ProxyJump none
-
- Host *
- IdentityFile
- PreferredAuthentications publickey
- CertificateFile=constellation_cert.pub
- UserKnownHostsFile=./known_hosts
- User root
- ProxyJump
- ```
-
- With this configuration you can connect to a Constellation node using `ssh -F `.
- You can obtain the private node IP and the public IP of the load balancer using your CSP's web UI. Note that if
- you use the load balancers domain name, ssh host certificate verification doesn't work, so using the public IP is recommended.
diff --git a/docs/docs/workflows/trusted-launch.md b/docs/docs/workflows/trusted-launch.md
index d6d01d8eb..9bc7e785f 100644
--- a/docs/docs/workflows/trusted-launch.md
+++ b/docs/docs/workflows/trusted-launch.md
@@ -14,7 +14,7 @@ Constellation supports trusted launch VMs with instance types `Standard_D*_v4` a
Azure currently doesn't support [community galleries for trusted launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/share-gallery-community). Thus, you need to manually import the Constellation node image into your cloud subscription.
-The latest image is available at `https://cdn.confidential.cloud/constellation/images/azure/trusted-launch/v2.2.0/constellation.img`. Simply adjust the version number to download a newer version.
+The latest image is available at <https://cdn.confidential.cloud/constellation/images/azure/trusted-launch/v2.2.0/constellation.img>. Simply adjust the version number to download a newer version.
After you've downloaded the image, create a resource group `constellation-images` in your Azure subscription and import the image. You can use a script to do this:
diff --git a/docs/docs/workflows/upgrade.md b/docs/docs/workflows/upgrade.md
index 3db2ecad6..7348c0dbc 100644
--- a/docs/docs/workflows/upgrade.md
+++ b/docs/docs/workflows/upgrade.md
@@ -1,6 +1,6 @@
# Upgrade your cluster
-Constellation provides an easy way to upgrade all components of your cluster, without disrupting its availability.
+Constellation provides an easy way to upgrade all components of your cluster, without disrupting it's availability.
Specifically, you can upgrade the Kubernetes version, the nodes' image, and the Constellation microservices.
You configure the desired versions in your local Constellation configuration and trigger upgrades with the `apply` command.
To learn about available versions you use the `upgrade check` command.
diff --git a/docs/docs/workflows/verify-cli.md b/docs/docs/workflows/verify-cli.md
index e33569d37..78341f314 100644
--- a/docs/docs/workflows/verify-cli.md
+++ b/docs/docs/workflows/verify-cli.md
@@ -4,11 +4,11 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details.
:::
-
+
---
-Edgeless Systems uses [sigstore](https://www.sigstore.dev/) and [SLSA](https://slsa.dev) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/cosign/signing/overview/), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at `https://rekor.sigstore.dev`.
+Edgeless Systems uses [sigstore](https://www.sigstore.dev/) and [SLSA](https://slsa.dev) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/signing/quickstart), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at <https://rekor.sigstore.dev>.
:::note
The public key for Edgeless Systems' long-term code-signing key is:
@@ -20,7 +20,7 @@ JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw==
-----END PUBLIC KEY-----
```
-The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems).
+The public key is also available for download at <https://edgeless.systems/es.pub> and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems).
:::
The Rekor transparency log is a public append-only ledger that verifies and records signatures and associated metadata. The Rekor transparency log enables everyone to observe the sequence of (software) signatures issued by Edgeless Systems and many other parties. The transparency log allows for the public identification of dubious or malicious signatures.
@@ -37,7 +37,7 @@ You don't need to verify the Constellation node images. This is done automatical
This guide assumes Linux on an amd64 processor. The exact steps for other platforms differ slightly.
:::
-First, [install the Cosign CLI](https://docs.sigstore.dev/cosign/system_config/installation/). Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example:
+First, [install the Cosign CLI](https://docs.sigstore.dev/system_config/installation).
Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example: ```shell-session $ cosign verify-blob --key https://edgeless.systems/es.pub --signature constellation-linux-amd64.sig constellation-linux-amd64 diff --git a/docs/docs/workflows/verify-cluster.md b/docs/docs/workflows/verify-cluster.md index b6595ebf2..20d416790 100644 --- a/docs/docs/workflows/verify-cluster.md +++ b/docs/docs/workflows/verify-cluster.md @@ -88,7 +88,6 @@ The `verify` command also allows you to verify any Constellation deployment that * The IP address of a running Constellation cluster's [VerificationService](../architecture/microservices.md#verificationservice). The `VerificationService` is exposed via a `NodePort` service using the external IP address of your cluster. Run `kubectl get nodes -o wide` and look for `EXTERNAL-IP`. * The cluster's *clusterID*. See [cluster identity](../architecture/keys.md#cluster-identity) for more details. -* A `constellation-conf.yaml` file with the expected measurements of the cluster in your working directory. For example: diff --git a/docs/docusaurus.config.js b/docs/docusaurus.config.js index d5d220fdf..2dc60e883 100644 --- a/docs/docusaurus.config.js +++ b/docs/docusaurus.config.js @@ -13,7 +13,6 @@ async function createConfig() { baseUrl: '/constellation/', onBrokenLinks: 'throw', onBrokenMarkdownLinks: 'throw', - onBrokenAnchors: 'throw', favicon: 'img/favicon.ico', // GitHub pages deployment config. @@ -23,11 +22,9 @@ async function createConfig() { // scripts scripts: [ - { - src: '/constellation/gtagman.js', - async: true, - "data-cookieconsent": "ignore", - }, + { src: 'https://plausible.io/js/plausible.js', async: true, defer: true, 'data-domain': 'docs.edgeless.systems' }, + { id: "Cookiebot", src: "https://consent.cookiebot.com/uc.js", "data-cbid": "a0cc864f-0b67-49be-8d65-9ed354de2ee6", "data-blockingmode": "auto" }, + { id: "CookieDeclaration", src: "https://consent.cookiebot.com/a0cc864f-0b67-49be-8d65-9ed354de2ee6/cd.js" } ], // Even if you don't use internalization, you can use this field to set useful @@ -61,6 +58,10 @@ async function createConfig() { theme: { customCss: require.resolve('./src/css/custom.css'), }, + gtag: { + trackingID: 'G-3DVYB2CHLG', + anonymizeIP: true, + } }), ], ], diff --git a/docs/package-lock.json b/docs/package-lock.json deleted file mode 100644 index cb381bcac..000000000 --- a/docs/package-lock.json +++ /dev/null @@ -1,19420 +0,0 @@ -{ - "name": "constellation-docs", - "version": "2.0.0", - "lockfileVersion": 3, - "requires": true, - "packages": { - "": { - "name": "constellation-docs", - "version": "2.0.0", - "dependencies": { - "@cmfcmf/docusaurus-search-local": "1.2.0", - "@docusaurus/core": "3.8.0", - "@docusaurus/preset-classic": "3.8.0", - "@docusaurus/theme-mermaid": "3.8.0", - "@mdx-js/react": "3.1.0", - "asciinema-player": "3.10.0", - "clsx": "2.1.1", - "prism-react-renderer": "2.4.1", - "react": "18.3.1", - "react-dom": "18.3.1" - }, - "devDependencies": { - "@docusaurus/module-type-aliases": "3.8.0", - "@docusaurus/types": "3.8.0" - }, - "engines": { - "node": ">=16.14" - } - }, - "node_modules/@algolia/autocomplete-core": { - "version": "1.17.7", - "resolved": "https://registry.npmjs.org/@algolia/autocomplete-core/-/autocomplete-core-1.17.7.tgz", - "integrity": "sha512-BjiPOW6ks90UKl7TwMv7oNQMnzU+t/wk9mgIDi6b1tXpUek7MW0lbNOUHpvam9pe3lVCf4xPFT+lK7s+e+fs7Q==", - "license": "MIT", - "dependencies": { - 
"@algolia/autocomplete-plugin-algolia-insights": "1.17.7", - "@algolia/autocomplete-shared": "1.17.7" - } - }, - "node_modules/@algolia/autocomplete-js": { - "version": "1.17.7", - "resolved": "https://registry.npmjs.org/@algolia/autocomplete-js/-/autocomplete-js-1.17.7.tgz", - "integrity": "sha512-4rCCg2B5x6GYzLfDZ3QipWydznbaMjoIwNSEbjpJ9cd/0+4nDpRWuBPxgOSsGmE4BFEor2iwQw4uCY6RrBdpjA==", - "license": "MIT", - "dependencies": { - "@algolia/autocomplete-core": "1.17.7", - "@algolia/autocomplete-preset-algolia": "1.17.7", - "@algolia/autocomplete-shared": "1.17.7", - "htm": "^3.1.1", - "preact": "^10.13.2" - }, - "peerDependencies": { - "@algolia/client-search": ">= 4.5.1 < 6", - "algoliasearch": ">= 4.9.1 < 6" - } - }, - "node_modules/@algolia/autocomplete-plugin-algolia-insights": { - "version": "1.17.7", - "resolved": "https://registry.npmjs.org/@algolia/autocomplete-plugin-algolia-insights/-/autocomplete-plugin-algolia-insights-1.17.7.tgz", - "integrity": "sha512-Jca5Ude6yUOuyzjnz57og7Et3aXjbwCSDf/8onLHSQgw1qW3ALl9mrMWaXb5FmPVkV3EtkD2F/+NkT6VHyPu9A==", - "license": "MIT", - "dependencies": { - "@algolia/autocomplete-shared": "1.17.7" - }, - "peerDependencies": { - "search-insights": ">= 1 < 3" - } - }, - "node_modules/@algolia/autocomplete-preset-algolia": { - "version": "1.17.7", - "resolved": "https://registry.npmjs.org/@algolia/autocomplete-preset-algolia/-/autocomplete-preset-algolia-1.17.7.tgz", - "integrity": "sha512-ggOQ950+nwbWROq2MOCIL71RE0DdQZsceqrg32UqnhDz8FlO9rL8ONHNsI2R1MH0tkgVIDKI/D0sMiUchsFdWA==", - "license": "MIT", - "dependencies": { - "@algolia/autocomplete-shared": "1.17.7" - }, - "peerDependencies": { - "@algolia/client-search": ">= 4.9.1 < 6", - "algoliasearch": ">= 4.9.1 < 6" - } - }, - "node_modules/@algolia/autocomplete-shared": { - "version": "1.17.7", - "resolved": "https://registry.npmjs.org/@algolia/autocomplete-shared/-/autocomplete-shared-1.17.7.tgz", - "integrity": "sha512-o/1Vurr42U/qskRSuhBH+VKxMvkkUVTLU6WZQr+L5lGZZLYWyhdzWjW0iGXY7EkwRTjBqvN2EsR81yCTGV/kmg==", - "license": "MIT", - "peerDependencies": { - "@algolia/client-search": ">= 4.9.1 < 6", - "algoliasearch": ">= 4.9.1 < 6" - } - }, - "node_modules/@algolia/autocomplete-theme-classic": { - "version": "1.17.7", - "resolved": "https://registry.npmjs.org/@algolia/autocomplete-theme-classic/-/autocomplete-theme-classic-1.17.7.tgz", - "integrity": "sha512-8sxnzRCPxyKZJxbG7EUpV/3AssQOjn+Zq/nvzks+BwbkAcpiLRBsXjvlIIsV4l36bZ+/Ri++ttAflGDPrRfn1A==", - "license": "MIT" - }, - "node_modules/@algolia/cache-browser-local-storage": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.24.0.tgz", - "integrity": "sha512-t63W9BnoXVrGy9iYHBgObNXqYXM3tYXCjDSHeNwnsc324r4o5UiVKUiAB4THQ5z9U5hTj6qUvwg/Ez43ZD85ww==", - "license": "MIT", - "dependencies": { - "@algolia/cache-common": "4.24.0" - } - }, - "node_modules/@algolia/cache-common": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@algolia/cache-common/-/cache-common-4.24.0.tgz", - "integrity": "sha512-emi+v+DmVLpMGhp0V9q9h5CdkURsNmFC+cOS6uK9ndeJm9J4TiqSvPYVu+THUP8P/S08rxf5x2P+p3CfID0Y4g==", - "license": "MIT" - }, - "node_modules/@algolia/cache-in-memory": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@algolia/cache-in-memory/-/cache-in-memory-4.24.0.tgz", - "integrity": "sha512-gDrt2so19jW26jY3/MkFg5mEypFIPbPoXsQGQWAi6TrCPsNOSEYepBMPlucqWigsmEy/prp5ug2jy/N3PVG/8w==", - "license": "MIT", - "dependencies": { - "@algolia/cache-common": "4.24.0" - } 
- }, - "node_modules/@algolia/client-abtesting": { - "version": "5.19.0", - "resolved": "https://registry.npmjs.org/@algolia/client-abtesting/-/client-abtesting-5.19.0.tgz", - "integrity": "sha512-dMHwy2+nBL0SnIsC1iHvkBao64h4z+roGelOz11cxrDBrAdASxLxmfVMop8gmodQ2yZSacX0Rzevtxa+9SqxCw==", - "license": "MIT", - "dependencies": { - "@algolia/client-common": "5.19.0", - "@algolia/requester-browser-xhr": "5.19.0", - "@algolia/requester-fetch": "5.19.0", - "@algolia/requester-node-http": "5.19.0" - }, - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@algolia/client-abtesting/node_modules/@algolia/client-common": { - "version": "5.19.0", - "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-5.19.0.tgz", - "integrity": "sha512-2ERRbICHXvtj5kfFpY5r8qu9pJII/NAHsdgUXnUitQFwPdPL7wXiupcvZJC7DSntOnE8AE0lM7oDsPhrJfj5nQ==", - "license": "MIT", - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@algolia/client-abtesting/node_modules/@algolia/requester-browser-xhr": { - "version": "5.19.0", - "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-5.19.0.tgz", - "integrity": "sha512-GfnhnQBT23mW/VMNs7m1qyEyZzhZz093aY2x8p0era96MMyNv8+FxGek5pjVX0b57tmSCZPf4EqNCpkGcGsmbw==", - "license": "MIT", - "dependencies": { - "@algolia/client-common": "5.19.0" - }, - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@algolia/client-abtesting/node_modules/@algolia/requester-node-http": { - "version": "5.19.0", - "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-5.19.0.tgz", - "integrity": "sha512-p6t8ue0XZNjcRiqNkb5QAM0qQRAKsCiebZ6n9JjWA+p8fWf8BvnhO55y2fO28g3GW0Imj7PrAuyBuxq8aDVQwQ==", - "license": "MIT", - "dependencies": { - "@algolia/client-common": "5.19.0" - }, - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@algolia/client-account": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@algolia/client-account/-/client-account-4.24.0.tgz", - "integrity": "sha512-adcvyJ3KjPZFDybxlqnf+5KgxJtBjwTPTeyG2aOyoJvx0Y8dUQAEOEVOJ/GBxX0WWNbmaSrhDURMhc+QeevDsA==", - "license": "MIT", - "dependencies": { - "@algolia/client-common": "4.24.0", - "@algolia/client-search": "4.24.0", - "@algolia/transporter": "4.24.0" - } - }, - "node_modules/@algolia/client-analytics": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@algolia/client-analytics/-/client-analytics-4.24.0.tgz", - "integrity": "sha512-y8jOZt1OjwWU4N2qr8G4AxXAzaa8DBvyHTWlHzX/7Me1LX8OayfgHexqrsL4vSBcoMmVw2XnVW9MhL+Y2ZDJXg==", - "license": "MIT", - "dependencies": { - "@algolia/client-common": "4.24.0", - "@algolia/client-search": "4.24.0", - "@algolia/requester-common": "4.24.0", - "@algolia/transporter": "4.24.0" - } - }, - "node_modules/@algolia/client-common": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.24.0.tgz", - "integrity": "sha512-bc2ROsNL6w6rqpl5jj/UywlIYC21TwSSoFHKl01lYirGMW+9Eek6r02Tocg4gZ8HAw3iBvu6XQiM3BEbmEMoiA==", - "license": "MIT", - "dependencies": { - "@algolia/requester-common": "4.24.0", - "@algolia/transporter": "4.24.0" - } - }, - "node_modules/@algolia/client-insights": { - "version": "5.19.0", - "resolved": "https://registry.npmjs.org/@algolia/client-insights/-/client-insights-5.19.0.tgz", - "integrity": "sha512-xPOiGjo6I9mfjdJO7Y+p035aWePcbsItizIp+qVyfkfZiGgD+TbNxM12g7QhFAHIkx/mlYaocxPY/TmwPzTe+A==", - "license": "MIT", - "dependencies": { - "@algolia/client-common": "5.19.0", - 
"@algolia/requester-browser-xhr": "5.19.0", - "@algolia/requester-fetch": "5.19.0", - "@algolia/requester-node-http": "5.19.0" - }, - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@algolia/client-insights/node_modules/@algolia/client-common": { - "version": "5.19.0", - "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-5.19.0.tgz", - "integrity": "sha512-2ERRbICHXvtj5kfFpY5r8qu9pJII/NAHsdgUXnUitQFwPdPL7wXiupcvZJC7DSntOnE8AE0lM7oDsPhrJfj5nQ==", - "license": "MIT", - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@algolia/client-insights/node_modules/@algolia/requester-browser-xhr": { - "version": "5.19.0", - "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-5.19.0.tgz", - "integrity": "sha512-GfnhnQBT23mW/VMNs7m1qyEyZzhZz093aY2x8p0era96MMyNv8+FxGek5pjVX0b57tmSCZPf4EqNCpkGcGsmbw==", - "license": "MIT", - "dependencies": { - "@algolia/client-common": "5.19.0" - }, - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@algolia/client-insights/node_modules/@algolia/requester-node-http": { - "version": "5.19.0", - "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-5.19.0.tgz", - "integrity": "sha512-p6t8ue0XZNjcRiqNkb5QAM0qQRAKsCiebZ6n9JjWA+p8fWf8BvnhO55y2fO28g3GW0Imj7PrAuyBuxq8aDVQwQ==", - "license": "MIT", - "dependencies": { - "@algolia/client-common": "5.19.0" - }, - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@algolia/client-personalization": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@algolia/client-personalization/-/client-personalization-4.24.0.tgz", - "integrity": "sha512-l5FRFm/yngztweU0HdUzz1rC4yoWCFo3IF+dVIVTfEPg906eZg5BOd1k0K6rZx5JzyyoP4LdmOikfkfGsKVE9w==", - "license": "MIT", - "dependencies": { - "@algolia/client-common": "4.24.0", - "@algolia/requester-common": "4.24.0", - "@algolia/transporter": "4.24.0" - } - }, - "node_modules/@algolia/client-query-suggestions": { - "version": "5.19.0", - "resolved": "https://registry.npmjs.org/@algolia/client-query-suggestions/-/client-query-suggestions-5.19.0.tgz", - "integrity": "sha512-6fcP8d4S8XRDtVogrDvmSM6g5g6DndLc0pEm1GCKe9/ZkAzCmM3ZmW1wFYYPxdjMeifWy1vVEDMJK7sbE4W7MA==", - "license": "MIT", - "dependencies": { - "@algolia/client-common": "5.19.0", - "@algolia/requester-browser-xhr": "5.19.0", - "@algolia/requester-fetch": "5.19.0", - "@algolia/requester-node-http": "5.19.0" - }, - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@algolia/client-query-suggestions/node_modules/@algolia/client-common": { - "version": "5.19.0", - "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-5.19.0.tgz", - "integrity": "sha512-2ERRbICHXvtj5kfFpY5r8qu9pJII/NAHsdgUXnUitQFwPdPL7wXiupcvZJC7DSntOnE8AE0lM7oDsPhrJfj5nQ==", - "license": "MIT", - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@algolia/client-query-suggestions/node_modules/@algolia/requester-browser-xhr": { - "version": "5.19.0", - "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-5.19.0.tgz", - "integrity": "sha512-GfnhnQBT23mW/VMNs7m1qyEyZzhZz093aY2x8p0era96MMyNv8+FxGek5pjVX0b57tmSCZPf4EqNCpkGcGsmbw==", - "license": "MIT", - "dependencies": { - "@algolia/client-common": "5.19.0" - }, - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@algolia/client-query-suggestions/node_modules/@algolia/requester-node-http": { - "version": "5.19.0", - "resolved": 
"https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-5.19.0.tgz", - "integrity": "sha512-p6t8ue0XZNjcRiqNkb5QAM0qQRAKsCiebZ6n9JjWA+p8fWf8BvnhO55y2fO28g3GW0Imj7PrAuyBuxq8aDVQwQ==", - "license": "MIT", - "dependencies": { - "@algolia/client-common": "5.19.0" - }, - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@algolia/client-search": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.24.0.tgz", - "integrity": "sha512-uRW6EpNapmLAD0mW47OXqTP8eiIx5F6qN9/x/7HHO6owL3N1IXqydGwW5nhDFBrV+ldouro2W1VX3XlcUXEFCA==", - "license": "MIT", - "dependencies": { - "@algolia/client-common": "4.24.0", - "@algolia/requester-common": "4.24.0", - "@algolia/transporter": "4.24.0" - } - }, - "node_modules/@algolia/events": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/@algolia/events/-/events-4.0.1.tgz", - "integrity": "sha512-FQzvOCgoFXAbf5Y6mYozw2aj5KCJoA3m4heImceldzPSMbdyS4atVjJzXKMsfX3wnZTFYwkkt8/z8UesLHlSBQ==", - "license": "MIT" - }, - "node_modules/@algolia/ingestion": { - "version": "1.19.0", - "resolved": "https://registry.npmjs.org/@algolia/ingestion/-/ingestion-1.19.0.tgz", - "integrity": "sha512-LO7w1MDV+ZLESwfPmXkp+KLeYeFrYEgtbCZG6buWjddhYraPQ9MuQWLhLLiaMlKxZ/sZvFTcZYuyI6Jx4WBhcg==", - "license": "MIT", - "dependencies": { - "@algolia/client-common": "5.19.0", - "@algolia/requester-browser-xhr": "5.19.0", - "@algolia/requester-fetch": "5.19.0", - "@algolia/requester-node-http": "5.19.0" - }, - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@algolia/ingestion/node_modules/@algolia/client-common": { - "version": "5.19.0", - "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-5.19.0.tgz", - "integrity": "sha512-2ERRbICHXvtj5kfFpY5r8qu9pJII/NAHsdgUXnUitQFwPdPL7wXiupcvZJC7DSntOnE8AE0lM7oDsPhrJfj5nQ==", - "license": "MIT", - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@algolia/ingestion/node_modules/@algolia/requester-browser-xhr": { - "version": "5.19.0", - "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-5.19.0.tgz", - "integrity": "sha512-GfnhnQBT23mW/VMNs7m1qyEyZzhZz093aY2x8p0era96MMyNv8+FxGek5pjVX0b57tmSCZPf4EqNCpkGcGsmbw==", - "license": "MIT", - "dependencies": { - "@algolia/client-common": "5.19.0" - }, - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@algolia/ingestion/node_modules/@algolia/requester-node-http": { - "version": "5.19.0", - "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-5.19.0.tgz", - "integrity": "sha512-p6t8ue0XZNjcRiqNkb5QAM0qQRAKsCiebZ6n9JjWA+p8fWf8BvnhO55y2fO28g3GW0Imj7PrAuyBuxq8aDVQwQ==", - "license": "MIT", - "dependencies": { - "@algolia/client-common": "5.19.0" - }, - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@algolia/logger-common": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@algolia/logger-common/-/logger-common-4.24.0.tgz", - "integrity": "sha512-LLUNjkahj9KtKYrQhFKCzMx0BY3RnNP4FEtO+sBybCjJ73E8jNdaKJ/Dd8A/VA4imVHP5tADZ8pn5B8Ga/wTMA==", - "license": "MIT" - }, - "node_modules/@algolia/logger-console": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@algolia/logger-console/-/logger-console-4.24.0.tgz", - "integrity": "sha512-X4C8IoHgHfiUROfoRCV+lzSy+LHMgkoEEU1BbKcsfnV0i0S20zyy0NLww9dwVHUWNfPPxdMU+/wKmLGYf96yTg==", - "license": "MIT", - "dependencies": { - "@algolia/logger-common": "4.24.0" - } - }, - "node_modules/@algolia/monitoring": { - 
"version": "1.19.0", - "resolved": "https://registry.npmjs.org/@algolia/monitoring/-/monitoring-1.19.0.tgz", - "integrity": "sha512-Mg4uoS0aIKeTpu6iv6O0Hj81s8UHagi5TLm9k2mLIib4vmMtX7WgIAHAcFIaqIZp5D6s5EVy1BaDOoZ7buuJHA==", - "license": "MIT", - "dependencies": { - "@algolia/client-common": "5.19.0", - "@algolia/requester-browser-xhr": "5.19.0", - "@algolia/requester-fetch": "5.19.0", - "@algolia/requester-node-http": "5.19.0" - }, - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@algolia/monitoring/node_modules/@algolia/client-common": { - "version": "5.19.0", - "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-5.19.0.tgz", - "integrity": "sha512-2ERRbICHXvtj5kfFpY5r8qu9pJII/NAHsdgUXnUitQFwPdPL7wXiupcvZJC7DSntOnE8AE0lM7oDsPhrJfj5nQ==", - "license": "MIT", - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@algolia/monitoring/node_modules/@algolia/requester-browser-xhr": { - "version": "5.19.0", - "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-5.19.0.tgz", - "integrity": "sha512-GfnhnQBT23mW/VMNs7m1qyEyZzhZz093aY2x8p0era96MMyNv8+FxGek5pjVX0b57tmSCZPf4EqNCpkGcGsmbw==", - "license": "MIT", - "dependencies": { - "@algolia/client-common": "5.19.0" - }, - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@algolia/monitoring/node_modules/@algolia/requester-node-http": { - "version": "5.19.0", - "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-5.19.0.tgz", - "integrity": "sha512-p6t8ue0XZNjcRiqNkb5QAM0qQRAKsCiebZ6n9JjWA+p8fWf8BvnhO55y2fO28g3GW0Imj7PrAuyBuxq8aDVQwQ==", - "license": "MIT", - "dependencies": { - "@algolia/client-common": "5.19.0" - }, - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@algolia/recommend": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@algolia/recommend/-/recommend-4.24.0.tgz", - "integrity": "sha512-P9kcgerfVBpfYHDfVZDvvdJv0lEoCvzNlOy2nykyt5bK8TyieYyiD0lguIJdRZZYGre03WIAFf14pgE+V+IBlw==", - "license": "MIT", - "dependencies": { - "@algolia/cache-browser-local-storage": "4.24.0", - "@algolia/cache-common": "4.24.0", - "@algolia/cache-in-memory": "4.24.0", - "@algolia/client-common": "4.24.0", - "@algolia/client-search": "4.24.0", - "@algolia/logger-common": "4.24.0", - "@algolia/logger-console": "4.24.0", - "@algolia/requester-browser-xhr": "4.24.0", - "@algolia/requester-common": "4.24.0", - "@algolia/requester-node-http": "4.24.0", - "@algolia/transporter": "4.24.0" - } - }, - "node_modules/@algolia/requester-browser-xhr": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.24.0.tgz", - "integrity": "sha512-Z2NxZMb6+nVXSjF13YpjYTdvV3032YTBSGm2vnYvYPA6mMxzM3v5rsCiSspndn9rzIW4Qp1lPHBvuoKJV6jnAA==", - "license": "MIT", - "dependencies": { - "@algolia/requester-common": "4.24.0" - } - }, - "node_modules/@algolia/requester-common": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@algolia/requester-common/-/requester-common-4.24.0.tgz", - "integrity": "sha512-k3CXJ2OVnvgE3HMwcojpvY6d9kgKMPRxs/kVohrwF5WMr2fnqojnycZkxPoEg+bXm8fi5BBfFmOqgYztRtHsQA==", - "license": "MIT" - }, - "node_modules/@algolia/requester-fetch": { - "version": "5.19.0", - "resolved": "https://registry.npmjs.org/@algolia/requester-fetch/-/requester-fetch-5.19.0.tgz", - "integrity": "sha512-oyTt8ZJ4T4fYvW5avAnuEc6Laedcme9fAFryMD9ndUTIUe/P0kn3BuGcCLFjN3FDmdrETHSFkgPPf1hGy3sLCw==", - "license": "MIT", - "dependencies": { - 
"@algolia/client-common": "5.19.0" - }, - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@algolia/requester-fetch/node_modules/@algolia/client-common": { - "version": "5.19.0", - "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-5.19.0.tgz", - "integrity": "sha512-2ERRbICHXvtj5kfFpY5r8qu9pJII/NAHsdgUXnUitQFwPdPL7wXiupcvZJC7DSntOnE8AE0lM7oDsPhrJfj5nQ==", - "license": "MIT", - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@algolia/requester-node-http": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.24.0.tgz", - "integrity": "sha512-JF18yTjNOVYvU/L3UosRcvbPMGT9B+/GQWNWnenIImglzNVGpyzChkXLnrSf6uxwVNO6ESGu6oN8MqcGQcjQJw==", - "license": "MIT", - "dependencies": { - "@algolia/requester-common": "4.24.0" - } - }, - "node_modules/@algolia/transporter": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/@algolia/transporter/-/transporter-4.24.0.tgz", - "integrity": "sha512-86nI7w6NzWxd1Zp9q3413dRshDqAzSbsQjhcDhPIatEFiZrL1/TjnHL8S7jVKFePlIMzDsZWXAXwXzcok9c5oA==", - "license": "MIT", - "dependencies": { - "@algolia/cache-common": "4.24.0", - "@algolia/logger-common": "4.24.0", - "@algolia/requester-common": "4.24.0" - } - }, - "node_modules/@ampproject/remapping": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.3.0.tgz", - "integrity": "sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==", - "license": "Apache-2.0", - "dependencies": { - "@jridgewell/gen-mapping": "^0.3.5", - "@jridgewell/trace-mapping": "^0.3.24" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/@antfu/install-pkg": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/@antfu/install-pkg/-/install-pkg-0.4.1.tgz", - "integrity": "sha512-T7yB5QNG29afhWVkVq7XeIMBa5U/vs9mX69YqayXypPRmYzUmzwnYltplHmPtZ4HPCn+sQKeXW8I47wCbuBOjw==", - "license": "MIT", - "dependencies": { - "package-manager-detector": "^0.2.0", - "tinyexec": "^0.3.0" - }, - "funding": { - "url": "https://github.com/sponsors/antfu" - } - }, - "node_modules/@antfu/utils": { - "version": "0.7.10", - "resolved": "https://registry.npmjs.org/@antfu/utils/-/utils-0.7.10.tgz", - "integrity": "sha512-+562v9k4aI80m1+VuMHehNJWLOFjBnXn3tdOitzD0il5b7smkSBal4+a3oKiQTbrwMmN/TBUMDvbdoWDehgOww==", - "license": "MIT", - "funding": { - "url": "https://github.com/sponsors/antfu" - } - }, - "node_modules/@babel/code-frame": { - "version": "7.26.2", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.26.2.tgz", - "integrity": "sha512-RJlIHRueQgwWitWgF8OdFYGZX328Ax5BCemNGlqHfplnRT9ESi8JkFlvaVYbS+UubVY6dpv87Fs2u5M29iNFVQ==", - "license": "MIT", - "dependencies": { - "@babel/helper-validator-identifier": "^7.25.9", - "js-tokens": "^4.0.0", - "picocolors": "^1.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/compat-data": { - "version": "7.26.2", - "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.26.2.tgz", - "integrity": "sha512-Z0WgzSEa+aUcdiJuCIqgujCshpMWgUpgOxXotrYPSA53hA3qopNaqcJpyr0hVb1FeWdnqFA35/fUtXgBK8srQg==", - "license": "MIT", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/core": { - "version": "7.26.0", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.26.0.tgz", - "integrity": "sha512-i1SLeK+DzNnQ3LL/CswPCa/E5u4lh1k6IAEphON8F+cXt0t9euTshDru0q7/IqMa1PMPz5RnHuHscF8/ZJsStg==", - "license": "MIT", - "dependencies": { 
- "@ampproject/remapping": "^2.2.0", - "@babel/code-frame": "^7.26.0", - "@babel/generator": "^7.26.0", - "@babel/helper-compilation-targets": "^7.25.9", - "@babel/helper-module-transforms": "^7.26.0", - "@babel/helpers": "^7.26.0", - "@babel/parser": "^7.26.0", - "@babel/template": "^7.25.9", - "@babel/traverse": "^7.25.9", - "@babel/types": "^7.26.0", - "convert-source-map": "^2.0.0", - "debug": "^4.1.0", - "gensync": "^1.0.0-beta.2", - "json5": "^2.2.3", - "semver": "^6.3.1" - }, - "engines": { - "node": ">=6.9.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/babel" - } - }, - "node_modules/@babel/core/node_modules/semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", - "license": "ISC", - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/@babel/generator": { - "version": "7.26.2", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.26.2.tgz", - "integrity": "sha512-zevQbhbau95nkoxSq3f/DC/SC+EEOUZd3DYqfSkMhY2/wfSeaHV1Ew4vk8e+x8lja31IbyuUa2uQ3JONqKbysw==", - "license": "MIT", - "dependencies": { - "@babel/parser": "^7.26.2", - "@babel/types": "^7.26.0", - "@jridgewell/gen-mapping": "^0.3.5", - "@jridgewell/trace-mapping": "^0.3.25", - "jsesc": "^3.0.2" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-annotate-as-pure": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.25.9.tgz", - "integrity": "sha512-gv7320KBUFJz1RnylIg5WWYPRXKZ884AGkYpgpWW02TH66Dl+HaC1t1CKd0z3R4b6hdYEcmrNZHUmfCP+1u3/g==", - "license": "MIT", - "dependencies": { - "@babel/types": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-builder-binary-assignment-operator-visitor": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.25.9.tgz", - "integrity": "sha512-C47lC7LIDCnz0h4vai/tpNOI95tCd5ZT3iBt/DBH5lXKHZsyNQv18yf1wIIg2ntiQNgmAvA+DgZ82iW8Qdym8g==", - "license": "MIT", - "dependencies": { - "@babel/traverse": "^7.25.9", - "@babel/types": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-compilation-targets": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.25.9.tgz", - "integrity": "sha512-j9Db8Suy6yV/VHa4qzrj9yZfZxhLWQdVnRlXxmKLYlhWUVB1sB2G5sxuWYXk/whHD9iW76PmNzxZ4UCnTQTVEQ==", - "license": "MIT", - "dependencies": { - "@babel/compat-data": "^7.25.9", - "@babel/helper-validator-option": "^7.25.9", - "browserslist": "^4.24.0", - "lru-cache": "^5.1.1", - "semver": "^6.3.1" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-compilation-targets/node_modules/semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", - "license": "ISC", - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/@babel/helper-create-class-features-plugin": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.25.9.tgz", - "integrity": 
"sha512-UTZQMvt0d/rSz6KI+qdu7GQze5TIajwTS++GUozlw8VBJDEOAqSXwm1WvmYEZwqdqSGQshRocPDqrt4HBZB3fQ==", - "license": "MIT", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.25.9", - "@babel/helper-member-expression-to-functions": "^7.25.9", - "@babel/helper-optimise-call-expression": "^7.25.9", - "@babel/helper-replace-supers": "^7.25.9", - "@babel/helper-skip-transparent-expression-wrappers": "^7.25.9", - "@babel/traverse": "^7.25.9", - "semver": "^6.3.1" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/helper-create-class-features-plugin/node_modules/semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", - "license": "ISC", - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/@babel/helper-create-regexp-features-plugin": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.25.9.tgz", - "integrity": "sha512-ORPNZ3h6ZRkOyAa/SaHU+XsLZr0UQzRwuDQ0cczIA17nAzZ+85G5cVkOJIj7QavLZGSe8QXUmNFxSZzjcZF9bw==", - "license": "MIT", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.25.9", - "regexpu-core": "^6.1.1", - "semver": "^6.3.1" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/helper-create-regexp-features-plugin/node_modules/semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", - "license": "ISC", - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/@babel/helper-define-polyfill-provider": { - "version": "0.6.3", - "resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.6.3.tgz", - "integrity": "sha512-HK7Bi+Hj6H+VTHA3ZvBis7V/6hu9QuTrnMXNybfUf2iiuU/N97I8VjB+KbhFF8Rld/Lx5MzoCwPCpPjfK+n8Cg==", - "license": "MIT", - "dependencies": { - "@babel/helper-compilation-targets": "^7.22.6", - "@babel/helper-plugin-utils": "^7.22.5", - "debug": "^4.1.1", - "lodash.debounce": "^4.0.8", - "resolve": "^1.14.2" - }, - "peerDependencies": { - "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" - } - }, - "node_modules/@babel/helper-member-expression-to-functions": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.25.9.tgz", - "integrity": "sha512-wbfdZ9w5vk0C0oyHqAJbc62+vet5prjj01jjJ8sKn3j9h3MQQlflEdXYvuqRWjHnM12coDEqiC1IRCi0U/EKwQ==", - "license": "MIT", - "dependencies": { - "@babel/traverse": "^7.25.9", - "@babel/types": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-module-imports": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.25.9.tgz", - "integrity": "sha512-tnUA4RsrmflIM6W6RFTLFSXITtl0wKjgpnLgXyowocVPrbYrLUXSBXDgTs8BlbmIzIdlBySRQjINYs2BAkiLtw==", - "license": "MIT", - "dependencies": { - "@babel/traverse": "^7.25.9", - "@babel/types": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-module-transforms": { - "version": "7.26.0", - "resolved": 
"https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.26.0.tgz", - "integrity": "sha512-xO+xu6B5K2czEnQye6BHA7DolFFmS3LB7stHZFaOLb1pAwO1HWLS8fXA+eh0A2yIvltPVmx3eNNDBJA2SLHXFw==", - "license": "MIT", - "dependencies": { - "@babel/helper-module-imports": "^7.25.9", - "@babel/helper-validator-identifier": "^7.25.9", - "@babel/traverse": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/helper-optimise-call-expression": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.25.9.tgz", - "integrity": "sha512-FIpuNaz5ow8VyrYcnXQTDRGvV6tTjkNtCK/RYNDXGSLlUD6cBuQTSw43CShGxjvfBTfcUA/r6UhUCbtYqkhcuQ==", - "license": "MIT", - "dependencies": { - "@babel/types": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-plugin-utils": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.25.9.tgz", - "integrity": "sha512-kSMlyUVdWe25rEsRGviIgOWnoT/nfABVWlqt9N19/dIPWViAOW2s9wznP5tURbs/IDuNk4gPy3YdYRgH3uxhBw==", - "license": "MIT", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-remap-async-to-generator": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.25.9.tgz", - "integrity": "sha512-IZtukuUeBbhgOcaW2s06OXTzVNJR0ybm4W5xC1opWFFJMZbwRj5LCk+ByYH7WdZPZTt8KnFwA8pvjN2yqcPlgw==", - "license": "MIT", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.25.9", - "@babel/helper-wrap-function": "^7.25.9", - "@babel/traverse": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/helper-replace-supers": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.25.9.tgz", - "integrity": "sha512-IiDqTOTBQy0sWyeXyGSC5TBJpGFXBkRynjBeXsvbhQFKj2viwJC76Epz35YLU1fpe/Am6Vppb7W7zM4fPQzLsQ==", - "license": "MIT", - "dependencies": { - "@babel/helper-member-expression-to-functions": "^7.25.9", - "@babel/helper-optimise-call-expression": "^7.25.9", - "@babel/traverse": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/helper-simple-access": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.25.9.tgz", - "integrity": "sha512-c6WHXuiaRsJTyHYLJV75t9IqsmTbItYfdj99PnzYGQZkYKvan5/2jKJ7gu31J3/BJ/A18grImSPModuyG/Eo0Q==", - "license": "MIT", - "dependencies": { - "@babel/traverse": "^7.25.9", - "@babel/types": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-skip-transparent-expression-wrappers": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.25.9.tgz", - "integrity": "sha512-K4Du3BFa3gvyhzgPcntrkDgZzQaq6uozzcpGbOO1OEJaI+EJdqWIMTLgFgQf6lrfiDFo5FU+BxKepI9RmZqahA==", - "license": "MIT", - "dependencies": { - "@babel/traverse": "^7.25.9", - "@babel/types": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-string-parser": { - "version": "7.25.9", - "resolved": 
"https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.25.9.tgz", - "integrity": "sha512-4A/SCr/2KLd5jrtOMFzaKjVtAei3+2r/NChoBNoZ3EyP/+GlhoaEGoWOZUmFmoITP7zOJyHIMm+DYRd8o3PvHA==", - "license": "MIT", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-validator-identifier": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.25.9.tgz", - "integrity": "sha512-Ed61U6XJc3CVRfkERJWDz4dJwKe7iLmmJsbOGu9wSloNSFttHV0I8g6UAgb7qnK5ly5bGLPd4oXZlxCdANBOWQ==", - "license": "MIT", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-validator-option": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.25.9.tgz", - "integrity": "sha512-e/zv1co8pp55dNdEcCynfj9X7nyUKUXoUEwfXqaZt0omVOmDe9oOTdKStH4GmAw6zxMFs50ZayuMfHDKlO7Tfw==", - "license": "MIT", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-wrap-function": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.25.9.tgz", - "integrity": "sha512-ETzz9UTjQSTmw39GboatdymDq4XIQbR8ySgVrylRhPOFpsd+JrKHIuF0de7GCWmem+T4uC5z7EZguod7Wj4A4g==", - "license": "MIT", - "dependencies": { - "@babel/template": "^7.25.9", - "@babel/traverse": "^7.25.9", - "@babel/types": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helpers": { - "version": "7.26.0", - "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.26.0.tgz", - "integrity": "sha512-tbhNuIxNcVb21pInl3ZSjksLCvgdZy9KwJ8brv993QtIVKJBBkYXz4q4ZbAv31GdnC+R90np23L5FbEBlthAEw==", - "license": "MIT", - "dependencies": { - "@babel/template": "^7.25.9", - "@babel/types": "^7.26.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/parser": { - "version": "7.26.2", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.26.2.tgz", - "integrity": "sha512-DWMCZH9WA4Maitz2q21SRKHo9QXZxkDsbNZoVD62gusNtNBBqDg9i7uOhASfTfIGNzW+O+r7+jAlM8dwphcJKQ==", - "license": "MIT", - "dependencies": { - "@babel/types": "^7.26.0" - }, - "bin": { - "parser": "bin/babel-parser.js" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/@babel/plugin-bugfix-firefox-class-in-computed-class-key": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-firefox-class-in-computed-class-key/-/plugin-bugfix-firefox-class-in-computed-class-key-7.25.9.tgz", - "integrity": "sha512-ZkRyVkThtxQ/J6nv3JFYv1RYY+JT5BvU0y3k5bWrmuG4woXypRa4PXmm9RhOwodRkYFWqC0C0cqcJ4OqR7kW+g==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9", - "@babel/traverse": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/plugin-bugfix-safari-class-field-initializer-scope": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-safari-class-field-initializer-scope/-/plugin-bugfix-safari-class-field-initializer-scope-7.25.9.tgz", - "integrity": "sha512-MrGRLZxLD/Zjj0gdU15dfs+HH/OXvnw/U4jJD8vpcP2CJQapPEv1IWwjc/qMg7ItBlPwSv1hRBbb7LeuANdcnw==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - 
"node_modules/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.25.9.tgz", - "integrity": "sha512-2qUwwfAFpJLZqxd02YW9btUCZHl+RFvdDkNfZwaIJrvB8Tesjsk8pEQkTvGwZXLqXUx/2oyY3ySRhm6HOXuCug==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.25.9.tgz", - "integrity": "sha512-6xWgLZTJXwilVjlnV7ospI3xi+sl8lN8rXXbBD6vYn3UYDlGsag8wrZkKcSI8G6KgqKP7vNFaDgeDnfAABq61g==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9", - "@babel/helper-skip-transparent-expression-wrappers": "^7.25.9", - "@babel/plugin-transform-optional-chaining": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.13.0" - } - }, - "node_modules/@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly/-/plugin-bugfix-v8-static-class-fields-redefine-readonly-7.25.9.tgz", - "integrity": "sha512-aLnMXYPnzwwqhYSCyXfKkIkYgJ8zv9RK+roo9DkTXz38ynIhd9XCbN08s3MGvqL2MYGVUGdRQLL/JqBIeJhJBg==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9", - "@babel/traverse": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/plugin-proposal-private-property-in-object": { - "version": "7.21.0-placeholder-for-preset-env.2", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.21.0-placeholder-for-preset-env.2.tgz", - "integrity": "sha512-SOSkfJDddaM7mak6cPEpswyTRnuRltl429hMraQEglW+OkovnCzsiszTmsrlY//qLFjCpQDFRvjdm2wA5pPm9w==", - "license": "MIT", - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-dynamic-import": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz", - "integrity": "sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-import-assertions": { - "version": "7.26.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.26.0.tgz", - "integrity": "sha512-QCWT5Hh830hK5EQa7XzuqIkQU9tT/whqbDz7kuaZMHFl1inRRg7JnuAEOQ0Ur0QUl0NufCk1msK2BeY79Aj/eg==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-import-attributes": { - "version": "7.26.0", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.26.0.tgz", - "integrity": "sha512-e2dttdsJ1ZTpi3B9UYGLw41hifAubg19AtCu/2I/F1QNVclOBr1dYpTdmdyZ84Xiz43BS/tCUkMAZNLv12Pi+A==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-jsx": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.25.9.tgz", - "integrity": "sha512-ld6oezHQMZsZfp6pWtbjaNDF2tiiCYYDqQszHt5VV437lewP9aSi2Of99CK0D0XB21k7FLgnLcmQKyKzynfeAA==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-typescript": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.25.9.tgz", - "integrity": "sha512-hjMgRy5hb8uJJjUcdWunWVcoi9bGpJp8p5Ol1229PoN6aytsLwNMgmdftO23wnCLMfVmTwZDWMPNq/D1SY60JQ==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-unicode-sets-regex": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-unicode-sets-regex/-/plugin-syntax-unicode-sets-regex-7.18.6.tgz", - "integrity": "sha512-727YkEAPwSIQTv5im8QHz3upqp92JTWhidIC81Tdx4VJYIte/VndKf1qKrfnnhPLiPghStWfvC/iFaMCQu7Nqg==", - "license": "MIT", - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/plugin-transform-arrow-functions": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.25.9.tgz", - "integrity": "sha512-6jmooXYIwn9ca5/RylZADJ+EnSxVUS5sjeJ9UPk6RWRzXCmOJCy6dqItPJFpw2cuCangPK4OYr5uhGKcmrm5Qg==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-async-generator-functions": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-generator-functions/-/plugin-transform-async-generator-functions-7.25.9.tgz", - "integrity": "sha512-RXV6QAzTBbhDMO9fWwOmwwTuYaiPbggWQ9INdZqAYeSHyG7FzQ+nOZaUUjNwKv9pV3aE4WFqFm1Hnbci5tBCAw==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9", - "@babel/helper-remap-async-to-generator": "^7.25.9", - "@babel/traverse": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-async-to-generator": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.25.9.tgz", - "integrity": "sha512-NT7Ejn7Z/LjUH0Gv5KsBCxh7BH3fbLTV0ptHvpeMvrt3cPThHfJfst9Wrb7S8EvJ7vRTFI7z+VAvFVEQn/m5zQ==", - "license": "MIT", - "dependencies": { - "@babel/helper-module-imports": "^7.25.9", - "@babel/helper-plugin-utils": 
"^7.25.9", - "@babel/helper-remap-async-to-generator": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-block-scoped-functions": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.25.9.tgz", - "integrity": "sha512-toHc9fzab0ZfenFpsyYinOX0J/5dgJVA2fm64xPewu7CoYHWEivIWKxkK2rMi4r3yQqLnVmheMXRdG+k239CgA==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-block-scoping": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.25.9.tgz", - "integrity": "sha512-1F05O7AYjymAtqbsFETboN1NvBdcnzMerO+zlMyJBEz6WkMdejvGWw9p05iTSjC85RLlBseHHQpYaM4gzJkBGg==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-class-properties": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-properties/-/plugin-transform-class-properties-7.25.9.tgz", - "integrity": "sha512-bbMAII8GRSkcd0h0b4X+36GksxuheLFjP65ul9w6C3KgAamI3JqErNgSrosX6ZPj+Mpim5VvEbawXxJCyEUV3Q==", - "license": "MIT", - "dependencies": { - "@babel/helper-create-class-features-plugin": "^7.25.9", - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-class-static-block": { - "version": "7.26.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-static-block/-/plugin-transform-class-static-block-7.26.0.tgz", - "integrity": "sha512-6J2APTs7BDDm+UMqP1useWqhcRAXo0WIoVj26N7kPFB6S73Lgvyka4KTZYIxtgYXiN5HTyRObA72N2iu628iTQ==", - "license": "MIT", - "dependencies": { - "@babel/helper-create-class-features-plugin": "^7.25.9", - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.12.0" - } - }, - "node_modules/@babel/plugin-transform-classes": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.25.9.tgz", - "integrity": "sha512-mD8APIXmseE7oZvZgGABDyM34GUmK45Um2TXiBUt7PnuAxrgoSVf123qUzPxEr/+/BHrRn5NMZCdE2m/1F8DGg==", - "license": "MIT", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.25.9", - "@babel/helper-compilation-targets": "^7.25.9", - "@babel/helper-plugin-utils": "^7.25.9", - "@babel/helper-replace-supers": "^7.25.9", - "@babel/traverse": "^7.25.9", - "globals": "^11.1.0" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-computed-properties": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.25.9.tgz", - "integrity": "sha512-HnBegGqXZR12xbcTHlJ9HGxw1OniltT26J5YpfruGqtUHlz/xKf/G2ak9e+t0rVqrjXa9WOhvYPz1ERfMj23AA==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9", - "@babel/template": "^7.25.9" - }, - "engines": { - "node": 
">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-destructuring": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.25.9.tgz", - "integrity": "sha512-WkCGb/3ZxXepmMiX101nnGiU+1CAdut8oHyEOHxkKuS1qKpU2SMXE2uSvfz8PBuLd49V6LEsbtyPhWC7fnkgvQ==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-dotall-regex": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.25.9.tgz", - "integrity": "sha512-t7ZQ7g5trIgSRYhI9pIJtRl64KHotutUJsh4Eze5l7olJv+mRSg4/MmbZ0tv1eeqRbdvo/+trvJD/Oc5DmW2cA==", - "license": "MIT", - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.25.9", - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-duplicate-keys": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.25.9.tgz", - "integrity": "sha512-LZxhJ6dvBb/f3x8xwWIuyiAHy56nrRG3PeYTpBkkzkYRRQ6tJLu68lEF5VIqMUZiAV7a8+Tb78nEoMCMcqjXBw==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-duplicate-named-capturing-groups-regex": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-named-capturing-groups-regex/-/plugin-transform-duplicate-named-capturing-groups-regex-7.25.9.tgz", - "integrity": "sha512-0UfuJS0EsXbRvKnwcLjFtJy/Sxc5J5jhLHnFhy7u4zih97Hz6tJkLU+O+FMMrNZrosUPxDi6sYxJ/EA8jDiAog==", - "license": "MIT", - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.25.9", - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/plugin-transform-dynamic-import": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dynamic-import/-/plugin-transform-dynamic-import-7.25.9.tgz", - "integrity": "sha512-GCggjexbmSLaFhqsojeugBpeaRIgWNTcgKVq/0qIteFEqY2A+b9QidYadrWlnbWQUrW5fn+mCvf3tr7OeBFTyg==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-exponentiation-operator": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.25.9.tgz", - "integrity": "sha512-KRhdhlVk2nObA5AYa7QMgTMTVJdfHprfpAk4DjZVtllqRg9qarilstTKEhpVjyt+Npi8ThRyiV8176Am3CodPA==", - "license": "MIT", - "dependencies": { - "@babel/helper-builder-binary-assignment-operator-visitor": "^7.25.9", - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-export-namespace-from": { - "version": "7.25.9", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-export-namespace-from/-/plugin-transform-export-namespace-from-7.25.9.tgz", - "integrity": "sha512-2NsEz+CxzJIVOPx2o9UsW1rXLqtChtLoVnwYHHiB04wS5sgn7mrV45fWMBX0Kk+ub9uXytVYfNP2HjbVbCB3Ww==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-for-of": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.25.9.tgz", - "integrity": "sha512-LqHxduHoaGELJl2uhImHwRQudhCM50pT46rIBNvtT/Oql3nqiS3wOwP+5ten7NpYSXrrVLgtZU3DZmPtWZo16A==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9", - "@babel/helper-skip-transparent-expression-wrappers": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-function-name": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.25.9.tgz", - "integrity": "sha512-8lP+Yxjv14Vc5MuWBpJsoUCd3hD6V9DgBon2FVYL4jJgbnVQ9fTgYmonchzZJOVNgzEgbxp4OwAf6xz6M/14XA==", - "license": "MIT", - "dependencies": { - "@babel/helper-compilation-targets": "^7.25.9", - "@babel/helper-plugin-utils": "^7.25.9", - "@babel/traverse": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-json-strings": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-json-strings/-/plugin-transform-json-strings-7.25.9.tgz", - "integrity": "sha512-xoTMk0WXceiiIvsaquQQUaLLXSW1KJ159KP87VilruQm0LNNGxWzahxSS6T6i4Zg3ezp4vA4zuwiNUR53qmQAw==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-literals": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.25.9.tgz", - "integrity": "sha512-9N7+2lFziW8W9pBl2TzaNht3+pgMIRP74zizeCSrtnSKVdUl8mAjjOP2OOVQAfZ881P2cNjDj1uAMEdeD50nuQ==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-logical-assignment-operators": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-logical-assignment-operators/-/plugin-transform-logical-assignment-operators-7.25.9.tgz", - "integrity": "sha512-wI4wRAzGko551Y8eVf6iOY9EouIDTtPb0ByZx+ktDGHwv6bHFimrgJM/2T021txPZ2s4c7bqvHbd+vXG6K948Q==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-member-expression-literals": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.25.9.tgz", - "integrity": "sha512-PYazBVfofCQkkMzh2P6IdIUaCEWni3iYEerAsRWuVd8+jlM1S9S9cz1dF9hIzyoZ8IA3+OwVYIp9v9e+GbgZhA==", - "license": "MIT", - "dependencies": { - 
"@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-modules-amd": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.25.9.tgz", - "integrity": "sha512-g5T11tnI36jVClQlMlt4qKDLlWnG5pP9CSM4GhdRciTNMRgkfpo5cR6b4rGIOYPgRRuFAvwjPQ/Yk+ql4dyhbw==", - "license": "MIT", - "dependencies": { - "@babel/helper-module-transforms": "^7.25.9", - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-modules-commonjs": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.25.9.tgz", - "integrity": "sha512-dwh2Ol1jWwL2MgkCzUSOvfmKElqQcuswAZypBSUsScMXvgdT8Ekq5YA6TtqpTVWH+4903NmboMuH1o9i8Rxlyg==", - "license": "MIT", - "dependencies": { - "@babel/helper-module-transforms": "^7.25.9", - "@babel/helper-plugin-utils": "^7.25.9", - "@babel/helper-simple-access": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-modules-systemjs": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.25.9.tgz", - "integrity": "sha512-hyss7iIlH/zLHaehT+xwiymtPOpsiwIIRlCAOwBB04ta5Tt+lNItADdlXw3jAWZ96VJ2jlhl/c+PNIQPKNfvcA==", - "license": "MIT", - "dependencies": { - "@babel/helper-module-transforms": "^7.25.9", - "@babel/helper-plugin-utils": "^7.25.9", - "@babel/helper-validator-identifier": "^7.25.9", - "@babel/traverse": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-modules-umd": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.25.9.tgz", - "integrity": "sha512-bS9MVObUgE7ww36HEfwe6g9WakQ0KF07mQF74uuXdkoziUPfKyu/nIm663kz//e5O1nPInPFx36z7WJmJ4yNEw==", - "license": "MIT", - "dependencies": { - "@babel/helper-module-transforms": "^7.25.9", - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-named-capturing-groups-regex": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.25.9.tgz", - "integrity": "sha512-oqB6WHdKTGl3q/ItQhpLSnWWOpjUJLsOCLVyeFgeTktkBSCiurvPOsyt93gibI9CmuKvTUEtWmG5VhZD+5T/KA==", - "license": "MIT", - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.25.9", - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/plugin-transform-new-target": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.25.9.tgz", - "integrity": "sha512-U/3p8X1yCSoKyUj2eOBIx3FOn6pElFOKvAAGf8HTtItuPyB+ZeOqfn+mvTtg9ZlOAjsPdK3ayQEjqHjU/yLeVQ==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - 
}, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-nullish-coalescing-operator": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-nullish-coalescing-operator/-/plugin-transform-nullish-coalescing-operator-7.25.9.tgz", - "integrity": "sha512-ENfftpLZw5EItALAD4WsY/KUWvhUlZndm5GC7G3evUsVeSJB6p0pBeLQUnRnBCBx7zV0RKQjR9kCuwrsIrjWog==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-numeric-separator": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-numeric-separator/-/plugin-transform-numeric-separator-7.25.9.tgz", - "integrity": "sha512-TlprrJ1GBZ3r6s96Yq8gEQv82s8/5HnCVHtEJScUj90thHQbwe+E5MLhi2bbNHBEJuzrvltXSru+BUxHDoog7Q==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-object-rest-spread": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-rest-spread/-/plugin-transform-object-rest-spread-7.25.9.tgz", - "integrity": "sha512-fSaXafEE9CVHPweLYw4J0emp1t8zYTXyzN3UuG+lylqkvYd7RMrsOQ8TYx5RF231be0vqtFC6jnx3UmpJmKBYg==", - "license": "MIT", - "dependencies": { - "@babel/helper-compilation-targets": "^7.25.9", - "@babel/helper-plugin-utils": "^7.25.9", - "@babel/plugin-transform-parameters": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-object-super": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.25.9.tgz", - "integrity": "sha512-Kj/Gh+Rw2RNLbCK1VAWj2U48yxxqL2x0k10nPtSdRa0O2xnHXalD0s+o1A6a0W43gJ00ANo38jxkQreckOzv5A==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9", - "@babel/helper-replace-supers": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-optional-catch-binding": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-catch-binding/-/plugin-transform-optional-catch-binding-7.25.9.tgz", - "integrity": "sha512-qM/6m6hQZzDcZF3onzIhZeDHDO43bkNNlOX0i8n3lR6zLbu0GN2d8qfM/IERJZYauhAHSLHy39NF0Ctdvcid7g==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-optional-chaining": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-chaining/-/plugin-transform-optional-chaining-7.25.9.tgz", - "integrity": "sha512-6AvV0FsLULbpnXeBjrY4dmWF8F7gf8QnvTEoO/wX/5xm/xE1Xo8oPuD3MPS+KS9f9XBEAWN7X1aWr4z9HdOr7A==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9", - "@babel/helper-skip-transparent-expression-wrappers": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-parameters": { - "version": "7.25.9", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.25.9.tgz", - "integrity": "sha512-wzz6MKwpnshBAiRmn4jR8LYz/g8Ksg0o80XmwZDlordjwEk9SxBzTWC7F5ef1jhbrbOW2DJ5J6ayRukrJmnr0g==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-private-methods": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-methods/-/plugin-transform-private-methods-7.25.9.tgz", - "integrity": "sha512-D/JUozNpQLAPUVusvqMxyvjzllRaF8/nSrP1s2YGQT/W4LHK4xxsMcHjhOGTS01mp9Hda8nswb+FblLdJornQw==", - "license": "MIT", - "dependencies": { - "@babel/helper-create-class-features-plugin": "^7.25.9", - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-private-property-in-object": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-property-in-object/-/plugin-transform-private-property-in-object-7.25.9.tgz", - "integrity": "sha512-Evf3kcMqzXA3xfYJmZ9Pg1OvKdtqsDMSWBDzZOPLvHiTt36E75jLDQo5w1gtRU95Q4E5PDttrTf25Fw8d/uWLw==", - "license": "MIT", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.25.9", - "@babel/helper-create-class-features-plugin": "^7.25.9", - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-property-literals": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.25.9.tgz", - "integrity": "sha512-IvIUeV5KrS/VPavfSM/Iu+RE6llrHrYIKY1yfCzyO/lMXHQ+p7uGhonmGVisv6tSBSVgWzMBohTcvkC9vQcQFA==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-react-constant-elements": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-constant-elements/-/plugin-transform-react-constant-elements-7.25.9.tgz", - "integrity": "sha512-Ncw2JFsJVuvfRsa2lSHiC55kETQVLSnsYGQ1JDDwkUeWGTL/8Tom8aLTnlqgoeuopWrbbGndrc9AlLYrIosrow==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-react-display-name": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.25.9.tgz", - "integrity": "sha512-KJfMlYIUxQB1CJfO3e0+h0ZHWOTLCPP115Awhaz8U0Zpq36Gl/cXlpoyMRnUWlhNUBAzldnCiAZNvCDj7CrKxQ==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-react-jsx": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.25.9.tgz", - "integrity": "sha512-s5XwpQYCqGerXl+Pu6VDL3x0j2d82eiV77UJ8a2mDHAW7j9SWRqQ2y1fNo1Z74CdcYipl5Z41zvjj4Nfzq36rw==", - "license": "MIT", - 
"dependencies": { - "@babel/helper-annotate-as-pure": "^7.25.9", - "@babel/helper-module-imports": "^7.25.9", - "@babel/helper-plugin-utils": "^7.25.9", - "@babel/plugin-syntax-jsx": "^7.25.9", - "@babel/types": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-react-jsx-development": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-development/-/plugin-transform-react-jsx-development-7.25.9.tgz", - "integrity": "sha512-9mj6rm7XVYs4mdLIpbZnHOYdpW42uoiBCTVowg7sP1thUOiANgMb4UtpRivR0pp5iL+ocvUv7X4mZgFRpJEzGw==", - "license": "MIT", - "dependencies": { - "@babel/plugin-transform-react-jsx": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-react-pure-annotations": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-pure-annotations/-/plugin-transform-react-pure-annotations-7.25.9.tgz", - "integrity": "sha512-KQ/Takk3T8Qzj5TppkS1be588lkbTp5uj7w6a0LeQaTMSckU/wK0oJ/pih+T690tkgI5jfmg2TqDJvd41Sj1Cg==", - "license": "MIT", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.25.9", - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-regenerator": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.25.9.tgz", - "integrity": "sha512-vwDcDNsgMPDGP0nMqzahDWE5/MLcX8sv96+wfX7as7LoF/kr97Bo/7fI00lXY4wUXYfVmwIIyG80fGZ1uvt2qg==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9", - "regenerator-transform": "^0.15.2" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-regexp-modifiers": { - "version": "7.26.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regexp-modifiers/-/plugin-transform-regexp-modifiers-7.26.0.tgz", - "integrity": "sha512-vN6saax7lrA2yA/Pak3sCxuD6F5InBjn9IcrIKQPjpsLvuHYLVroTxjdlVRHjjBWxKOqIwpTXDkOssYT4BFdRw==", - "license": "MIT", - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.25.9", - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/plugin-transform-reserved-words": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.25.9.tgz", - "integrity": "sha512-7DL7DKYjn5Su++4RXu8puKZm2XBPHyjWLUidaPEkCUBbE7IPcsrkRHggAOOKydH1dASWdcUBxrkOGNxUv5P3Jg==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-runtime": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.25.9.tgz", - "integrity": "sha512-nZp7GlEl+yULJrClz0SwHPqir3lc0zsPrDHQUcxGspSL7AKrexNSEfTbfqnDNJUO13bgKyfuOLMF8Xqtu8j3YQ==", - "license": "MIT", - "dependencies": { - "@babel/helper-module-imports": "^7.25.9", - "@babel/helper-plugin-utils": "^7.25.9", - "babel-plugin-polyfill-corejs2": 
"^0.4.10", - "babel-plugin-polyfill-corejs3": "^0.10.6", - "babel-plugin-polyfill-regenerator": "^0.6.1", - "semver": "^6.3.1" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-runtime/node_modules/semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", - "license": "ISC", - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/@babel/plugin-transform-shorthand-properties": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.25.9.tgz", - "integrity": "sha512-MUv6t0FhO5qHnS/W8XCbHmiRWOphNufpE1IVxhK5kuN3Td9FT1x4rx4K42s3RYdMXCXpfWkGSbCSd0Z64xA7Ng==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-spread": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.25.9.tgz", - "integrity": "sha512-oNknIB0TbURU5pqJFVbOOFspVlrpVwo2H1+HUIsVDvp5VauGGDP1ZEvO8Nn5xyMEs3dakajOxlmkNW7kNgSm6A==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9", - "@babel/helper-skip-transparent-expression-wrappers": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-sticky-regex": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.25.9.tgz", - "integrity": "sha512-WqBUSgeVwucYDP9U/xNRQam7xV8W5Zf+6Eo7T2SRVUFlhRiMNFdFz58u0KZmCVVqs2i7SHgpRnAhzRNmKfi2uA==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-template-literals": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.25.9.tgz", - "integrity": "sha512-o97AE4syN71M/lxrCtQByzphAdlYluKPDBzDVzMmfCobUjjhAryZV0AIpRPrxN0eAkxXO6ZLEScmt+PNhj2OTw==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-typeof-symbol": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.25.9.tgz", - "integrity": "sha512-v61XqUMiueJROUv66BVIOi0Fv/CUuZuZMl5NkRoCVxLAnMexZ0A3kMe7vvZ0nulxMuMp0Mk6S5hNh48yki08ZA==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-typescript": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.25.9.tgz", - "integrity": "sha512-7PbZQZP50tzv2KGGnhh82GSyMB01yKY9scIjf1a+GfZCtInOWqUH5+1EBU4t9fyR5Oykkkc9vFTs4OHrhHXljQ==", - "license": "MIT", - "dependencies": { - 
"@babel/helper-annotate-as-pure": "^7.25.9", - "@babel/helper-create-class-features-plugin": "^7.25.9", - "@babel/helper-plugin-utils": "^7.25.9", - "@babel/helper-skip-transparent-expression-wrappers": "^7.25.9", - "@babel/plugin-syntax-typescript": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-unicode-escapes": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.25.9.tgz", - "integrity": "sha512-s5EDrE6bW97LtxOcGj1Khcx5AaXwiMmi4toFWRDP9/y0Woo6pXC+iyPu/KuhKtfSrNFd7jJB+/fkOtZy6aIC6Q==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-unicode-property-regex": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-property-regex/-/plugin-transform-unicode-property-regex-7.25.9.tgz", - "integrity": "sha512-Jt2d8Ga+QwRluxRQ307Vlxa6dMrYEMZCgGxoPR8V52rxPyldHu3hdlHspxaqYmE7oID5+kB+UKUB/eWS+DkkWg==", - "license": "MIT", - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.25.9", - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-unicode-regex": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.25.9.tgz", - "integrity": "sha512-yoxstj7Rg9dlNn9UQxzk4fcNivwv4nUYz7fYXBaKxvw/lnmPuOm/ikoELygbYq68Bls3D/D+NBPHiLwZdZZ4HA==", - "license": "MIT", - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.25.9", - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-unicode-sets-regex": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-sets-regex/-/plugin-transform-unicode-sets-regex-7.25.9.tgz", - "integrity": "sha512-8BYqO3GeVNHtx69fdPshN3fnzUNLrWdHhk/icSwigksJGczKSizZ+Z6SBCxTs723Fr5VSNorTIK7a+R2tISvwQ==", - "license": "MIT", - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.25.9", - "@babel/helper-plugin-utils": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/preset-env": { - "version": "7.26.0", - "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.26.0.tgz", - "integrity": "sha512-H84Fxq0CQJNdPFT2DrfnylZ3cf5K43rGfWK4LJGPpjKHiZlk0/RzwEus3PDDZZg+/Er7lCA03MVacueUuXdzfw==", - "license": "MIT", - "dependencies": { - "@babel/compat-data": "^7.26.0", - "@babel/helper-compilation-targets": "^7.25.9", - "@babel/helper-plugin-utils": "^7.25.9", - "@babel/helper-validator-option": "^7.25.9", - "@babel/plugin-bugfix-firefox-class-in-computed-class-key": "^7.25.9", - "@babel/plugin-bugfix-safari-class-field-initializer-scope": "^7.25.9", - "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": "^7.25.9", - "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.25.9", - "@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly": "^7.25.9", - 
"@babel/plugin-proposal-private-property-in-object": "7.21.0-placeholder-for-preset-env.2", - "@babel/plugin-syntax-import-assertions": "^7.26.0", - "@babel/plugin-syntax-import-attributes": "^7.26.0", - "@babel/plugin-syntax-unicode-sets-regex": "^7.18.6", - "@babel/plugin-transform-arrow-functions": "^7.25.9", - "@babel/plugin-transform-async-generator-functions": "^7.25.9", - "@babel/plugin-transform-async-to-generator": "^7.25.9", - "@babel/plugin-transform-block-scoped-functions": "^7.25.9", - "@babel/plugin-transform-block-scoping": "^7.25.9", - "@babel/plugin-transform-class-properties": "^7.25.9", - "@babel/plugin-transform-class-static-block": "^7.26.0", - "@babel/plugin-transform-classes": "^7.25.9", - "@babel/plugin-transform-computed-properties": "^7.25.9", - "@babel/plugin-transform-destructuring": "^7.25.9", - "@babel/plugin-transform-dotall-regex": "^7.25.9", - "@babel/plugin-transform-duplicate-keys": "^7.25.9", - "@babel/plugin-transform-duplicate-named-capturing-groups-regex": "^7.25.9", - "@babel/plugin-transform-dynamic-import": "^7.25.9", - "@babel/plugin-transform-exponentiation-operator": "^7.25.9", - "@babel/plugin-transform-export-namespace-from": "^7.25.9", - "@babel/plugin-transform-for-of": "^7.25.9", - "@babel/plugin-transform-function-name": "^7.25.9", - "@babel/plugin-transform-json-strings": "^7.25.9", - "@babel/plugin-transform-literals": "^7.25.9", - "@babel/plugin-transform-logical-assignment-operators": "^7.25.9", - "@babel/plugin-transform-member-expression-literals": "^7.25.9", - "@babel/plugin-transform-modules-amd": "^7.25.9", - "@babel/plugin-transform-modules-commonjs": "^7.25.9", - "@babel/plugin-transform-modules-systemjs": "^7.25.9", - "@babel/plugin-transform-modules-umd": "^7.25.9", - "@babel/plugin-transform-named-capturing-groups-regex": "^7.25.9", - "@babel/plugin-transform-new-target": "^7.25.9", - "@babel/plugin-transform-nullish-coalescing-operator": "^7.25.9", - "@babel/plugin-transform-numeric-separator": "^7.25.9", - "@babel/plugin-transform-object-rest-spread": "^7.25.9", - "@babel/plugin-transform-object-super": "^7.25.9", - "@babel/plugin-transform-optional-catch-binding": "^7.25.9", - "@babel/plugin-transform-optional-chaining": "^7.25.9", - "@babel/plugin-transform-parameters": "^7.25.9", - "@babel/plugin-transform-private-methods": "^7.25.9", - "@babel/plugin-transform-private-property-in-object": "^7.25.9", - "@babel/plugin-transform-property-literals": "^7.25.9", - "@babel/plugin-transform-regenerator": "^7.25.9", - "@babel/plugin-transform-regexp-modifiers": "^7.26.0", - "@babel/plugin-transform-reserved-words": "^7.25.9", - "@babel/plugin-transform-shorthand-properties": "^7.25.9", - "@babel/plugin-transform-spread": "^7.25.9", - "@babel/plugin-transform-sticky-regex": "^7.25.9", - "@babel/plugin-transform-template-literals": "^7.25.9", - "@babel/plugin-transform-typeof-symbol": "^7.25.9", - "@babel/plugin-transform-unicode-escapes": "^7.25.9", - "@babel/plugin-transform-unicode-property-regex": "^7.25.9", - "@babel/plugin-transform-unicode-regex": "^7.25.9", - "@babel/plugin-transform-unicode-sets-regex": "^7.25.9", - "@babel/preset-modules": "0.1.6-no-external-plugins", - "babel-plugin-polyfill-corejs2": "^0.4.10", - "babel-plugin-polyfill-corejs3": "^0.10.6", - "babel-plugin-polyfill-regenerator": "^0.6.1", - "core-js-compat": "^3.38.1", - "semver": "^6.3.1" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/preset-env/node_modules/semver": { - 
"version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", - "license": "ISC", - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/@babel/preset-modules": { - "version": "0.1.6-no-external-plugins", - "resolved": "https://registry.npmjs.org/@babel/preset-modules/-/preset-modules-0.1.6-no-external-plugins.tgz", - "integrity": "sha512-HrcgcIESLm9aIR842yhJ5RWan/gebQUJ6E/E5+rf0y9o6oj7w0Br+sWuL6kEQ/o/AdfvR1Je9jG18/gnpwjEyA==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.0.0", - "@babel/types": "^7.4.4", - "esutils": "^2.0.2" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0 || ^8.0.0-0 <8.0.0" - } - }, - "node_modules/@babel/preset-react": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/preset-react/-/preset-react-7.25.9.tgz", - "integrity": "sha512-D3to0uSPiWE7rBrdIICCd0tJSIGpLaaGptna2+w7Pft5xMqLpA1sz99DK5TZ1TjGbdQ/VI1eCSZ06dv3lT4JOw==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9", - "@babel/helper-validator-option": "^7.25.9", - "@babel/plugin-transform-react-display-name": "^7.25.9", - "@babel/plugin-transform-react-jsx": "^7.25.9", - "@babel/plugin-transform-react-jsx-development": "^7.25.9", - "@babel/plugin-transform-react-pure-annotations": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/preset-typescript": { - "version": "7.26.0", - "resolved": "https://registry.npmjs.org/@babel/preset-typescript/-/preset-typescript-7.26.0.tgz", - "integrity": "sha512-NMk1IGZ5I/oHhoXEElcm+xUnL/szL6xflkFZmoEU9xj1qSJXpiS7rsspYo92B4DRCDvZn2erT5LdsCeXAKNCkg==", - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.25.9", - "@babel/helper-validator-option": "^7.25.9", - "@babel/plugin-syntax-jsx": "^7.25.9", - "@babel/plugin-transform-modules-commonjs": "^7.25.9", - "@babel/plugin-transform-typescript": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/runtime": { - "version": "7.26.0", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.26.0.tgz", - "integrity": "sha512-FDSOghenHTiToteC/QRlv2q3DhPZ/oOXTBoirfWNx1Cx3TMVcGWQtMMmQcSvb/JjpNeGzx8Pq/b4fKEJuWm1sw==", - "license": "MIT", - "dependencies": { - "regenerator-runtime": "^0.14.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/runtime-corejs3": { - "version": "7.26.0", - "resolved": "https://registry.npmjs.org/@babel/runtime-corejs3/-/runtime-corejs3-7.26.0.tgz", - "integrity": "sha512-YXHu5lN8kJCb1LOb9PgV6pvak43X2h4HvRApcN5SdWeaItQOzfn1hgP6jasD6KWQyJDBxrVmA9o9OivlnNJK/w==", - "license": "MIT", - "dependencies": { - "core-js-pure": "^3.30.2", - "regenerator-runtime": "^0.14.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/template": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.25.9.tgz", - "integrity": "sha512-9DGttpmPvIxBb/2uwpVo3dqJ+O6RooAFOS+lB+xDqoE2PVCE8nfoHMdZLpfCQRLwvohzXISPZcgxt80xLfsuwg==", - "license": "MIT", - "dependencies": { - "@babel/code-frame": "^7.25.9", - "@babel/parser": "^7.25.9", - "@babel/types": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/traverse": { - "version": "7.25.9", - "resolved": 
"https://registry.npmjs.org/@babel/traverse/-/traverse-7.25.9.tgz", - "integrity": "sha512-ZCuvfwOwlz/bawvAuvcj8rrithP2/N55Tzz342AkTvq4qaWbGfmCk/tKhNaV2cthijKrPAA8SRJV5WWe7IBMJw==", - "license": "MIT", - "dependencies": { - "@babel/code-frame": "^7.25.9", - "@babel/generator": "^7.25.9", - "@babel/parser": "^7.25.9", - "@babel/template": "^7.25.9", - "@babel/types": "^7.25.9", - "debug": "^4.3.1", - "globals": "^11.1.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/types": { - "version": "7.26.0", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.26.0.tgz", - "integrity": "sha512-Z/yiTPj+lDVnF7lWeKCIJzaIkI0vYO87dMpZ4bg4TDrFe4XXLFWL1TbXU27gBP3QccxV9mZICCrnjnYlJjXHOA==", - "license": "MIT", - "dependencies": { - "@babel/helper-string-parser": "^7.25.9", - "@babel/helper-validator-identifier": "^7.25.9" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@braintree/sanitize-url": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/@braintree/sanitize-url/-/sanitize-url-7.1.0.tgz", - "integrity": "sha512-o+UlMLt49RvtCASlOMW0AkHnabN9wR9rwCCherxO0yG4Npy34GkvrAqdXQvrhNs+jh+gkK8gB8Lf05qL/O7KWg==", - "license": "MIT" - }, - "node_modules/@chevrotain/cst-dts-gen": { - "version": "11.0.3", - "resolved": "https://registry.npmjs.org/@chevrotain/cst-dts-gen/-/cst-dts-gen-11.0.3.tgz", - "integrity": "sha512-BvIKpRLeS/8UbfxXxgC33xOumsacaeCKAjAeLyOn7Pcp95HiRbrpl14S+9vaZLolnbssPIUuiUd8IvgkRyt6NQ==", - "license": "Apache-2.0", - "dependencies": { - "@chevrotain/gast": "11.0.3", - "@chevrotain/types": "11.0.3", - "lodash-es": "4.17.21" - } - }, - "node_modules/@chevrotain/gast": { - "version": "11.0.3", - "resolved": "https://registry.npmjs.org/@chevrotain/gast/-/gast-11.0.3.tgz", - "integrity": "sha512-+qNfcoNk70PyS/uxmj3li5NiECO+2YKZZQMbmjTqRI3Qchu8Hig/Q9vgkHpI3alNjr7M+a2St5pw5w5F6NL5/Q==", - "license": "Apache-2.0", - "dependencies": { - "@chevrotain/types": "11.0.3", - "lodash-es": "4.17.21" - } - }, - "node_modules/@chevrotain/regexp-to-ast": { - "version": "11.0.3", - "resolved": "https://registry.npmjs.org/@chevrotain/regexp-to-ast/-/regexp-to-ast-11.0.3.tgz", - "integrity": "sha512-1fMHaBZxLFvWI067AVbGJav1eRY7N8DDvYCTwGBiE/ytKBgP8azTdgyrKyWZ9Mfh09eHWb5PgTSO8wi7U824RA==", - "license": "Apache-2.0" - }, - "node_modules/@chevrotain/types": { - "version": "11.0.3", - "resolved": "https://registry.npmjs.org/@chevrotain/types/-/types-11.0.3.tgz", - "integrity": "sha512-gsiM3G8b58kZC2HaWR50gu6Y1440cHiJ+i3JUvcp/35JchYejb2+5MVeJK0iKThYpAa/P2PYFV4hoi44HD+aHQ==", - "license": "Apache-2.0" - }, - "node_modules/@chevrotain/utils": { - "version": "11.0.3", - "resolved": "https://registry.npmjs.org/@chevrotain/utils/-/utils-11.0.3.tgz", - "integrity": "sha512-YslZMgtJUyuMbZ+aKvfF3x1f5liK4mWNxghFRv7jqRR9C3R3fAOGTTKvxXDa2Y1s9zSbcpuO0cAxDYsc9SrXoQ==", - "license": "Apache-2.0" - }, - "node_modules/@cmfcmf/docusaurus-search-local": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/@cmfcmf/docusaurus-search-local/-/docusaurus-search-local-1.2.0.tgz", - "integrity": "sha512-Tc0GhRBsfZAiB+f6BoPB8YCQap6JzzcDyJ0dLSCSzWQ6wdWvDlTBrHc1YqR8q8AZ+STRszL5eZpZFi5dbTCdYg==", - "license": "MIT", - "dependencies": { - "@algolia/autocomplete-js": "^1.8.2", - "@algolia/autocomplete-theme-classic": "^1.8.2", - "@algolia/client-search": "^4.12.0", - "algoliasearch": "^4.12.0", - "cheerio": "^1.0.0-rc.9", - "clsx": "^1.1.1", - "lunr-languages": "^1.4.0", - "mark.js": "^8.11.1", - "tslib": "^2.6.3" - }, - "peerDependencies": { - "@docusaurus/core": "^2.0.0", 
- "nodejieba": "^2.5.0" - }, - "peerDependenciesMeta": { - "nodejieba": { - "optional": true - } - } - }, - "node_modules/@cmfcmf/docusaurus-search-local/node_modules/clsx": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/clsx/-/clsx-1.2.1.tgz", - "integrity": "sha512-EcR6r5a8bj6pu3ycsa/E/cKVGuTgZJZdsyUYHOksG/UHIiKfjxzRxYJpyVBwYaQeOvghal9fcc4PidlgzugAQg==", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/@colors/colors": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/@colors/colors/-/colors-1.5.0.tgz", - "integrity": "sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ==", - "license": "MIT", - "optional": true, - "engines": { - "node": ">=0.1.90" - } - }, - "node_modules/@csstools/cascade-layer-name-parser": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/@csstools/cascade-layer-name-parser/-/cascade-layer-name-parser-2.0.4.tgz", - "integrity": "sha512-7DFHlPuIxviKYZrOiwVU/PiHLm3lLUR23OMuEEtfEOQTOp9hzQ2JjdY6X5H18RVuUPJqSCI+qNnD5iOLMVE0bA==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT", - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "@csstools/css-parser-algorithms": "^3.0.4", - "@csstools/css-tokenizer": "^3.0.3" - } - }, - "node_modules/@csstools/color-helpers": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/@csstools/color-helpers/-/color-helpers-5.0.1.tgz", - "integrity": "sha512-MKtmkA0BX87PKaO1NFRTFH+UnkgnmySQOvNxJubsadusqPEC2aJ9MOQiMceZJJ6oitUl/i0L6u0M1IrmAOmgBA==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "engines": { - "node": ">=18" - } - }, - "node_modules/@csstools/css-calc": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@csstools/css-calc/-/css-calc-2.1.0.tgz", - "integrity": "sha512-X69PmFOrjTZfN5ijxtI8hZ9kRADFSLrmmQ6hgDJ272Il049WGKpDY64KhrFm/7rbWve0z81QepawzjkKlqkNGw==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT", - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "@csstools/css-parser-algorithms": "^3.0.4", - "@csstools/css-tokenizer": "^3.0.3" - } - }, - "node_modules/@csstools/css-color-parser": { - "version": "3.0.6", - "resolved": "https://registry.npmjs.org/@csstools/css-color-parser/-/css-color-parser-3.0.6.tgz", - "integrity": "sha512-S/IjXqTHdpI4EtzGoNCHfqraXF37x12ZZHA1Lk7zoT5pm2lMjFuqhX/89L7dqX4CcMacKK+6ZCs5TmEGb/+wKw==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT", - "dependencies": { - "@csstools/color-helpers": "^5.0.1", - "@csstools/css-calc": "^2.1.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "@csstools/css-parser-algorithms": "^3.0.4", - "@csstools/css-tokenizer": "^3.0.3" - } - }, - "node_modules/@csstools/css-parser-algorithms": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/@csstools/css-parser-algorithms/-/css-parser-algorithms-3.0.4.tgz", - "integrity": 
"sha512-Up7rBoV77rv29d3uKHUIVubz1BTcgyUK72IvCQAbfbMv584xHcGKCKbWh7i8hPrRJ7qU4Y8IO3IY9m+iTB7P3A==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT", - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "@csstools/css-tokenizer": "^3.0.3" - } - }, - "node_modules/@csstools/css-tokenizer": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/@csstools/css-tokenizer/-/css-tokenizer-3.0.3.tgz", - "integrity": "sha512-UJnjoFsmxfKUdNYdWgOB0mWUypuLvAfQPH1+pyvRJs6euowbFkFC6P13w1l8mJyi3vxYMxc9kld5jZEGRQs6bw==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT", - "engines": { - "node": ">=18" - } - }, - "node_modules/@csstools/media-query-list-parser": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/@csstools/media-query-list-parser/-/media-query-list-parser-4.0.2.tgz", - "integrity": "sha512-EUos465uvVvMJehckATTlNqGj4UJWkTmdWuDMjqvSUkjGpmOyFZBVwb4knxCm/k2GMTXY+c/5RkdndzFYWeX5A==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT", - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "@csstools/css-parser-algorithms": "^3.0.4", - "@csstools/css-tokenizer": "^3.0.3" - } - }, - "node_modules/@csstools/postcss-cascade-layers": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/@csstools/postcss-cascade-layers/-/postcss-cascade-layers-5.0.1.tgz", - "integrity": "sha512-XOfhI7GShVcKiKwmPAnWSqd2tBR0uxt+runAxttbSp/LY2U16yAVPmAf7e9q4JJ0d+xMNmpwNDLBXnmRCl3HMQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "dependencies": { - "@csstools/selector-specificity": "^5.0.0", - "postcss-selector-parser": "^7.0.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/@csstools/postcss-cascade-layers/node_modules/@csstools/selector-specificity": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/@csstools/selector-specificity/-/selector-specificity-5.0.0.tgz", - "integrity": "sha512-PCqQV3c4CoVm3kdPhyeZ07VmBRdH2EpMFA/pd9OASpOEC3aXNGoqPDAZ80D0cLpMBxnmk0+yNhGsEx31hq7Gtw==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss-selector-parser": "^7.0.0" - } - }, - "node_modules/@csstools/postcss-cascade-layers/node_modules/postcss-selector-parser": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.0.0.tgz", - "integrity": "sha512-9RbEr1Y7FFfptd/1eEdntyjMwLeghW1bHX9GWjXo19vx4ytPQhANltvVxDggzJl7mnWM+dX28kb6cyS/4iQjlQ==", - "license": "MIT", - "dependencies": { - "cssesc": "^3.0.0", - "util-deprecate": "^1.0.2" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@csstools/postcss-color-function": { - "version": "4.0.6", - "resolved": 
"https://registry.npmjs.org/@csstools/postcss-color-function/-/postcss-color-function-4.0.6.tgz", - "integrity": "sha512-EcvXfC60cTIumzpsxWuvVjb7rsJEHPvqn3jeMEBUaE3JSc4FRuP7mEQ+1eicxWmIrs3FtzMH9gR3sgA5TH+ebQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "dependencies": { - "@csstools/css-color-parser": "^3.0.6", - "@csstools/css-parser-algorithms": "^3.0.4", - "@csstools/css-tokenizer": "^3.0.3", - "@csstools/postcss-progressive-custom-properties": "^4.0.0", - "@csstools/utilities": "^2.0.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/@csstools/postcss-color-mix-function": { - "version": "3.0.6", - "resolved": "https://registry.npmjs.org/@csstools/postcss-color-mix-function/-/postcss-color-mix-function-3.0.6.tgz", - "integrity": "sha512-jVKdJn4+JkASYGhyPO+Wa5WXSx1+oUgaXb3JsjJn/BlrtFh5zjocCY7pwWi0nuP24V1fY7glQsxEYcYNy0dMFg==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "dependencies": { - "@csstools/css-color-parser": "^3.0.6", - "@csstools/css-parser-algorithms": "^3.0.4", - "@csstools/css-tokenizer": "^3.0.3", - "@csstools/postcss-progressive-custom-properties": "^4.0.0", - "@csstools/utilities": "^2.0.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/@csstools/postcss-content-alt-text": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/@csstools/postcss-content-alt-text/-/postcss-content-alt-text-2.0.4.tgz", - "integrity": "sha512-YItlZUOuZJCBlRaCf8Aucc1lgN41qYGALMly0qQllrxYJhiyzlI6RxOTMUvtWk+KhS8GphMDsDhKQ7KTPfEMSw==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "dependencies": { - "@csstools/css-parser-algorithms": "^3.0.4", - "@csstools/css-tokenizer": "^3.0.3", - "@csstools/postcss-progressive-custom-properties": "^4.0.0", - "@csstools/utilities": "^2.0.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/@csstools/postcss-exponential-functions": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/@csstools/postcss-exponential-functions/-/postcss-exponential-functions-2.0.5.tgz", - "integrity": "sha512-mi8R6dVfA2nDoKM3wcEi64I8vOYEgQVtVKCfmLHXupeLpACfGAided5ddMt5f+CnEodNu4DifuVwb0I6fQDGGQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "dependencies": { - "@csstools/css-calc": "^2.1.0", - "@csstools/css-parser-algorithms": "^3.0.4", - "@csstools/css-tokenizer": "^3.0.3" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/@csstools/postcss-font-format-keywords": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/@csstools/postcss-font-format-keywords/-/postcss-font-format-keywords-4.0.0.tgz", - "integrity": "sha512-usBzw9aCRDvchpok6C+4TXC57btc4bJtmKQWOHQxOVKen1ZfVqBUuCZ/wuqdX5GHsD0NRSr9XTP+5ID1ZZQBXw==", - "funding": [ - { - "type": "github", - "url": 
"https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "dependencies": { - "@csstools/utilities": "^2.0.0", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/@csstools/postcss-gamut-mapping": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/@csstools/postcss-gamut-mapping/-/postcss-gamut-mapping-2.0.6.tgz", - "integrity": "sha512-0ke7fmXfc8H+kysZz246yjirAH6JFhyX9GTlyRnM0exHO80XcA9zeJpy5pOp5zo/AZiC/q5Pf+Hw7Pd6/uAoYA==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "dependencies": { - "@csstools/css-color-parser": "^3.0.6", - "@csstools/css-parser-algorithms": "^3.0.4", - "@csstools/css-tokenizer": "^3.0.3" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/@csstools/postcss-gradients-interpolation-method": { - "version": "5.0.6", - "resolved": "https://registry.npmjs.org/@csstools/postcss-gradients-interpolation-method/-/postcss-gradients-interpolation-method-5.0.6.tgz", - "integrity": "sha512-Itrbx6SLUzsZ6Mz3VuOlxhbfuyLTogG5DwEF1V8dAi24iMuvQPIHd7Ti+pNDp7j6WixndJGZaoNR0f9VSzwuTg==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "dependencies": { - "@csstools/css-color-parser": "^3.0.6", - "@csstools/css-parser-algorithms": "^3.0.4", - "@csstools/css-tokenizer": "^3.0.3", - "@csstools/postcss-progressive-custom-properties": "^4.0.0", - "@csstools/utilities": "^2.0.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/@csstools/postcss-hwb-function": { - "version": "4.0.6", - "resolved": "https://registry.npmjs.org/@csstools/postcss-hwb-function/-/postcss-hwb-function-4.0.6.tgz", - "integrity": "sha512-927Pqy3a1uBP7U8sTfaNdZVB0mNXzIrJO/GZ8us9219q9n06gOqCdfZ0E6d1P66Fm0fYHvxfDbfcUuwAn5UwhQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "dependencies": { - "@csstools/css-color-parser": "^3.0.6", - "@csstools/css-parser-algorithms": "^3.0.4", - "@csstools/css-tokenizer": "^3.0.3", - "@csstools/postcss-progressive-custom-properties": "^4.0.0", - "@csstools/utilities": "^2.0.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/@csstools/postcss-ic-unit": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/@csstools/postcss-ic-unit/-/postcss-ic-unit-4.0.0.tgz", - "integrity": "sha512-9QT5TDGgx7wD3EEMN3BSUG6ckb6Eh5gSPT5kZoVtUuAonfPmLDJyPhqR4ntPpMYhUKAMVKAg3I/AgzqHMSeLhA==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "dependencies": { - "@csstools/postcss-progressive-custom-properties": "^4.0.0", - "@csstools/utilities": "^2.0.0", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - 
"node_modules/@csstools/postcss-initial": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@csstools/postcss-initial/-/postcss-initial-2.0.0.tgz", - "integrity": "sha512-dv2lNUKR+JV+OOhZm9paWzYBXOCi+rJPqJ2cJuhh9xd8USVrd0cBEPczla81HNOyThMQWeCcdln3gZkQV2kYxA==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/@csstools/postcss-is-pseudo-class": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/@csstools/postcss-is-pseudo-class/-/postcss-is-pseudo-class-5.0.1.tgz", - "integrity": "sha512-JLp3POui4S1auhDR0n8wHd/zTOWmMsmK3nQd3hhL6FhWPaox5W7j1se6zXOG/aP07wV2ww0lxbKYGwbBszOtfQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "dependencies": { - "@csstools/selector-specificity": "^5.0.0", - "postcss-selector-parser": "^7.0.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/@csstools/postcss-is-pseudo-class/node_modules/@csstools/selector-specificity": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/@csstools/selector-specificity/-/selector-specificity-5.0.0.tgz", - "integrity": "sha512-PCqQV3c4CoVm3kdPhyeZ07VmBRdH2EpMFA/pd9OASpOEC3aXNGoqPDAZ80D0cLpMBxnmk0+yNhGsEx31hq7Gtw==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss-selector-parser": "^7.0.0" - } - }, - "node_modules/@csstools/postcss-is-pseudo-class/node_modules/postcss-selector-parser": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.0.0.tgz", - "integrity": "sha512-9RbEr1Y7FFfptd/1eEdntyjMwLeghW1bHX9GWjXo19vx4ytPQhANltvVxDggzJl7mnWM+dX28kb6cyS/4iQjlQ==", - "license": "MIT", - "dependencies": { - "cssesc": "^3.0.0", - "util-deprecate": "^1.0.2" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@csstools/postcss-light-dark-function": { - "version": "2.0.7", - "resolved": "https://registry.npmjs.org/@csstools/postcss-light-dark-function/-/postcss-light-dark-function-2.0.7.tgz", - "integrity": "sha512-ZZ0rwlanYKOHekyIPaU+sVm3BEHCe+Ha0/px+bmHe62n0Uc1lL34vbwrLYn6ote8PHlsqzKeTQdIejQCJ05tfw==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "dependencies": { - "@csstools/css-parser-algorithms": "^3.0.4", - "@csstools/css-tokenizer": "^3.0.3", - "@csstools/postcss-progressive-custom-properties": "^4.0.0", - "@csstools/utilities": "^2.0.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/@csstools/postcss-logical-float-and-clear": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@csstools/postcss-logical-float-and-clear/-/postcss-logical-float-and-clear-3.0.0.tgz", - "integrity": "sha512-SEmaHMszwakI2rqKRJgE+8rpotFfne1ZS6bZqBoQIicFyV+xT1UF42eORPxJkVJVrH9C0ctUgwMSn3BLOIZldQ==", - "funding": [ - 
{ - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/@csstools/postcss-logical-overflow": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@csstools/postcss-logical-overflow/-/postcss-logical-overflow-2.0.0.tgz", - "integrity": "sha512-spzR1MInxPuXKEX2csMamshR4LRaSZ3UXVaRGjeQxl70ySxOhMpP2252RAFsg8QyyBXBzuVOOdx1+bVO5bPIzA==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/@csstools/postcss-logical-overscroll-behavior": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@csstools/postcss-logical-overscroll-behavior/-/postcss-logical-overscroll-behavior-2.0.0.tgz", - "integrity": "sha512-e/webMjoGOSYfqLunyzByZj5KKe5oyVg/YSbie99VEaSDE2kimFm0q1f6t/6Jo+VVCQ/jbe2Xy+uX+C4xzWs4w==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/@csstools/postcss-logical-resize": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@csstools/postcss-logical-resize/-/postcss-logical-resize-3.0.0.tgz", - "integrity": "sha512-DFbHQOFW/+I+MY4Ycd/QN6Dg4Hcbb50elIJCfnwkRTCX05G11SwViI5BbBlg9iHRl4ytB7pmY5ieAFk3ws7yyg==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/@csstools/postcss-logical-viewport-units": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/@csstools/postcss-logical-viewport-units/-/postcss-logical-viewport-units-3.0.3.tgz", - "integrity": "sha512-OC1IlG/yoGJdi0Y+7duz/kU/beCwO+Gua01sD6GtOtLi7ByQUpcIqs7UE/xuRPay4cHgOMatWdnDdsIDjnWpPw==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "dependencies": { - "@csstools/css-tokenizer": "^3.0.3", - "@csstools/utilities": "^2.0.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/@csstools/postcss-media-minmax": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/@csstools/postcss-media-minmax/-/postcss-media-minmax-2.0.5.tgz", - "integrity": "sha512-sdh5i5GToZOIAiwhdntRWv77QDtsxP2r2gXW/WbLSCoLr00KTq/yiF1qlQ5XX2+lmiFa8rATKMcbwl3oXDMNew==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT", - "dependencies": { - "@csstools/css-calc": "^2.1.0", - "@csstools/css-parser-algorithms": "^3.0.4", - "@csstools/css-tokenizer": "^3.0.3", - "@csstools/media-query-list-parser": 
"^4.0.2" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/@csstools/postcss-media-queries-aspect-ratio-number-values": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/@csstools/postcss-media-queries-aspect-ratio-number-values/-/postcss-media-queries-aspect-ratio-number-values-3.0.4.tgz", - "integrity": "sha512-AnGjVslHMm5xw9keusQYvjVWvuS7KWK+OJagaG0+m9QnIjZsrysD2kJP/tr/UJIyYtMCtu8OkUd+Rajb4DqtIQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "dependencies": { - "@csstools/css-parser-algorithms": "^3.0.4", - "@csstools/css-tokenizer": "^3.0.3", - "@csstools/media-query-list-parser": "^4.0.2" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/@csstools/postcss-nested-calc": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/@csstools/postcss-nested-calc/-/postcss-nested-calc-4.0.0.tgz", - "integrity": "sha512-jMYDdqrQQxE7k9+KjstC3NbsmC063n1FTPLCgCRS2/qHUbHM0mNy9pIn4QIiQGs9I/Bg98vMqw7mJXBxa0N88A==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "dependencies": { - "@csstools/utilities": "^2.0.0", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/@csstools/postcss-normalize-display-values": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/@csstools/postcss-normalize-display-values/-/postcss-normalize-display-values-4.0.0.tgz", - "integrity": "sha512-HlEoG0IDRoHXzXnkV4in47dzsxdsjdz6+j7MLjaACABX2NfvjFS6XVAnpaDyGesz9gK2SC7MbNwdCHusObKJ9Q==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/@csstools/postcss-oklab-function": { - "version": "4.0.6", - "resolved": "https://registry.npmjs.org/@csstools/postcss-oklab-function/-/postcss-oklab-function-4.0.6.tgz", - "integrity": "sha512-Hptoa0uX+XsNacFBCIQKTUBrFKDiplHan42X73EklG6XmQLG7/aIvxoNhvZ7PvOWMt67Pw3bIlUY2nD6p5vL8A==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "dependencies": { - "@csstools/css-color-parser": "^3.0.6", - "@csstools/css-parser-algorithms": "^3.0.4", - "@csstools/css-tokenizer": "^3.0.3", - "@csstools/postcss-progressive-custom-properties": "^4.0.0", - "@csstools/utilities": "^2.0.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/@csstools/postcss-progressive-custom-properties": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/@csstools/postcss-progressive-custom-properties/-/postcss-progressive-custom-properties-4.0.0.tgz", - "integrity": "sha512-XQPtROaQjomnvLUSy/bALTR5VCtTVUFwYs1SblvYgLSeTo2a/bMNwUwo2piXw5rTv/FEYiy5yPSXBqg9OKUx7Q==", - "funding": [ - { - "type": "github", - "url": 
"https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/@csstools/postcss-random-function": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@csstools/postcss-random-function/-/postcss-random-function-1.0.1.tgz", - "integrity": "sha512-Ab/tF8/RXktQlFwVhiC70UNfpFQRhtE5fQQoP2pO+KCPGLsLdWFiOuHgSRtBOqEshCVAzR4H6o38nhvRZq8deA==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "dependencies": { - "@csstools/css-calc": "^2.1.0", - "@csstools/css-parser-algorithms": "^3.0.4", - "@csstools/css-tokenizer": "^3.0.3" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/@csstools/postcss-relative-color-syntax": { - "version": "3.0.6", - "resolved": "https://registry.npmjs.org/@csstools/postcss-relative-color-syntax/-/postcss-relative-color-syntax-3.0.6.tgz", - "integrity": "sha512-yxP618Xb+ji1I624jILaYM62uEmZcmbdmFoZHoaThw896sq0vU39kqTTF+ZNic9XyPtPMvq0vyvbgmHaszq8xg==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "dependencies": { - "@csstools/css-color-parser": "^3.0.6", - "@csstools/css-parser-algorithms": "^3.0.4", - "@csstools/css-tokenizer": "^3.0.3", - "@csstools/postcss-progressive-custom-properties": "^4.0.0", - "@csstools/utilities": "^2.0.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/@csstools/postcss-scope-pseudo-class": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/@csstools/postcss-scope-pseudo-class/-/postcss-scope-pseudo-class-4.0.1.tgz", - "integrity": "sha512-IMi9FwtH6LMNuLea1bjVMQAsUhFxJnyLSgOp/cpv5hrzWmrUYU5fm0EguNDIIOHUqzXode8F/1qkC/tEo/qN8Q==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "dependencies": { - "postcss-selector-parser": "^7.0.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/@csstools/postcss-scope-pseudo-class/node_modules/postcss-selector-parser": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.0.0.tgz", - "integrity": "sha512-9RbEr1Y7FFfptd/1eEdntyjMwLeghW1bHX9GWjXo19vx4ytPQhANltvVxDggzJl7mnWM+dX28kb6cyS/4iQjlQ==", - "license": "MIT", - "dependencies": { - "cssesc": "^3.0.0", - "util-deprecate": "^1.0.2" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@csstools/postcss-sign-functions": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@csstools/postcss-sign-functions/-/postcss-sign-functions-1.1.0.tgz", - "integrity": "sha512-SLcc20Nujx/kqbSwDmj6oaXgpy3UjFhBy1sfcqPgDkHfOIfUtUVH7OXO+j7BU4v/At5s61N5ZX6shvgPwluhsA==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - 
"dependencies": { - "@csstools/css-calc": "^2.1.0", - "@csstools/css-parser-algorithms": "^3.0.4", - "@csstools/css-tokenizer": "^3.0.3" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/@csstools/postcss-stepped-value-functions": { - "version": "4.0.5", - "resolved": "https://registry.npmjs.org/@csstools/postcss-stepped-value-functions/-/postcss-stepped-value-functions-4.0.5.tgz", - "integrity": "sha512-G6SJ6hZJkhxo6UZojVlLo14MohH4J5J7z8CRBrxxUYy9JuZiIqUo5TBYyDGcE0PLdzpg63a7mHSJz3VD+gMwqw==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "dependencies": { - "@csstools/css-calc": "^2.1.0", - "@csstools/css-parser-algorithms": "^3.0.4", - "@csstools/css-tokenizer": "^3.0.3" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/@csstools/postcss-text-decoration-shorthand": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/@csstools/postcss-text-decoration-shorthand/-/postcss-text-decoration-shorthand-4.0.1.tgz", - "integrity": "sha512-xPZIikbx6jyzWvhms27uugIc0I4ykH4keRvoa3rxX5K7lEhkbd54rjj/dv60qOCTisoS+3bmwJTeyV1VNBrXaw==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "dependencies": { - "@csstools/color-helpers": "^5.0.1", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/@csstools/postcss-trigonometric-functions": { - "version": "4.0.5", - "resolved": "https://registry.npmjs.org/@csstools/postcss-trigonometric-functions/-/postcss-trigonometric-functions-4.0.5.tgz", - "integrity": "sha512-/YQThYkt5MLvAmVu7zxjhceCYlKrYddK6LEmK5I4ojlS6BmO9u2yO4+xjXzu2+NPYmHSTtP4NFSamBCMmJ1NJA==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "dependencies": { - "@csstools/css-calc": "^2.1.0", - "@csstools/css-parser-algorithms": "^3.0.4", - "@csstools/css-tokenizer": "^3.0.3" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/@csstools/postcss-unset-value": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/@csstools/postcss-unset-value/-/postcss-unset-value-4.0.0.tgz", - "integrity": "sha512-cBz3tOCI5Fw6NIFEwU3RiwK6mn3nKegjpJuzCndoGq3BZPkUjnsq7uQmIeMNeMbMk7YD2MfKcgCpZwX5jyXqCA==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/@csstools/utilities": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@csstools/utilities/-/utilities-2.0.0.tgz", - "integrity": "sha512-5VdOr0Z71u+Yp3ozOx8T11N703wIFGVRgOWbOZMKgglPJsWA54MRIoMNVMa7shUToIhx5J8vX4sOZgD2XiihiQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "engines": { - 
"node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/@discoveryjs/json-ext": { - "version": "0.5.7", - "resolved": "https://registry.npmjs.org/@discoveryjs/json-ext/-/json-ext-0.5.7.tgz", - "integrity": "sha512-dBVuXR082gk3jsFp7Rd/JI4kytwGHecnCoTtXFb7DB6CNHp4rg5k1bhg0nWdLGLnOV71lmDzGQaLMy8iPLY0pw==", - "license": "MIT", - "engines": { - "node": ">=10.0.0" - } - }, - "node_modules/@docsearch/css": { - "version": "3.9.0", - "resolved": "https://registry.npmjs.org/@docsearch/css/-/css-3.9.0.tgz", - "integrity": "sha512-cQbnVbq0rrBwNAKegIac/t6a8nWoUAn8frnkLFW6YARaRmAQr5/Eoe6Ln2fqkUCZ40KpdrKbpSAmgrkviOxuWA==", - "license": "MIT" - }, - "node_modules/@docsearch/react": { - "version": "3.9.0", - "resolved": "https://registry.npmjs.org/@docsearch/react/-/react-3.9.0.tgz", - "integrity": "sha512-mb5FOZYZIkRQ6s/NWnM98k879vu5pscWqTLubLFBO87igYYT4VzVazh4h5o/zCvTIZgEt3PvsCOMOswOUo9yHQ==", - "license": "MIT", - "dependencies": { - "@algolia/autocomplete-core": "1.17.9", - "@algolia/autocomplete-preset-algolia": "1.17.9", - "@docsearch/css": "3.9.0", - "algoliasearch": "^5.14.2" - }, - "peerDependencies": { - "@types/react": ">= 16.8.0 < 20.0.0", - "react": ">= 16.8.0 < 20.0.0", - "react-dom": ">= 16.8.0 < 20.0.0", - "search-insights": ">= 1 < 3" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - "react": { - "optional": true - }, - "react-dom": { - "optional": true - }, - "search-insights": { - "optional": true - } - } - }, - "node_modules/@docsearch/react/node_modules/@algolia/autocomplete-core": { - "version": "1.17.9", - "resolved": "https://registry.npmjs.org/@algolia/autocomplete-core/-/autocomplete-core-1.17.9.tgz", - "integrity": "sha512-O7BxrpLDPJWWHv/DLA9DRFWs+iY1uOJZkqUwjS5HSZAGcl0hIVCQ97LTLewiZmZ402JYUrun+8NqFP+hCknlbQ==", - "license": "MIT", - "dependencies": { - "@algolia/autocomplete-plugin-algolia-insights": "1.17.9", - "@algolia/autocomplete-shared": "1.17.9" - } - }, - "node_modules/@docsearch/react/node_modules/@algolia/autocomplete-plugin-algolia-insights": { - "version": "1.17.9", - "resolved": "https://registry.npmjs.org/@algolia/autocomplete-plugin-algolia-insights/-/autocomplete-plugin-algolia-insights-1.17.9.tgz", - "integrity": "sha512-u1fEHkCbWF92DBeB/KHeMacsjsoI0wFhjZtlCq2ddZbAehshbZST6Hs0Avkc0s+4UyBGbMDnSuXHLuvRWK5iDQ==", - "license": "MIT", - "dependencies": { - "@algolia/autocomplete-shared": "1.17.9" - }, - "peerDependencies": { - "search-insights": ">= 1 < 3" - } - }, - "node_modules/@docsearch/react/node_modules/@algolia/autocomplete-preset-algolia": { - "version": "1.17.9", - "resolved": "https://registry.npmjs.org/@algolia/autocomplete-preset-algolia/-/autocomplete-preset-algolia-1.17.9.tgz", - "integrity": "sha512-Na1OuceSJeg8j7ZWn5ssMu/Ax3amtOwk76u4h5J4eK2Nx2KB5qt0Z4cOapCsxot9VcEN11ADV5aUSlQF4RhGjQ==", - "license": "MIT", - "dependencies": { - "@algolia/autocomplete-shared": "1.17.9" - }, - "peerDependencies": { - "@algolia/client-search": ">= 4.9.1 < 6", - "algoliasearch": ">= 4.9.1 < 6" - } - }, - "node_modules/@docsearch/react/node_modules/@algolia/autocomplete-shared": { - "version": "1.17.9", - "resolved": "https://registry.npmjs.org/@algolia/autocomplete-shared/-/autocomplete-shared-1.17.9.tgz", - "integrity": "sha512-iDf05JDQ7I0b7JEA/9IektxN/80a2MZ1ToohfmNS3rfeuQnIKI3IJlIafD0xu4StbtQTghx9T3Maa97ytkXenQ==", - "license": "MIT", - "peerDependencies": { - "@algolia/client-search": ">= 4.9.1 < 6", - "algoliasearch": ">= 4.9.1 < 6" - } - }, - 
"node_modules/@docsearch/react/node_modules/@algolia/client-abtesting": { - "version": "5.25.0", - "resolved": "https://registry.npmjs.org/@algolia/client-abtesting/-/client-abtesting-5.25.0.tgz", - "integrity": "sha512-1pfQulNUYNf1Tk/svbfjfkLBS36zsuph6m+B6gDkPEivFmso/XnRgwDvjAx80WNtiHnmeNjIXdF7Gos8+OLHqQ==", - "license": "MIT", - "dependencies": { - "@algolia/client-common": "5.25.0", - "@algolia/requester-browser-xhr": "5.25.0", - "@algolia/requester-fetch": "5.25.0", - "@algolia/requester-node-http": "5.25.0" - }, - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@docsearch/react/node_modules/@algolia/client-analytics": { - "version": "5.25.0", - "resolved": "https://registry.npmjs.org/@algolia/client-analytics/-/client-analytics-5.25.0.tgz", - "integrity": "sha512-AFbG6VDJX/o2vDd9hqncj1B6B4Tulk61mY0pzTtzKClyTDlNP0xaUiEKhl6E7KO9I/x0FJF5tDCm0Hn6v5x18A==", - "license": "MIT", - "dependencies": { - "@algolia/client-common": "5.25.0", - "@algolia/requester-browser-xhr": "5.25.0", - "@algolia/requester-fetch": "5.25.0", - "@algolia/requester-node-http": "5.25.0" - }, - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@docsearch/react/node_modules/@algolia/client-common": { - "version": "5.25.0", - "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-5.25.0.tgz", - "integrity": "sha512-il1zS/+Rc6la6RaCdSZ2YbJnkQC6W1wiBO8+SH+DE6CPMWBU6iDVzH0sCKSAtMWl9WBxoN6MhNjGBnCv9Yy2bA==", - "license": "MIT", - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@docsearch/react/node_modules/@algolia/client-insights": { - "version": "5.25.0", - "resolved": "https://registry.npmjs.org/@algolia/client-insights/-/client-insights-5.25.0.tgz", - "integrity": "sha512-blbjrUH1siZNfyCGeq0iLQu00w3a4fBXm0WRIM0V8alcAPo7rWjLbMJMrfBtzL9X5ic6wgxVpDADXduGtdrnkw==", - "license": "MIT", - "dependencies": { - "@algolia/client-common": "5.25.0", - "@algolia/requester-browser-xhr": "5.25.0", - "@algolia/requester-fetch": "5.25.0", - "@algolia/requester-node-http": "5.25.0" - }, - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@docsearch/react/node_modules/@algolia/client-personalization": { - "version": "5.25.0", - "resolved": "https://registry.npmjs.org/@algolia/client-personalization/-/client-personalization-5.25.0.tgz", - "integrity": "sha512-aywoEuu1NxChBcHZ1pWaat0Plw7A8jDMwjgRJ00Mcl7wGlwuPt5dJ/LTNcg3McsEUbs2MBNmw0ignXBw9Tbgow==", - "license": "MIT", - "dependencies": { - "@algolia/client-common": "5.25.0", - "@algolia/requester-browser-xhr": "5.25.0", - "@algolia/requester-fetch": "5.25.0", - "@algolia/requester-node-http": "5.25.0" - }, - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@docsearch/react/node_modules/@algolia/client-query-suggestions": { - "version": "5.25.0", - "resolved": "https://registry.npmjs.org/@algolia/client-query-suggestions/-/client-query-suggestions-5.25.0.tgz", - "integrity": "sha512-a/W2z6XWKjKjIW1QQQV8PTTj1TXtaKx79uR3NGBdBdGvVdt24KzGAaN7sCr5oP8DW4D3cJt44wp2OY/fZcPAVA==", - "license": "MIT", - "dependencies": { - "@algolia/client-common": "5.25.0", - "@algolia/requester-browser-xhr": "5.25.0", - "@algolia/requester-fetch": "5.25.0", - "@algolia/requester-node-http": "5.25.0" - }, - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@docsearch/react/node_modules/@algolia/client-search": { - "version": "5.25.0", - "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-5.25.0.tgz", - "integrity": 
"sha512-9rUYcMIBOrCtYiLX49djyzxqdK9Dya/6Z/8sebPn94BekT+KLOpaZCuc6s0Fpfq7nx5J6YY5LIVFQrtioK9u0g==", - "license": "MIT", - "dependencies": { - "@algolia/client-common": "5.25.0", - "@algolia/requester-browser-xhr": "5.25.0", - "@algolia/requester-fetch": "5.25.0", - "@algolia/requester-node-http": "5.25.0" - }, - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@docsearch/react/node_modules/@algolia/ingestion": { - "version": "1.25.0", - "resolved": "https://registry.npmjs.org/@algolia/ingestion/-/ingestion-1.25.0.tgz", - "integrity": "sha512-jJeH/Hk+k17Vkokf02lkfYE4A+EJX+UgnMhTLR/Mb+d1ya5WhE+po8p5a/Nxb6lo9OLCRl6w3Hmk1TX1e9gVbQ==", - "license": "MIT", - "dependencies": { - "@algolia/client-common": "5.25.0", - "@algolia/requester-browser-xhr": "5.25.0", - "@algolia/requester-fetch": "5.25.0", - "@algolia/requester-node-http": "5.25.0" - }, - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@docsearch/react/node_modules/@algolia/monitoring": { - "version": "1.25.0", - "resolved": "https://registry.npmjs.org/@algolia/monitoring/-/monitoring-1.25.0.tgz", - "integrity": "sha512-Ls3i1AehJ0C6xaHe7kK9vPmzImOn5zBg7Kzj8tRYIcmCWVyuuFwCIsbuIIz/qzUf1FPSWmw0TZrGeTumk2fqXg==", - "license": "MIT", - "dependencies": { - "@algolia/client-common": "5.25.0", - "@algolia/requester-browser-xhr": "5.25.0", - "@algolia/requester-fetch": "5.25.0", - "@algolia/requester-node-http": "5.25.0" - }, - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@docsearch/react/node_modules/@algolia/recommend": { - "version": "5.25.0", - "resolved": "https://registry.npmjs.org/@algolia/recommend/-/recommend-5.25.0.tgz", - "integrity": "sha512-79sMdHpiRLXVxSjgw7Pt4R1aNUHxFLHiaTDnN2MQjHwJ1+o3wSseb55T9VXU4kqy3m7TUme3pyRhLk5ip/S4Mw==", - "license": "MIT", - "dependencies": { - "@algolia/client-common": "5.25.0", - "@algolia/requester-browser-xhr": "5.25.0", - "@algolia/requester-fetch": "5.25.0", - "@algolia/requester-node-http": "5.25.0" - }, - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@docsearch/react/node_modules/@algolia/requester-browser-xhr": { - "version": "5.25.0", - "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-5.25.0.tgz", - "integrity": "sha512-JLaF23p1SOPBmfEqozUAgKHQrGl3z/Z5RHbggBu6s07QqXXcazEsub5VLonCxGVqTv6a61AAPr8J1G5HgGGjEw==", - "license": "MIT", - "dependencies": { - "@algolia/client-common": "5.25.0" - }, - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@docsearch/react/node_modules/@algolia/requester-fetch": { - "version": "5.25.0", - "resolved": "https://registry.npmjs.org/@algolia/requester-fetch/-/requester-fetch-5.25.0.tgz", - "integrity": "sha512-rtzXwqzFi1edkOF6sXxq+HhmRKDy7tz84u0o5t1fXwz0cwx+cjpmxu/6OQKTdOJFS92JUYHsG51Iunie7xbqfQ==", - "license": "MIT", - "dependencies": { - "@algolia/client-common": "5.25.0" - }, - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@docsearch/react/node_modules/@algolia/requester-node-http": { - "version": "5.25.0", - "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-5.25.0.tgz", - "integrity": "sha512-ZO0UKvDyEFvyeJQX0gmZDQEvhLZ2X10K+ps6hViMo1HgE2V8em00SwNsQ+7E/52a+YiBkVWX61pJJJE44juDMQ==", - "license": "MIT", - "dependencies": { - "@algolia/client-common": "5.25.0" - }, - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@docsearch/react/node_modules/algoliasearch": { - "version": "5.25.0", - "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-5.25.0.tgz", - "integrity": 
"sha512-n73BVorL4HIwKlfJKb4SEzAYkR3Buwfwbh+MYxg2mloFph2fFGV58E90QTzdbfzWrLn4HE5Czx/WTjI8fcHaMg==", - "license": "MIT", - "dependencies": { - "@algolia/client-abtesting": "5.25.0", - "@algolia/client-analytics": "5.25.0", - "@algolia/client-common": "5.25.0", - "@algolia/client-insights": "5.25.0", - "@algolia/client-personalization": "5.25.0", - "@algolia/client-query-suggestions": "5.25.0", - "@algolia/client-search": "5.25.0", - "@algolia/ingestion": "1.25.0", - "@algolia/monitoring": "1.25.0", - "@algolia/recommend": "5.25.0", - "@algolia/requester-browser-xhr": "5.25.0", - "@algolia/requester-fetch": "5.25.0", - "@algolia/requester-node-http": "5.25.0" - }, - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@docusaurus/babel": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/@docusaurus/babel/-/babel-3.8.0.tgz", - "integrity": "sha512-9EJwSgS6TgB8IzGk1L8XddJLhZod8fXT4ULYMx6SKqyCBqCFpVCEjR/hNXXhnmtVM2irDuzYoVLGWv7srG/VOA==", - "license": "MIT", - "dependencies": { - "@babel/core": "^7.25.9", - "@babel/generator": "^7.25.9", - "@babel/plugin-syntax-dynamic-import": "^7.8.3", - "@babel/plugin-transform-runtime": "^7.25.9", - "@babel/preset-env": "^7.25.9", - "@babel/preset-react": "^7.25.9", - "@babel/preset-typescript": "^7.25.9", - "@babel/runtime": "^7.25.9", - "@babel/runtime-corejs3": "^7.25.9", - "@babel/traverse": "^7.25.9", - "@docusaurus/logger": "3.8.0", - "@docusaurus/utils": "3.8.0", - "babel-plugin-dynamic-import-node": "^2.3.3", - "fs-extra": "^11.1.1", - "tslib": "^2.6.0" - }, - "engines": { - "node": ">=18.0" - } - }, - "node_modules/@docusaurus/bundler": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/@docusaurus/bundler/-/bundler-3.8.0.tgz", - "integrity": "sha512-Rq4Z/MSeAHjVzBLirLeMcjLIAQy92pF1OI+2rmt18fSlMARfTGLWRE8Vb+ljQPTOSfJxwDYSzsK6i7XloD2rNA==", - "license": "MIT", - "dependencies": { - "@babel/core": "^7.25.9", - "@docusaurus/babel": "3.8.0", - "@docusaurus/cssnano-preset": "3.8.0", - "@docusaurus/logger": "3.8.0", - "@docusaurus/types": "3.8.0", - "@docusaurus/utils": "3.8.0", - "babel-loader": "^9.2.1", - "clean-css": "^5.3.2", - "copy-webpack-plugin": "^11.0.0", - "css-loader": "^6.8.1", - "css-minimizer-webpack-plugin": "^5.0.1", - "cssnano": "^6.1.2", - "file-loader": "^6.2.0", - "html-minifier-terser": "^7.2.0", - "mini-css-extract-plugin": "^2.9.1", - "null-loader": "^4.0.1", - "postcss": "^8.4.26", - "postcss-loader": "^7.3.3", - "postcss-preset-env": "^10.1.0", - "terser-webpack-plugin": "^5.3.9", - "tslib": "^2.6.0", - "url-loader": "^4.1.1", - "webpack": "^5.95.0", - "webpackbar": "^6.0.1" - }, - "engines": { - "node": ">=18.0" - }, - "peerDependencies": { - "@docusaurus/faster": "*" - }, - "peerDependenciesMeta": { - "@docusaurus/faster": { - "optional": true - } - } - }, - "node_modules/@docusaurus/core": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/@docusaurus/core/-/core-3.8.0.tgz", - "integrity": "sha512-c7u6zFELmSGPEP9WSubhVDjgnpiHgDqMh1qVdCB7rTflh4Jx0msTYmMiO91Ez0KtHj4sIsDsASnjwfJ2IZp3Vw==", - "license": "MIT", - "dependencies": { - "@docusaurus/babel": "3.8.0", - "@docusaurus/bundler": "3.8.0", - "@docusaurus/logger": "3.8.0", - "@docusaurus/mdx-loader": "3.8.0", - "@docusaurus/utils": "3.8.0", - "@docusaurus/utils-common": "3.8.0", - "@docusaurus/utils-validation": "3.8.0", - "boxen": "^6.2.1", - "chalk": "^4.1.2", - "chokidar": "^3.5.3", - "cli-table3": "^0.6.3", - "combine-promises": "^1.1.0", - "commander": "^5.1.0", - "core-js": "^3.31.1", - "detect-port": "^1.5.1", - 
"escape-html": "^1.0.3", - "eta": "^2.2.0", - "eval": "^0.1.8", - "execa": "5.1.1", - "fs-extra": "^11.1.1", - "html-tags": "^3.3.1", - "html-webpack-plugin": "^5.6.0", - "leven": "^3.1.0", - "lodash": "^4.17.21", - "open": "^8.4.0", - "p-map": "^4.0.0", - "prompts": "^2.4.2", - "react-helmet-async": "npm:@slorber/react-helmet-async@1.3.0", - "react-loadable": "npm:@docusaurus/react-loadable@6.0.0", - "react-loadable-ssr-addon-v5-slorber": "^1.0.1", - "react-router": "^5.3.4", - "react-router-config": "^5.1.1", - "react-router-dom": "^5.3.4", - "semver": "^7.5.4", - "serve-handler": "^6.1.6", - "tinypool": "^1.0.2", - "tslib": "^2.6.0", - "update-notifier": "^6.0.2", - "webpack": "^5.95.0", - "webpack-bundle-analyzer": "^4.10.2", - "webpack-dev-server": "^4.15.2", - "webpack-merge": "^6.0.1" - }, - "bin": { - "docusaurus": "bin/docusaurus.mjs" - }, - "engines": { - "node": ">=18.0" - }, - "peerDependencies": { - "@mdx-js/react": "^3.0.0", - "react": "^18.0.0 || ^19.0.0", - "react-dom": "^18.0.0 || ^19.0.0" - } - }, - "node_modules/@docusaurus/cssnano-preset": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/@docusaurus/cssnano-preset/-/cssnano-preset-3.8.0.tgz", - "integrity": "sha512-UJ4hAS2T0R4WNy+phwVff2Q0L5+RXW9cwlH6AEphHR5qw3m/yacfWcSK7ort2pMMbDn8uGrD38BTm4oLkuuNoQ==", - "license": "MIT", - "dependencies": { - "cssnano-preset-advanced": "^6.1.2", - "postcss": "^8.4.38", - "postcss-sort-media-queries": "^5.2.0", - "tslib": "^2.6.0" - }, - "engines": { - "node": ">=18.0" - } - }, - "node_modules/@docusaurus/logger": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/@docusaurus/logger/-/logger-3.8.0.tgz", - "integrity": "sha512-7eEMaFIam5Q+v8XwGqF/n0ZoCld4hV4eCCgQkfcN9Mq5inoZa6PHHW9Wu6lmgzoK5Kx3keEeABcO2SxwraoPDQ==", - "license": "MIT", - "dependencies": { - "chalk": "^4.1.2", - "tslib": "^2.6.0" - }, - "engines": { - "node": ">=18.0" - } - }, - "node_modules/@docusaurus/mdx-loader": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/@docusaurus/mdx-loader/-/mdx-loader-3.8.0.tgz", - "integrity": "sha512-mDPSzssRnpjSdCGuv7z2EIAnPS1MHuZGTaRLwPn4oQwszu4afjWZ/60sfKjTnjBjI8Vl4OgJl2vMmfmiNDX4Ng==", - "license": "MIT", - "dependencies": { - "@docusaurus/logger": "3.8.0", - "@docusaurus/utils": "3.8.0", - "@docusaurus/utils-validation": "3.8.0", - "@mdx-js/mdx": "^3.0.0", - "@slorber/remark-comment": "^1.0.0", - "escape-html": "^1.0.3", - "estree-util-value-to-estree": "^3.0.1", - "file-loader": "^6.2.0", - "fs-extra": "^11.1.1", - "image-size": "^2.0.2", - "mdast-util-mdx": "^3.0.0", - "mdast-util-to-string": "^4.0.0", - "rehype-raw": "^7.0.0", - "remark-directive": "^3.0.0", - "remark-emoji": "^4.0.0", - "remark-frontmatter": "^5.0.0", - "remark-gfm": "^4.0.0", - "stringify-object": "^3.3.0", - "tslib": "^2.6.0", - "unified": "^11.0.3", - "unist-util-visit": "^5.0.0", - "url-loader": "^4.1.1", - "vfile": "^6.0.1", - "webpack": "^5.88.1" - }, - "engines": { - "node": ">=18.0" - }, - "peerDependencies": { - "react": "^18.0.0 || ^19.0.0", - "react-dom": "^18.0.0 || ^19.0.0" - } - }, - "node_modules/@docusaurus/module-type-aliases": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/@docusaurus/module-type-aliases/-/module-type-aliases-3.8.0.tgz", - "integrity": "sha512-/uMb4Ipt5J/QnD13MpnoC/A4EYAe6DKNWqTWLlGrqsPJwJv73vSwkA25xnYunwfqWk0FlUQfGv/Swdh5eCCg7g==", - "license": "MIT", - "dependencies": { - "@docusaurus/types": "3.8.0", - "@types/history": "^4.7.11", - "@types/react": "*", - "@types/react-router-config": "*", - 
"@types/react-router-dom": "*", - "react-helmet-async": "npm:@slorber/react-helmet-async@1.3.0", - "react-loadable": "npm:@docusaurus/react-loadable@6.0.0" - }, - "peerDependencies": { - "react": "*", - "react-dom": "*" - } - }, - "node_modules/@docusaurus/plugin-content-blog": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-blog/-/plugin-content-blog-3.8.0.tgz", - "integrity": "sha512-0SlOTd9R55WEr1GgIXu+hhTT0hzARYx3zIScA5IzpdekZQesI/hKEa5LPHBd415fLkWMjdD59TaW/3qQKpJ0Lg==", - "license": "MIT", - "dependencies": { - "@docusaurus/core": "3.8.0", - "@docusaurus/logger": "3.8.0", - "@docusaurus/mdx-loader": "3.8.0", - "@docusaurus/theme-common": "3.8.0", - "@docusaurus/types": "3.8.0", - "@docusaurus/utils": "3.8.0", - "@docusaurus/utils-common": "3.8.0", - "@docusaurus/utils-validation": "3.8.0", - "cheerio": "1.0.0-rc.12", - "feed": "^4.2.2", - "fs-extra": "^11.1.1", - "lodash": "^4.17.21", - "schema-dts": "^1.1.2", - "srcset": "^4.0.0", - "tslib": "^2.6.0", - "unist-util-visit": "^5.0.0", - "utility-types": "^3.10.0", - "webpack": "^5.88.1" - }, - "engines": { - "node": ">=18.0" - }, - "peerDependencies": { - "@docusaurus/plugin-content-docs": "*", - "react": "^18.0.0 || ^19.0.0", - "react-dom": "^18.0.0 || ^19.0.0" - } - }, - "node_modules/@docusaurus/plugin-content-docs": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-docs/-/plugin-content-docs-3.8.0.tgz", - "integrity": "sha512-fRDMFLbUN6eVRXcjP8s3Y7HpAt9pzPYh1F/7KKXOCxvJhjjCtbon4VJW0WndEPInVz4t8QUXn5QZkU2tGVCE2g==", - "license": "MIT", - "dependencies": { - "@docusaurus/core": "3.8.0", - "@docusaurus/logger": "3.8.0", - "@docusaurus/mdx-loader": "3.8.0", - "@docusaurus/module-type-aliases": "3.8.0", - "@docusaurus/theme-common": "3.8.0", - "@docusaurus/types": "3.8.0", - "@docusaurus/utils": "3.8.0", - "@docusaurus/utils-common": "3.8.0", - "@docusaurus/utils-validation": "3.8.0", - "@types/react-router-config": "^5.0.7", - "combine-promises": "^1.1.0", - "fs-extra": "^11.1.1", - "js-yaml": "^4.1.0", - "lodash": "^4.17.21", - "schema-dts": "^1.1.2", - "tslib": "^2.6.0", - "utility-types": "^3.10.0", - "webpack": "^5.88.1" - }, - "engines": { - "node": ">=18.0" - }, - "peerDependencies": { - "react": "^18.0.0 || ^19.0.0", - "react-dom": "^18.0.0 || ^19.0.0" - } - }, - "node_modules/@docusaurus/plugin-content-pages": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-pages/-/plugin-content-pages-3.8.0.tgz", - "integrity": "sha512-39EDx2y1GA0Pxfion5tQZLNJxL4gq6susd1xzetVBjVIQtwpCdyloOfQBAgX0FylqQxfJrYqL0DIUuq7rd7uBw==", - "license": "MIT", - "dependencies": { - "@docusaurus/core": "3.8.0", - "@docusaurus/mdx-loader": "3.8.0", - "@docusaurus/types": "3.8.0", - "@docusaurus/utils": "3.8.0", - "@docusaurus/utils-validation": "3.8.0", - "fs-extra": "^11.1.1", - "tslib": "^2.6.0", - "webpack": "^5.88.1" - }, - "engines": { - "node": ">=18.0" - }, - "peerDependencies": { - "react": "^18.0.0 || ^19.0.0", - "react-dom": "^18.0.0 || ^19.0.0" - } - }, - "node_modules/@docusaurus/plugin-css-cascade-layers": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-css-cascade-layers/-/plugin-css-cascade-layers-3.8.0.tgz", - "integrity": "sha512-/VBTNymPIxQB8oA3ZQ4GFFRYdH4ZxDRRBECxyjRyv486mfUPXfcdk+im4S5mKWa6EK2JzBz95IH/Wu0qQgJ5yQ==", - "license": "MIT", - "dependencies": { - "@docusaurus/core": "3.8.0", - "@docusaurus/types": "3.8.0", - "@docusaurus/utils-validation": "3.8.0", - "tslib": 
"^2.6.0" - }, - "engines": { - "node": ">=18.0" - } - }, - "node_modules/@docusaurus/plugin-debug": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-debug/-/plugin-debug-3.8.0.tgz", - "integrity": "sha512-teonJvJsDB9o2OnG6ifbhblg/PXzZvpUKHFgD8dOL1UJ58u0lk8o0ZOkvaYEBa9nDgqzoWrRk9w+e3qaG2mOhQ==", - "license": "MIT", - "dependencies": { - "@docusaurus/core": "3.8.0", - "@docusaurus/types": "3.8.0", - "@docusaurus/utils": "3.8.0", - "fs-extra": "^11.1.1", - "react-json-view-lite": "^2.3.0", - "tslib": "^2.6.0" - }, - "engines": { - "node": ">=18.0" - }, - "peerDependencies": { - "react": "^18.0.0 || ^19.0.0", - "react-dom": "^18.0.0 || ^19.0.0" - } - }, - "node_modules/@docusaurus/plugin-google-analytics": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-analytics/-/plugin-google-analytics-3.8.0.tgz", - "integrity": "sha512-aKKa7Q8+3xRSRESipNvlFgNp3FNPELKhuo48Cg/svQbGNwidSHbZT03JqbW4cBaQnyyVchO1ttk+kJ5VC9Gx0w==", - "license": "MIT", - "dependencies": { - "@docusaurus/core": "3.8.0", - "@docusaurus/types": "3.8.0", - "@docusaurus/utils-validation": "3.8.0", - "tslib": "^2.6.0" - }, - "engines": { - "node": ">=18.0" - }, - "peerDependencies": { - "react": "^18.0.0 || ^19.0.0", - "react-dom": "^18.0.0 || ^19.0.0" - } - }, - "node_modules/@docusaurus/plugin-google-gtag": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-gtag/-/plugin-google-gtag-3.8.0.tgz", - "integrity": "sha512-ugQYMGF4BjbAW/JIBtVcp+9eZEgT9HRdvdcDudl5rywNPBA0lct+lXMG3r17s02rrhInMpjMahN3Yc9Cb3H5/g==", - "license": "MIT", - "dependencies": { - "@docusaurus/core": "3.8.0", - "@docusaurus/types": "3.8.0", - "@docusaurus/utils-validation": "3.8.0", - "@types/gtag.js": "^0.0.12", - "tslib": "^2.6.0" - }, - "engines": { - "node": ">=18.0" - }, - "peerDependencies": { - "react": "^18.0.0 || ^19.0.0", - "react-dom": "^18.0.0 || ^19.0.0" - } - }, - "node_modules/@docusaurus/plugin-google-tag-manager": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-tag-manager/-/plugin-google-tag-manager-3.8.0.tgz", - "integrity": "sha512-9juRWxbwZD3SV02Jd9QB6yeN7eu+7T4zB0bvJLcVQwi+am51wAxn2CwbdL0YCCX+9OfiXbADE8D8Q65Hbopu/w==", - "license": "MIT", - "dependencies": { - "@docusaurus/core": "3.8.0", - "@docusaurus/types": "3.8.0", - "@docusaurus/utils-validation": "3.8.0", - "tslib": "^2.6.0" - }, - "engines": { - "node": ">=18.0" - }, - "peerDependencies": { - "react": "^18.0.0 || ^19.0.0", - "react-dom": "^18.0.0 || ^19.0.0" - } - }, - "node_modules/@docusaurus/plugin-sitemap": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-sitemap/-/plugin-sitemap-3.8.0.tgz", - "integrity": "sha512-fGpOIyJvNiuAb90nSJ2Gfy/hUOaDu6826e5w5UxPmbpCIc7KlBHNAZ5g4L4ZuHhc4hdfq4mzVBsQSnne+8Ze1g==", - "license": "MIT", - "dependencies": { - "@docusaurus/core": "3.8.0", - "@docusaurus/logger": "3.8.0", - "@docusaurus/types": "3.8.0", - "@docusaurus/utils": "3.8.0", - "@docusaurus/utils-common": "3.8.0", - "@docusaurus/utils-validation": "3.8.0", - "fs-extra": "^11.1.1", - "sitemap": "^7.1.1", - "tslib": "^2.6.0" - }, - "engines": { - "node": ">=18.0" - }, - "peerDependencies": { - "react": "^18.0.0 || ^19.0.0", - "react-dom": "^18.0.0 || ^19.0.0" - } - }, - "node_modules/@docusaurus/plugin-svgr": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-svgr/-/plugin-svgr-3.8.0.tgz", - "integrity": 
"sha512-kEDyry+4OMz6BWLG/lEqrNsL/w818bywK70N1gytViw4m9iAmoxCUT7Ri9Dgs7xUdzCHJ3OujolEmD88Wy44OA==", - "license": "MIT", - "dependencies": { - "@docusaurus/core": "3.8.0", - "@docusaurus/types": "3.8.0", - "@docusaurus/utils": "3.8.0", - "@docusaurus/utils-validation": "3.8.0", - "@svgr/core": "8.1.0", - "@svgr/webpack": "^8.1.0", - "tslib": "^2.6.0", - "webpack": "^5.88.1" - }, - "engines": { - "node": ">=18.0" - }, - "peerDependencies": { - "react": "^18.0.0 || ^19.0.0", - "react-dom": "^18.0.0 || ^19.0.0" - } - }, - "node_modules/@docusaurus/preset-classic": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/@docusaurus/preset-classic/-/preset-classic-3.8.0.tgz", - "integrity": "sha512-qOu6tQDOWv+rpTlKu+eJATCJVGnABpRCPuqf7LbEaQ1mNY//N/P8cHQwkpAU+aweQfarcZ0XfwCqRHJfjeSV/g==", - "license": "MIT", - "dependencies": { - "@docusaurus/core": "3.8.0", - "@docusaurus/plugin-content-blog": "3.8.0", - "@docusaurus/plugin-content-docs": "3.8.0", - "@docusaurus/plugin-content-pages": "3.8.0", - "@docusaurus/plugin-css-cascade-layers": "3.8.0", - "@docusaurus/plugin-debug": "3.8.0", - "@docusaurus/plugin-google-analytics": "3.8.0", - "@docusaurus/plugin-google-gtag": "3.8.0", - "@docusaurus/plugin-google-tag-manager": "3.8.0", - "@docusaurus/plugin-sitemap": "3.8.0", - "@docusaurus/plugin-svgr": "3.8.0", - "@docusaurus/theme-classic": "3.8.0", - "@docusaurus/theme-common": "3.8.0", - "@docusaurus/theme-search-algolia": "3.8.0", - "@docusaurus/types": "3.8.0" - }, - "engines": { - "node": ">=18.0" - }, - "peerDependencies": { - "react": "^18.0.0 || ^19.0.0", - "react-dom": "^18.0.0 || ^19.0.0" - } - }, - "node_modules/@docusaurus/theme-classic": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-classic/-/theme-classic-3.8.0.tgz", - "integrity": "sha512-nQWFiD5ZjoT76OaELt2n33P3WVuuCz8Dt5KFRP2fCBo2r9JCLsp2GJjZpnaG24LZ5/arRjv4VqWKgpK0/YLt7g==", - "license": "MIT", - "dependencies": { - "@docusaurus/core": "3.8.0", - "@docusaurus/logger": "3.8.0", - "@docusaurus/mdx-loader": "3.8.0", - "@docusaurus/module-type-aliases": "3.8.0", - "@docusaurus/plugin-content-blog": "3.8.0", - "@docusaurus/plugin-content-docs": "3.8.0", - "@docusaurus/plugin-content-pages": "3.8.0", - "@docusaurus/theme-common": "3.8.0", - "@docusaurus/theme-translations": "3.8.0", - "@docusaurus/types": "3.8.0", - "@docusaurus/utils": "3.8.0", - "@docusaurus/utils-common": "3.8.0", - "@docusaurus/utils-validation": "3.8.0", - "@mdx-js/react": "^3.0.0", - "clsx": "^2.0.0", - "copy-text-to-clipboard": "^3.2.0", - "infima": "0.2.0-alpha.45", - "lodash": "^4.17.21", - "nprogress": "^0.2.0", - "postcss": "^8.4.26", - "prism-react-renderer": "^2.3.0", - "prismjs": "^1.29.0", - "react-router-dom": "^5.3.4", - "rtlcss": "^4.1.0", - "tslib": "^2.6.0", - "utility-types": "^3.10.0" - }, - "engines": { - "node": ">=18.0" - }, - "peerDependencies": { - "react": "^18.0.0 || ^19.0.0", - "react-dom": "^18.0.0 || ^19.0.0" - } - }, - "node_modules/@docusaurus/theme-common": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-common/-/theme-common-3.8.0.tgz", - "integrity": "sha512-YqV2vAWpXGLA+A3PMLrOMtqgTHJLDcT+1Caa6RF7N4/IWgrevy5diY8oIHFkXR/eybjcrFFjUPrHif8gSGs3Tw==", - "license": "MIT", - "dependencies": { - "@docusaurus/mdx-loader": "3.8.0", - "@docusaurus/module-type-aliases": "3.8.0", - "@docusaurus/utils": "3.8.0", - "@docusaurus/utils-common": "3.8.0", - "@types/history": "^4.7.11", - "@types/react": "*", - "@types/react-router-config": "*", - "clsx": "^2.0.0", - 
"parse-numeric-range": "^1.3.0", - "prism-react-renderer": "^2.3.0", - "tslib": "^2.6.0", - "utility-types": "^3.10.0" - }, - "engines": { - "node": ">=18.0" - }, - "peerDependencies": { - "@docusaurus/plugin-content-docs": "*", - "react": "^18.0.0 || ^19.0.0", - "react-dom": "^18.0.0 || ^19.0.0" - } - }, - "node_modules/@docusaurus/theme-mermaid": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-mermaid/-/theme-mermaid-3.8.0.tgz", - "integrity": "sha512-ou0NJM37p4xrVuFaZp8qFe5Z/qBq9LuyRTP4KKRa0u2J3zC4f3saBJDgc56FyvvN1OsmU0189KGEPUjTr6hFxg==", - "license": "MIT", - "dependencies": { - "@docusaurus/core": "3.8.0", - "@docusaurus/module-type-aliases": "3.8.0", - "@docusaurus/theme-common": "3.8.0", - "@docusaurus/types": "3.8.0", - "@docusaurus/utils-validation": "3.8.0", - "mermaid": ">=11.6.0", - "tslib": "^2.6.0" - }, - "engines": { - "node": ">=18.0" - }, - "peerDependencies": { - "react": "^18.0.0 || ^19.0.0", - "react-dom": "^18.0.0 || ^19.0.0" - } - }, - "node_modules/@docusaurus/theme-search-algolia": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-search-algolia/-/theme-search-algolia-3.8.0.tgz", - "integrity": "sha512-GBZ5UOcPgiu6nUw153+0+PNWvFKweSnvKIL6Rp04H9olKb475jfKjAwCCtju5D2xs5qXHvCMvzWOg5o9f6DtuQ==", - "license": "MIT", - "dependencies": { - "@docsearch/react": "^3.9.0", - "@docusaurus/core": "3.8.0", - "@docusaurus/logger": "3.8.0", - "@docusaurus/plugin-content-docs": "3.8.0", - "@docusaurus/theme-common": "3.8.0", - "@docusaurus/theme-translations": "3.8.0", - "@docusaurus/utils": "3.8.0", - "@docusaurus/utils-validation": "3.8.0", - "algoliasearch": "^5.17.1", - "algoliasearch-helper": "^3.22.6", - "clsx": "^2.0.0", - "eta": "^2.2.0", - "fs-extra": "^11.1.1", - "lodash": "^4.17.21", - "tslib": "^2.6.0", - "utility-types": "^3.10.0" - }, - "engines": { - "node": ">=18.0" - }, - "peerDependencies": { - "react": "^18.0.0 || ^19.0.0", - "react-dom": "^18.0.0 || ^19.0.0" - } - }, - "node_modules/@docusaurus/theme-search-algolia/node_modules/@algolia/client-analytics": { - "version": "5.19.0", - "resolved": "https://registry.npmjs.org/@algolia/client-analytics/-/client-analytics-5.19.0.tgz", - "integrity": "sha512-CDW4RwnCHzU10upPJqS6N6YwDpDHno7w6/qXT9KPbPbt8szIIzCHrva4O9KIfx1OhdsHzfGSI5hMAiOOYl4DEQ==", - "license": "MIT", - "dependencies": { - "@algolia/client-common": "5.19.0", - "@algolia/requester-browser-xhr": "5.19.0", - "@algolia/requester-fetch": "5.19.0", - "@algolia/requester-node-http": "5.19.0" - }, - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@docusaurus/theme-search-algolia/node_modules/@algolia/client-common": { - "version": "5.19.0", - "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-5.19.0.tgz", - "integrity": "sha512-2ERRbICHXvtj5kfFpY5r8qu9pJII/NAHsdgUXnUitQFwPdPL7wXiupcvZJC7DSntOnE8AE0lM7oDsPhrJfj5nQ==", - "license": "MIT", - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@docusaurus/theme-search-algolia/node_modules/@algolia/client-personalization": { - "version": "5.19.0", - "resolved": "https://registry.npmjs.org/@algolia/client-personalization/-/client-personalization-5.19.0.tgz", - "integrity": "sha512-B9eoce/fk8NLboGje+pMr72pw+PV7c5Z01On477heTZ7jkxoZ4X92dobeGuEQop61cJ93Gaevd1of4mBr4hu2A==", - "license": "MIT", - "dependencies": { - "@algolia/client-common": "5.19.0", - "@algolia/requester-browser-xhr": "5.19.0", - "@algolia/requester-fetch": "5.19.0", - "@algolia/requester-node-http": "5.19.0" - }, - "engines": { - "node": 
">= 14.0.0" - } - }, - "node_modules/@docusaurus/theme-search-algolia/node_modules/@algolia/client-search": { - "version": "5.19.0", - "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-5.19.0.tgz", - "integrity": "sha512-Ctg3xXD/1VtcwmkulR5+cKGOMj4r0wC49Y/KZdGQcqpydKn+e86F6l3tb3utLJQVq4lpEJud6kdRykFgcNsp8Q==", - "license": "MIT", - "dependencies": { - "@algolia/client-common": "5.19.0", - "@algolia/requester-browser-xhr": "5.19.0", - "@algolia/requester-fetch": "5.19.0", - "@algolia/requester-node-http": "5.19.0" - }, - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@docusaurus/theme-search-algolia/node_modules/@algolia/recommend": { - "version": "5.19.0", - "resolved": "https://registry.npmjs.org/@algolia/recommend/-/recommend-5.19.0.tgz", - "integrity": "sha512-PbgrMTbUPlmwfJsxjFhal4XqZO2kpBNRjemLVTkUiti4w/+kzcYO4Hg5zaBgVqPwvFDNQ8JS4SS3TBBem88u+g==", - "license": "MIT", - "dependencies": { - "@algolia/client-common": "5.19.0", - "@algolia/requester-browser-xhr": "5.19.0", - "@algolia/requester-fetch": "5.19.0", - "@algolia/requester-node-http": "5.19.0" - }, - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@docusaurus/theme-search-algolia/node_modules/@algolia/requester-browser-xhr": { - "version": "5.19.0", - "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-5.19.0.tgz", - "integrity": "sha512-GfnhnQBT23mW/VMNs7m1qyEyZzhZz093aY2x8p0era96MMyNv8+FxGek5pjVX0b57tmSCZPf4EqNCpkGcGsmbw==", - "license": "MIT", - "dependencies": { - "@algolia/client-common": "5.19.0" - }, - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@docusaurus/theme-search-algolia/node_modules/@algolia/requester-node-http": { - "version": "5.19.0", - "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-5.19.0.tgz", - "integrity": "sha512-p6t8ue0XZNjcRiqNkb5QAM0qQRAKsCiebZ6n9JjWA+p8fWf8BvnhO55y2fO28g3GW0Imj7PrAuyBuxq8aDVQwQ==", - "license": "MIT", - "dependencies": { - "@algolia/client-common": "5.19.0" - }, - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@docusaurus/theme-search-algolia/node_modules/algoliasearch": { - "version": "5.19.0", - "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-5.19.0.tgz", - "integrity": "sha512-zrLtGhC63z3sVLDDKGW+SlCRN9eJHFTgdEmoAOpsVh6wgGL1GgTTDou7tpCBjevzgIvi3AIyDAQO3Xjbg5eqZg==", - "license": "MIT", - "dependencies": { - "@algolia/client-abtesting": "5.19.0", - "@algolia/client-analytics": "5.19.0", - "@algolia/client-common": "5.19.0", - "@algolia/client-insights": "5.19.0", - "@algolia/client-personalization": "5.19.0", - "@algolia/client-query-suggestions": "5.19.0", - "@algolia/client-search": "5.19.0", - "@algolia/ingestion": "1.19.0", - "@algolia/monitoring": "1.19.0", - "@algolia/recommend": "5.19.0", - "@algolia/requester-browser-xhr": "5.19.0", - "@algolia/requester-fetch": "5.19.0", - "@algolia/requester-node-http": "5.19.0" - }, - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@docusaurus/theme-translations": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-translations/-/theme-translations-3.8.0.tgz", - "integrity": "sha512-1DTy/snHicgkCkryWq54fZvsAglTdjTx4qjOXgqnXJ+DIty1B+aPQrAVUu8LiM+6BiILfmNxYsxhKTj+BS3PZg==", - "license": "MIT", - "dependencies": { - "fs-extra": "^11.1.1", - "tslib": "^2.6.0" - }, - "engines": { - "node": ">=18.0" - } - }, - "node_modules/@docusaurus/types": { - "version": "3.8.0", - "resolved": 
"https://registry.npmjs.org/@docusaurus/types/-/types-3.8.0.tgz", - "integrity": "sha512-RDEClpwNxZq02c+JlaKLWoS13qwWhjcNsi2wG1UpzmEnuti/z1Wx4SGpqbUqRPNSd8QWWePR8Cb7DvG0VN/TtA==", - "license": "MIT", - "dependencies": { - "@mdx-js/mdx": "^3.0.0", - "@types/history": "^4.7.11", - "@types/react": "*", - "commander": "^5.1.0", - "joi": "^17.9.2", - "react-helmet-async": "npm:@slorber/react-helmet-async@1.3.0", - "utility-types": "^3.10.0", - "webpack": "^5.95.0", - "webpack-merge": "^5.9.0" - }, - "peerDependencies": { - "react": "^18.0.0 || ^19.0.0", - "react-dom": "^18.0.0 || ^19.0.0" - } - }, - "node_modules/@docusaurus/types/node_modules/webpack-merge": { - "version": "5.10.0", - "resolved": "https://registry.npmjs.org/webpack-merge/-/webpack-merge-5.10.0.tgz", - "integrity": "sha512-+4zXKdx7UnO+1jaN4l2lHVD+mFvnlZQP/6ljaJVb4SZiwIKeUnrT5l0gkT8z+n4hKpC+jpOv6O9R+gLtag7pSA==", - "license": "MIT", - "dependencies": { - "clone-deep": "^4.0.1", - "flat": "^5.0.2", - "wildcard": "^2.0.0" - }, - "engines": { - "node": ">=10.0.0" - } - }, - "node_modules/@docusaurus/utils": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/@docusaurus/utils/-/utils-3.8.0.tgz", - "integrity": "sha512-2wvtG28ALCN/A1WCSLxPASFBFzXCnP0YKCAFIPcvEb6imNu1wg7ni/Svcp71b3Z2FaOFFIv4Hq+j4gD7gA0yfQ==", - "license": "MIT", - "dependencies": { - "@docusaurus/logger": "3.8.0", - "@docusaurus/types": "3.8.0", - "@docusaurus/utils-common": "3.8.0", - "escape-string-regexp": "^4.0.0", - "execa": "5.1.1", - "file-loader": "^6.2.0", - "fs-extra": "^11.1.1", - "github-slugger": "^1.5.0", - "globby": "^11.1.0", - "gray-matter": "^4.0.3", - "jiti": "^1.20.0", - "js-yaml": "^4.1.0", - "lodash": "^4.17.21", - "micromatch": "^4.0.5", - "p-queue": "^6.6.2", - "prompts": "^2.4.2", - "resolve-pathname": "^3.0.0", - "tslib": "^2.6.0", - "url-loader": "^4.1.1", - "utility-types": "^3.10.0", - "webpack": "^5.88.1" - }, - "engines": { - "node": ">=18.0" - } - }, - "node_modules/@docusaurus/utils-common": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/@docusaurus/utils-common/-/utils-common-3.8.0.tgz", - "integrity": "sha512-3TGF+wVTGgQ3pAc9+5jVchES4uXUAhAt9pwv7uws4mVOxL4alvU3ue/EZ+R4XuGk94pDy7CNXjRXpPjlfZXQfw==", - "license": "MIT", - "dependencies": { - "@docusaurus/types": "3.8.0", - "tslib": "^2.6.0" - }, - "engines": { - "node": ">=18.0" - } - }, - "node_modules/@docusaurus/utils-validation": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/@docusaurus/utils-validation/-/utils-validation-3.8.0.tgz", - "integrity": "sha512-MrnEbkigr54HkdFeg8e4FKc4EF+E9dlVwsY3XQZsNkbv3MKZnbHQ5LsNJDIKDROFe8PBf5C4qCAg5TPBpsjrjg==", - "license": "MIT", - "dependencies": { - "@docusaurus/logger": "3.8.0", - "@docusaurus/utils": "3.8.0", - "@docusaurus/utils-common": "3.8.0", - "fs-extra": "^11.2.0", - "joi": "^17.9.2", - "js-yaml": "^4.1.0", - "lodash": "^4.17.21", - "tslib": "^2.6.0" - }, - "engines": { - "node": ">=18.0" - } - }, - "node_modules/@hapi/hoek": { - "version": "9.3.0", - "resolved": "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.3.0.tgz", - "integrity": "sha512-/c6rf4UJlmHlC9b5BaNvzAcFv7HZ2QHaV0D4/HNlBdvFnvQq8RI4kYdhyPCl7Xj+oWvTWQ8ujhqS53LIgAe6KQ==", - "license": "BSD-3-Clause" - }, - "node_modules/@hapi/topo": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/@hapi/topo/-/topo-5.1.0.tgz", - "integrity": "sha512-foQZKJig7Ob0BMAYBfcJk8d77QtOe7Wo4ox7ff1lQYoNNAb6jwcY1ncdoy2e9wQZzvNy7ODZCYJkK8kzmcAnAg==", - "license": "BSD-3-Clause", - "dependencies": { - "@hapi/hoek": "^9.0.0" - } - }, - 
"node_modules/@iconify/types": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@iconify/types/-/types-2.0.0.tgz", - "integrity": "sha512-+wluvCrRhXrhyOmRDJ3q8mux9JkKy5SJ/v8ol2tu4FVjyYvtEzkc/3pK15ET6RKg4b4w4BmTk1+gsCUhf21Ykg==", - "license": "MIT" - }, - "node_modules/@iconify/utils": { - "version": "2.1.33", - "resolved": "https://registry.npmjs.org/@iconify/utils/-/utils-2.1.33.tgz", - "integrity": "sha512-jP9h6v/g0BIZx0p7XGJJVtkVnydtbgTgt9mVNcGDYwaa7UhdHdI9dvoq+gKj9sijMSJKxUPEG2JyjsgXjxL7Kw==", - "license": "MIT", - "dependencies": { - "@antfu/install-pkg": "^0.4.0", - "@antfu/utils": "^0.7.10", - "@iconify/types": "^2.0.0", - "debug": "^4.3.6", - "kolorist": "^1.8.0", - "local-pkg": "^0.5.0", - "mlly": "^1.7.1" - } - }, - "node_modules/@jest/schemas": { - "version": "29.6.3", - "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", - "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", - "license": "MIT", - "dependencies": { - "@sinclair/typebox": "^0.27.8" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/@jest/types": { - "version": "29.6.3", - "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", - "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", - "license": "MIT", - "dependencies": { - "@jest/schemas": "^29.6.3", - "@types/istanbul-lib-coverage": "^2.0.0", - "@types/istanbul-reports": "^3.0.0", - "@types/node": "*", - "@types/yargs": "^17.0.8", - "chalk": "^4.0.0" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/@jridgewell/gen-mapping": { - "version": "0.3.5", - "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.5.tgz", - "integrity": "sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg==", - "license": "MIT", - "dependencies": { - "@jridgewell/set-array": "^1.2.1", - "@jridgewell/sourcemap-codec": "^1.4.10", - "@jridgewell/trace-mapping": "^0.3.24" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/@jridgewell/resolve-uri": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", - "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", - "license": "MIT", - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/@jridgewell/set-array": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz", - "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==", - "license": "MIT", - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/@jridgewell/source-map": { - "version": "0.3.6", - "resolved": "https://registry.npmjs.org/@jridgewell/source-map/-/source-map-0.3.6.tgz", - "integrity": "sha512-1ZJTZebgqllO79ue2bm3rIGud/bOe0pP5BjSRCRxxYkEZS8STV7zN84UBbiYu7jy+eCKSnVIUgoWWE/tt+shMQ==", - "license": "MIT", - "dependencies": { - "@jridgewell/gen-mapping": "^0.3.5", - "@jridgewell/trace-mapping": "^0.3.25" - } - }, - "node_modules/@jridgewell/sourcemap-codec": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz", - "integrity": "sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==", - "license": "MIT" - }, - 
"node_modules/@jridgewell/trace-mapping": { - "version": "0.3.25", - "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", - "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", - "license": "MIT", - "dependencies": { - "@jridgewell/resolve-uri": "^3.1.0", - "@jridgewell/sourcemap-codec": "^1.4.14" - } - }, - "node_modules/@leichtgewicht/ip-codec": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/@leichtgewicht/ip-codec/-/ip-codec-2.0.5.tgz", - "integrity": "sha512-Vo+PSpZG2/fmgmiNzYK9qWRh8h/CHrwD0mo1h1DzL4yzHNSfWYujGTYsWGreD000gcgmZ7K4Ys6Tx9TxtsKdDw==", - "license": "MIT" - }, - "node_modules/@mdx-js/mdx": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/@mdx-js/mdx/-/mdx-3.1.0.tgz", - "integrity": "sha512-/QxEhPAvGwbQmy1Px8F899L5Uc2KZ6JtXwlCgJmjSTBedwOZkByYcBG4GceIGPXRDsmfxhHazuS+hlOShRLeDw==", - "license": "MIT", - "dependencies": { - "@types/estree": "^1.0.0", - "@types/estree-jsx": "^1.0.0", - "@types/hast": "^3.0.0", - "@types/mdx": "^2.0.0", - "collapse-white-space": "^2.0.0", - "devlop": "^1.0.0", - "estree-util-is-identifier-name": "^3.0.0", - "estree-util-scope": "^1.0.0", - "estree-walker": "^3.0.0", - "hast-util-to-jsx-runtime": "^2.0.0", - "markdown-extensions": "^2.0.0", - "recma-build-jsx": "^1.0.0", - "recma-jsx": "^1.0.0", - "recma-stringify": "^1.0.0", - "rehype-recma": "^1.0.0", - "remark-mdx": "^3.0.0", - "remark-parse": "^11.0.0", - "remark-rehype": "^11.0.0", - "source-map": "^0.7.0", - "unified": "^11.0.0", - "unist-util-position-from-estree": "^2.0.0", - "unist-util-stringify-position": "^4.0.0", - "unist-util-visit": "^5.0.0", - "vfile": "^6.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/@mdx-js/react": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/@mdx-js/react/-/react-3.1.0.tgz", - "integrity": "sha512-QjHtSaoameoalGnKDT3FoIl4+9RwyTmo9ZJGBdLOks/YOiWHoRDI3PUwEzOE7kEmGcV3AFcp9K6dYu9rEuKLAQ==", - "license": "MIT", - "dependencies": { - "@types/mdx": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - }, - "peerDependencies": { - "@types/react": ">=16", - "react": ">=16" - } - }, - "node_modules/@mermaid-js/parser": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/@mermaid-js/parser/-/parser-0.4.0.tgz", - "integrity": "sha512-wla8XOWvQAwuqy+gxiZqY+c7FokraOTHRWMsbB4AgRx9Sy7zKslNyejy7E+a77qHfey5GXw/ik3IXv/NHMJgaA==", - "license": "MIT", - "dependencies": { - "langium": "3.3.1" - } - }, - "node_modules/@nodelib/fs.scandir": { - "version": "2.1.5", - "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", - "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", - "license": "MIT", - "dependencies": { - "@nodelib/fs.stat": "2.0.5", - "run-parallel": "^1.1.9" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/@nodelib/fs.stat": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", - "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", - "license": "MIT", - "engines": { - "node": ">= 8" - } - }, - "node_modules/@nodelib/fs.walk": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", - "integrity": 
"sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", - "license": "MIT", - "dependencies": { - "@nodelib/fs.scandir": "2.1.5", - "fastq": "^1.6.0" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/@pnpm/config.env-replace": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@pnpm/config.env-replace/-/config.env-replace-1.1.0.tgz", - "integrity": "sha512-htyl8TWnKL7K/ESFa1oW2UB5lVDxuF5DpM7tBi6Hu2LNL3mWkIzNLG6N4zoCUP1lCKNxWy/3iu8mS8MvToGd6w==", - "license": "MIT", - "engines": { - "node": ">=12.22.0" - } - }, - "node_modules/@pnpm/network.ca-file": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/@pnpm/network.ca-file/-/network.ca-file-1.0.2.tgz", - "integrity": "sha512-YcPQ8a0jwYU9bTdJDpXjMi7Brhkr1mXsXrUJvjqM2mQDgkRiz8jFaQGOdaLxgjtUfQgZhKy/O3cG/YwmgKaxLA==", - "license": "MIT", - "dependencies": { - "graceful-fs": "4.2.10" - }, - "engines": { - "node": ">=12.22.0" - } - }, - "node_modules/@pnpm/network.ca-file/node_modules/graceful-fs": { - "version": "4.2.10", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.10.tgz", - "integrity": "sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA==", - "license": "ISC" - }, - "node_modules/@pnpm/npm-conf": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/@pnpm/npm-conf/-/npm-conf-2.3.1.tgz", - "integrity": "sha512-c83qWb22rNRuB0UaVCI0uRPNRr8Z0FWnEIvT47jiHAmOIUHbBOg5XvV7pM5x+rKn9HRpjxquDbXYSXr3fAKFcw==", - "license": "MIT", - "dependencies": { - "@pnpm/config.env-replace": "^1.1.0", - "@pnpm/network.ca-file": "^1.0.1", - "config-chain": "^1.1.11" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/@polka/url": { - "version": "1.0.0-next.28", - "resolved": "https://registry.npmjs.org/@polka/url/-/url-1.0.0-next.28.tgz", - "integrity": "sha512-8LduaNlMZGwdZ6qWrKlfa+2M4gahzFkprZiAt2TF8uS0qQgBizKXpXURqvTJ4WtmupWxaLqjRb2UCTe72mu+Aw==", - "license": "MIT" - }, - "node_modules/@sideway/address": { - "version": "4.1.5", - "resolved": "https://registry.npmjs.org/@sideway/address/-/address-4.1.5.tgz", - "integrity": "sha512-IqO/DUQHUkPeixNQ8n0JA6102hT9CmaljNTPmQ1u8MEhBo/R4Q8eKLN/vGZxuebwOroDB4cbpjheD4+/sKFK4Q==", - "license": "BSD-3-Clause", - "dependencies": { - "@hapi/hoek": "^9.0.0" - } - }, - "node_modules/@sideway/formula": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@sideway/formula/-/formula-3.0.1.tgz", - "integrity": "sha512-/poHZJJVjx3L+zVD6g9KgHfYnb443oi7wLu/XKojDviHy6HOEOA6z1Trk5aR1dGcmPenJEgb2sK2I80LeS3MIg==", - "license": "BSD-3-Clause" - }, - "node_modules/@sideway/pinpoint": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@sideway/pinpoint/-/pinpoint-2.0.0.tgz", - "integrity": "sha512-RNiOoTPkptFtSVzQevY/yWtZwf/RxyVnPy/OcA9HBM3MlGDnBEYL5B41H0MTn0Uec8Hi+2qUtTfG2WWZBmMejQ==", - "license": "BSD-3-Clause" - }, - "node_modules/@sinclair/typebox": { - "version": "0.27.8", - "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", - "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", - "license": "MIT" - }, - "node_modules/@sindresorhus/is": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-4.6.0.tgz", - "integrity": "sha512-t09vSN3MdfsyCHoFcTRCH/iUtG7OJ0CsjzB8cjAmKc/va/kIgeDI/TxsigdncE/4be734m0cvIYwNaV4i2XqAw==", - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "url": 
"https://github.com/sindresorhus/is?sponsor=1" - } - }, - "node_modules/@slorber/remark-comment": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@slorber/remark-comment/-/remark-comment-1.0.0.tgz", - "integrity": "sha512-RCE24n7jsOj1M0UPvIQCHTe7fI0sFL4S2nwKVWwHyVr/wI/H8GosgsJGyhnsZoGFnD/P2hLf1mSbrrgSLN93NA==", - "license": "MIT", - "dependencies": { - "micromark-factory-space": "^1.0.0", - "micromark-util-character": "^1.1.0", - "micromark-util-symbol": "^1.0.1" - } - }, - "node_modules/@svgr/babel-plugin-add-jsx-attribute": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-add-jsx-attribute/-/babel-plugin-add-jsx-attribute-8.0.0.tgz", - "integrity": "sha512-b9MIk7yhdS1pMCZM8VeNfUlSKVRhsHZNMl5O9SfaX0l0t5wjdgu4IDzGB8bpnGBBOjGST3rRFVsaaEtI4W6f7g==", - "license": "MIT", - "engines": { - "node": ">=14" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@svgr/babel-plugin-remove-jsx-attribute": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-remove-jsx-attribute/-/babel-plugin-remove-jsx-attribute-8.0.0.tgz", - "integrity": "sha512-BcCkm/STipKvbCl6b7QFrMh/vx00vIP63k2eM66MfHJzPr6O2U0jYEViXkHJWqXqQYjdeA9cuCl5KWmlwjDvbA==", - "license": "MIT", - "engines": { - "node": ">=14" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@svgr/babel-plugin-remove-jsx-empty-expression": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-remove-jsx-empty-expression/-/babel-plugin-remove-jsx-empty-expression-8.0.0.tgz", - "integrity": "sha512-5BcGCBfBxB5+XSDSWnhTThfI9jcO5f0Ai2V24gZpG+wXF14BzwxxdDb4g6trdOux0rhibGs385BeFMSmxtS3uA==", - "license": "MIT", - "engines": { - "node": ">=14" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@svgr/babel-plugin-replace-jsx-attribute-value": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-replace-jsx-attribute-value/-/babel-plugin-replace-jsx-attribute-value-8.0.0.tgz", - "integrity": "sha512-KVQ+PtIjb1BuYT3ht8M5KbzWBhdAjjUPdlMtpuw/VjT8coTrItWX6Qafl9+ji831JaJcu6PJNKCV0bp01lBNzQ==", - "license": "MIT", - "engines": { - "node": ">=14" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@svgr/babel-plugin-svg-dynamic-title": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-svg-dynamic-title/-/babel-plugin-svg-dynamic-title-8.0.0.tgz", - "integrity": "sha512-omNiKqwjNmOQJ2v6ge4SErBbkooV2aAWwaPFs2vUY7p7GhVkzRkJ00kILXQvRhA6miHnNpXv7MRnnSjdRjK8og==", - "license": "MIT", - "engines": { - "node": ">=14" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@svgr/babel-plugin-svg-em-dimensions": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-svg-em-dimensions/-/babel-plugin-svg-em-dimensions-8.0.0.tgz", - "integrity": "sha512-mURHYnu6Iw3UBTbhGwE/vsngtCIbHE43xCRK7kCw4t01xyGqb2Pd+WXekRRoFOBIY29ZoOhUCTEweDMdrjfi9g==", - "license": "MIT", - "engines": { - "node": ">=14" - 
}, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@svgr/babel-plugin-transform-react-native-svg": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-transform-react-native-svg/-/babel-plugin-transform-react-native-svg-8.1.0.tgz", - "integrity": "sha512-Tx8T58CHo+7nwJ+EhUwx3LfdNSG9R2OKfaIXXs5soiy5HtgoAEkDay9LIimLOcG8dJQH1wPZp/cnAv6S9CrR1Q==", - "license": "MIT", - "engines": { - "node": ">=14" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@svgr/babel-plugin-transform-svg-component": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-transform-svg-component/-/babel-plugin-transform-svg-component-8.0.0.tgz", - "integrity": "sha512-DFx8xa3cZXTdb/k3kfPeaixecQLgKh5NVBMwD0AQxOzcZawK4oo1Jh9LbrcACUivsCA7TLG8eeWgrDXjTMhRmw==", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@svgr/babel-preset": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/@svgr/babel-preset/-/babel-preset-8.1.0.tgz", - "integrity": "sha512-7EYDbHE7MxHpv4sxvnVPngw5fuR6pw79SkcrILHJ/iMpuKySNCl5W1qcwPEpU+LgyRXOaAFgH0KhwD18wwg6ug==", - "license": "MIT", - "dependencies": { - "@svgr/babel-plugin-add-jsx-attribute": "8.0.0", - "@svgr/babel-plugin-remove-jsx-attribute": "8.0.0", - "@svgr/babel-plugin-remove-jsx-empty-expression": "8.0.0", - "@svgr/babel-plugin-replace-jsx-attribute-value": "8.0.0", - "@svgr/babel-plugin-svg-dynamic-title": "8.0.0", - "@svgr/babel-plugin-svg-em-dimensions": "8.0.0", - "@svgr/babel-plugin-transform-react-native-svg": "8.1.0", - "@svgr/babel-plugin-transform-svg-component": "8.0.0" - }, - "engines": { - "node": ">=14" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@svgr/core": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/@svgr/core/-/core-8.1.0.tgz", - "integrity": "sha512-8QqtOQT5ACVlmsvKOJNEaWmRPmcojMOzCz4Hs2BGG/toAp/K38LcsMRyLp349glq5AzJbCEeimEoxaX6v/fLrA==", - "license": "MIT", - "dependencies": { - "@babel/core": "^7.21.3", - "@svgr/babel-preset": "8.1.0", - "camelcase": "^6.2.0", - "cosmiconfig": "^8.1.3", - "snake-case": "^3.0.4" - }, - "engines": { - "node": ">=14" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - } - }, - "node_modules/@svgr/hast-util-to-babel-ast": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/@svgr/hast-util-to-babel-ast/-/hast-util-to-babel-ast-8.0.0.tgz", - "integrity": "sha512-EbDKwO9GpfWP4jN9sGdYwPBU0kdomaPIL2Eu4YwmgP+sJeXT+L7bMwJUBnhzfH8Q2qMBqZ4fJwpCyYsAN3mt2Q==", - "license": "MIT", - "dependencies": { - "@babel/types": "^7.21.3", - "entities": "^4.4.0" - }, - "engines": { - "node": ">=14" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - } - }, - "node_modules/@svgr/plugin-jsx": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/@svgr/plugin-jsx/-/plugin-jsx-8.1.0.tgz", - "integrity": "sha512-0xiIyBsLlr8quN+WyuxooNW9RJ0Dpr8uOnH/xrCVO8GLUcwHISwj1AG0k+LFzteTkAA0GbX0kj9q6Dk70PTiPA==", - "license": "MIT", - "dependencies": { 
- "@babel/core": "^7.21.3", - "@svgr/babel-preset": "8.1.0", - "@svgr/hast-util-to-babel-ast": "8.0.0", - "svg-parser": "^2.0.4" - }, - "engines": { - "node": ">=14" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - }, - "peerDependencies": { - "@svgr/core": "*" - } - }, - "node_modules/@svgr/plugin-svgo": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/@svgr/plugin-svgo/-/plugin-svgo-8.1.0.tgz", - "integrity": "sha512-Ywtl837OGO9pTLIN/onoWLmDQ4zFUycI1g76vuKGEz6evR/ZTJlJuz3G/fIkb6OVBJ2g0o6CGJzaEjfmEo3AHA==", - "license": "MIT", - "dependencies": { - "cosmiconfig": "^8.1.3", - "deepmerge": "^4.3.1", - "svgo": "^3.0.2" - }, - "engines": { - "node": ">=14" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - }, - "peerDependencies": { - "@svgr/core": "*" - } - }, - "node_modules/@svgr/webpack": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/@svgr/webpack/-/webpack-8.1.0.tgz", - "integrity": "sha512-LnhVjMWyMQV9ZmeEy26maJk+8HTIbd59cH4F2MJ439k9DqejRisfFNGAPvRYlKETuh9LrImlS8aKsBgKjMA8WA==", - "license": "MIT", - "dependencies": { - "@babel/core": "^7.21.3", - "@babel/plugin-transform-react-constant-elements": "^7.21.3", - "@babel/preset-env": "^7.20.2", - "@babel/preset-react": "^7.18.6", - "@babel/preset-typescript": "^7.21.0", - "@svgr/core": "8.1.0", - "@svgr/plugin-jsx": "8.1.0", - "@svgr/plugin-svgo": "8.1.0" - }, - "engines": { - "node": ">=14" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - } - }, - "node_modules/@szmarczak/http-timer": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-5.0.1.tgz", - "integrity": "sha512-+PmQX0PiAYPMeVYe237LJAYvOMYW1j2rH5YROyS3b4CTVJum34HfRvKvAzozHAQG0TnHNdUfY9nCeUyRAs//cw==", - "license": "MIT", - "dependencies": { - "defer-to-connect": "^2.0.1" - }, - "engines": { - "node": ">=14.16" - } - }, - "node_modules/@trysound/sax": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/@trysound/sax/-/sax-0.2.0.tgz", - "integrity": "sha512-L7z9BgrNEcYyUYtF+HaEfiS5ebkh9jXqbszz7pC0hRBPaatV0XjSD3+eHrpqFemQfgwiFF0QPIarnIihIDn7OA==", - "license": "ISC", - "engines": { - "node": ">=10.13.0" - } - }, - "node_modules/@types/acorn": { - "version": "4.0.6", - "resolved": "https://registry.npmjs.org/@types/acorn/-/acorn-4.0.6.tgz", - "integrity": "sha512-veQTnWP+1D/xbxVrPC3zHnCZRjSrKfhbMUlEA43iMZLu7EsnTtkJklIuwrCPbOi8YkvDQAiW05VQQFvvz9oieQ==", - "license": "MIT", - "dependencies": { - "@types/estree": "*" - } - }, - "node_modules/@types/body-parser": { - "version": "1.19.5", - "resolved": "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.5.tgz", - "integrity": "sha512-fB3Zu92ucau0iQ0JMCFQE7b/dv8Ot07NI3KaZIkIUNXq82k4eBAqUaneXfleGY9JWskeS9y+u0nXMyspcuQrCg==", - "license": "MIT", - "dependencies": { - "@types/connect": "*", - "@types/node": "*" - } - }, - "node_modules/@types/bonjour": { - "version": "3.5.13", - "resolved": "https://registry.npmjs.org/@types/bonjour/-/bonjour-3.5.13.tgz", - "integrity": "sha512-z9fJ5Im06zvUL548KvYNecEVlA7cVDkGUi6kZusb04mpyEFKCIZJvloCcmpmLaIahDpOQGHaHmG6imtPMmPXGQ==", - "license": "MIT", - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/connect": { - "version": "3.4.38", - "resolved": "https://registry.npmjs.org/@types/connect/-/connect-3.4.38.tgz", - "integrity": "sha512-K6uROf1LD88uDQqJCktA4yzL1YYAK6NgfsI0v/mTgyPKWsX1CnJ0XPSDhViejru1GcRkLWb8RlzFYJRqGUbaug==", - "license": "MIT", - 
"dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/connect-history-api-fallback": { - "version": "1.5.4", - "resolved": "https://registry.npmjs.org/@types/connect-history-api-fallback/-/connect-history-api-fallback-1.5.4.tgz", - "integrity": "sha512-n6Cr2xS1h4uAulPRdlw6Jl6s1oG8KrVilPN2yUITEs+K48EzMJJ3W1xy8K5eWuFvjp3R74AOIGSmp2UfBJ8HFw==", - "license": "MIT", - "dependencies": { - "@types/express-serve-static-core": "*", - "@types/node": "*" - } - }, - "node_modules/@types/d3": { - "version": "7.4.3", - "resolved": "https://registry.npmjs.org/@types/d3/-/d3-7.4.3.tgz", - "integrity": "sha512-lZXZ9ckh5R8uiFVt8ogUNf+pIrK4EsWrx2Np75WvF/eTpJ0FMHNhjXk8CKEx/+gpHbNQyJWehbFaTvqmHWB3ww==", - "license": "MIT", - "dependencies": { - "@types/d3-array": "*", - "@types/d3-axis": "*", - "@types/d3-brush": "*", - "@types/d3-chord": "*", - "@types/d3-color": "*", - "@types/d3-contour": "*", - "@types/d3-delaunay": "*", - "@types/d3-dispatch": "*", - "@types/d3-drag": "*", - "@types/d3-dsv": "*", - "@types/d3-ease": "*", - "@types/d3-fetch": "*", - "@types/d3-force": "*", - "@types/d3-format": "*", - "@types/d3-geo": "*", - "@types/d3-hierarchy": "*", - "@types/d3-interpolate": "*", - "@types/d3-path": "*", - "@types/d3-polygon": "*", - "@types/d3-quadtree": "*", - "@types/d3-random": "*", - "@types/d3-scale": "*", - "@types/d3-scale-chromatic": "*", - "@types/d3-selection": "*", - "@types/d3-shape": "*", - "@types/d3-time": "*", - "@types/d3-time-format": "*", - "@types/d3-timer": "*", - "@types/d3-transition": "*", - "@types/d3-zoom": "*" - } - }, - "node_modules/@types/d3-array": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/@types/d3-array/-/d3-array-3.2.1.tgz", - "integrity": "sha512-Y2Jn2idRrLzUfAKV2LyRImR+y4oa2AntrgID95SHJxuMUrkNXmanDSed71sRNZysveJVt1hLLemQZIady0FpEg==", - "license": "MIT" - }, - "node_modules/@types/d3-axis": { - "version": "3.0.6", - "resolved": "https://registry.npmjs.org/@types/d3-axis/-/d3-axis-3.0.6.tgz", - "integrity": "sha512-pYeijfZuBd87T0hGn0FO1vQ/cgLk6E1ALJjfkC0oJ8cbwkZl3TpgS8bVBLZN+2jjGgg38epgxb2zmoGtSfvgMw==", - "license": "MIT", - "dependencies": { - "@types/d3-selection": "*" - } - }, - "node_modules/@types/d3-brush": { - "version": "3.0.6", - "resolved": "https://registry.npmjs.org/@types/d3-brush/-/d3-brush-3.0.6.tgz", - "integrity": "sha512-nH60IZNNxEcrh6L1ZSMNA28rj27ut/2ZmI3r96Zd+1jrZD++zD3LsMIjWlvg4AYrHn/Pqz4CF3veCxGjtbqt7A==", - "license": "MIT", - "dependencies": { - "@types/d3-selection": "*" - } - }, - "node_modules/@types/d3-chord": { - "version": "3.0.6", - "resolved": "https://registry.npmjs.org/@types/d3-chord/-/d3-chord-3.0.6.tgz", - "integrity": "sha512-LFYWWd8nwfwEmTZG9PfQxd17HbNPksHBiJHaKuY1XeqscXacsS2tyoo6OdRsjf+NQYeB6XrNL3a25E3gH69lcg==", - "license": "MIT" - }, - "node_modules/@types/d3-color": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/@types/d3-color/-/d3-color-3.1.3.tgz", - "integrity": "sha512-iO90scth9WAbmgv7ogoq57O9YpKmFBbmoEoCHDB2xMBY0+/KVrqAaCDyCE16dUspeOvIxFFRI+0sEtqDqy2b4A==", - "license": "MIT" - }, - "node_modules/@types/d3-contour": { - "version": "3.0.6", - "resolved": "https://registry.npmjs.org/@types/d3-contour/-/d3-contour-3.0.6.tgz", - "integrity": "sha512-BjzLgXGnCWjUSYGfH1cpdo41/hgdWETu4YxpezoztawmqsvCeep+8QGfiY6YbDvfgHz/DkjeIkkZVJavB4a3rg==", - "license": "MIT", - "dependencies": { - "@types/d3-array": "*", - "@types/geojson": "*" - } - }, - "node_modules/@types/d3-delaunay": { - "version": "6.0.4", - "resolved": 
"https://registry.npmjs.org/@types/d3-delaunay/-/d3-delaunay-6.0.4.tgz", - "integrity": "sha512-ZMaSKu4THYCU6sV64Lhg6qjf1orxBthaC161plr5KuPHo3CNm8DTHiLw/5Eq2b6TsNP0W0iJrUOFscY6Q450Hw==", - "license": "MIT" - }, - "node_modules/@types/d3-dispatch": { - "version": "3.0.6", - "resolved": "https://registry.npmjs.org/@types/d3-dispatch/-/d3-dispatch-3.0.6.tgz", - "integrity": "sha512-4fvZhzMeeuBJYZXRXrRIQnvUYfyXwYmLsdiN7XXmVNQKKw1cM8a5WdID0g1hVFZDqT9ZqZEY5pD44p24VS7iZQ==", - "license": "MIT" - }, - "node_modules/@types/d3-drag": { - "version": "3.0.7", - "resolved": "https://registry.npmjs.org/@types/d3-drag/-/d3-drag-3.0.7.tgz", - "integrity": "sha512-HE3jVKlzU9AaMazNufooRJ5ZpWmLIoc90A37WU2JMmeq28w1FQqCZswHZ3xR+SuxYftzHq6WU6KJHvqxKzTxxQ==", - "license": "MIT", - "dependencies": { - "@types/d3-selection": "*" - } - }, - "node_modules/@types/d3-dsv": { - "version": "3.0.7", - "resolved": "https://registry.npmjs.org/@types/d3-dsv/-/d3-dsv-3.0.7.tgz", - "integrity": "sha512-n6QBF9/+XASqcKK6waudgL0pf/S5XHPPI8APyMLLUHd8NqouBGLsU8MgtO7NINGtPBtk9Kko/W4ea0oAspwh9g==", - "license": "MIT" - }, - "node_modules/@types/d3-ease": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/@types/d3-ease/-/d3-ease-3.0.2.tgz", - "integrity": "sha512-NcV1JjO5oDzoK26oMzbILE6HW7uVXOHLQvHshBUW4UMdZGfiY6v5BeQwh9a9tCzv+CeefZQHJt5SRgK154RtiA==", - "license": "MIT" - }, - "node_modules/@types/d3-fetch": { - "version": "3.0.7", - "resolved": "https://registry.npmjs.org/@types/d3-fetch/-/d3-fetch-3.0.7.tgz", - "integrity": "sha512-fTAfNmxSb9SOWNB9IoG5c8Hg6R+AzUHDRlsXsDZsNp6sxAEOP0tkP3gKkNSO/qmHPoBFTxNrjDprVHDQDvo5aA==", - "license": "MIT", - "dependencies": { - "@types/d3-dsv": "*" - } - }, - "node_modules/@types/d3-force": { - "version": "3.0.10", - "resolved": "https://registry.npmjs.org/@types/d3-force/-/d3-force-3.0.10.tgz", - "integrity": "sha512-ZYeSaCF3p73RdOKcjj+swRlZfnYpK1EbaDiYICEEp5Q6sUiqFaFQ9qgoshp5CzIyyb/yD09kD9o2zEltCexlgw==", - "license": "MIT" - }, - "node_modules/@types/d3-format": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/@types/d3-format/-/d3-format-3.0.4.tgz", - "integrity": "sha512-fALi2aI6shfg7vM5KiR1wNJnZ7r6UuggVqtDA+xiEdPZQwy/trcQaHnwShLuLdta2rTymCNpxYTiMZX/e09F4g==", - "license": "MIT" - }, - "node_modules/@types/d3-geo": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/@types/d3-geo/-/d3-geo-3.1.0.tgz", - "integrity": "sha512-856sckF0oP/diXtS4jNsiQw/UuK5fQG8l/a9VVLeSouf1/PPbBE1i1W852zVwKwYCBkFJJB7nCFTbk6UMEXBOQ==", - "license": "MIT", - "dependencies": { - "@types/geojson": "*" - } - }, - "node_modules/@types/d3-hierarchy": { - "version": "3.1.7", - "resolved": "https://registry.npmjs.org/@types/d3-hierarchy/-/d3-hierarchy-3.1.7.tgz", - "integrity": "sha512-tJFtNoYBtRtkNysX1Xq4sxtjK8YgoWUNpIiUee0/jHGRwqvzYxkq0hGVbbOGSz+JgFxxRu4K8nb3YpG3CMARtg==", - "license": "MIT" - }, - "node_modules/@types/d3-interpolate": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/@types/d3-interpolate/-/d3-interpolate-3.0.4.tgz", - "integrity": "sha512-mgLPETlrpVV1YRJIglr4Ez47g7Yxjl1lj7YKsiMCb27VJH9W8NVM6Bb9d8kkpG/uAQS5AmbA48q2IAolKKo1MA==", - "license": "MIT", - "dependencies": { - "@types/d3-color": "*" - } - }, - "node_modules/@types/d3-path": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/@types/d3-path/-/d3-path-3.1.0.tgz", - "integrity": "sha512-P2dlU/q51fkOc/Gfl3Ul9kicV7l+ra934qBFXCFhrZMOL6du1TM0pm1ThYvENukyOn5h9v+yMJ9Fn5JK4QozrQ==", - "license": "MIT" - }, - "node_modules/@types/d3-polygon": { - "version": "3.0.2", - 
"resolved": "https://registry.npmjs.org/@types/d3-polygon/-/d3-polygon-3.0.2.tgz", - "integrity": "sha512-ZuWOtMaHCkN9xoeEMr1ubW2nGWsp4nIql+OPQRstu4ypeZ+zk3YKqQT0CXVe/PYqrKpZAi+J9mTs05TKwjXSRA==", - "license": "MIT" - }, - "node_modules/@types/d3-quadtree": { - "version": "3.0.6", - "resolved": "https://registry.npmjs.org/@types/d3-quadtree/-/d3-quadtree-3.0.6.tgz", - "integrity": "sha512-oUzyO1/Zm6rsxKRHA1vH0NEDG58HrT5icx/azi9MF1TWdtttWl0UIUsjEQBBh+SIkrpd21ZjEv7ptxWys1ncsg==", - "license": "MIT" - }, - "node_modules/@types/d3-random": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/@types/d3-random/-/d3-random-3.0.3.tgz", - "integrity": "sha512-Imagg1vJ3y76Y2ea0871wpabqp613+8/r0mCLEBfdtqC7xMSfj9idOnmBYyMoULfHePJyxMAw3nWhJxzc+LFwQ==", - "license": "MIT" - }, - "node_modules/@types/d3-scale": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/@types/d3-scale/-/d3-scale-4.0.8.tgz", - "integrity": "sha512-gkK1VVTr5iNiYJ7vWDI+yUFFlszhNMtVeneJ6lUTKPjprsvLLI9/tgEGiXJOnlINJA8FyA88gfnQsHbybVZrYQ==", - "license": "MIT", - "dependencies": { - "@types/d3-time": "*" - } - }, - "node_modules/@types/d3-scale-chromatic": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/@types/d3-scale-chromatic/-/d3-scale-chromatic-3.0.3.tgz", - "integrity": "sha512-laXM4+1o5ImZv3RpFAsTRn3TEkzqkytiOY0Dz0sq5cnd1dtNlk6sHLon4OvqaiJb28T0S/TdsBI3Sjsy+keJrw==", - "license": "MIT" - }, - "node_modules/@types/d3-selection": { - "version": "3.0.11", - "resolved": "https://registry.npmjs.org/@types/d3-selection/-/d3-selection-3.0.11.tgz", - "integrity": "sha512-bhAXu23DJWsrI45xafYpkQ4NtcKMwWnAC/vKrd2l+nxMFuvOT3XMYTIj2opv8vq8AO5Yh7Qac/nSeP/3zjTK0w==", - "license": "MIT" - }, - "node_modules/@types/d3-shape": { - "version": "3.1.6", - "resolved": "https://registry.npmjs.org/@types/d3-shape/-/d3-shape-3.1.6.tgz", - "integrity": "sha512-5KKk5aKGu2I+O6SONMYSNflgiP0WfZIQvVUMan50wHsLG1G94JlxEVnCpQARfTtzytuY0p/9PXXZb3I7giofIA==", - "license": "MIT", - "dependencies": { - "@types/d3-path": "*" - } - }, - "node_modules/@types/d3-time": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/@types/d3-time/-/d3-time-3.0.3.tgz", - "integrity": "sha512-2p6olUZ4w3s+07q3Tm2dbiMZy5pCDfYwtLXXHUnVzXgQlZ/OyPtUz6OL382BkOuGlLXqfT+wqv8Fw2v8/0geBw==", - "license": "MIT" - }, - "node_modules/@types/d3-time-format": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/@types/d3-time-format/-/d3-time-format-4.0.3.tgz", - "integrity": "sha512-5xg9rC+wWL8kdDj153qZcsJ0FWiFt0J5RB6LYUNZjwSnesfblqrI/bJ1wBdJ8OQfncgbJG5+2F+qfqnqyzYxyg==", - "license": "MIT" - }, - "node_modules/@types/d3-timer": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/@types/d3-timer/-/d3-timer-3.0.2.tgz", - "integrity": "sha512-Ps3T8E8dZDam6fUyNiMkekK3XUsaUEik+idO9/YjPtfj2qruF8tFBXS7XhtE4iIXBLxhmLjP3SXpLhVf21I9Lw==", - "license": "MIT" - }, - "node_modules/@types/d3-transition": { - "version": "3.0.9", - "resolved": "https://registry.npmjs.org/@types/d3-transition/-/d3-transition-3.0.9.tgz", - "integrity": "sha512-uZS5shfxzO3rGlu0cC3bjmMFKsXv+SmZZcgp0KD22ts4uGXp5EVYGzu/0YdwZeKmddhcAccYtREJKkPfXkZuCg==", - "license": "MIT", - "dependencies": { - "@types/d3-selection": "*" - } - }, - "node_modules/@types/d3-zoom": { - "version": "3.0.8", - "resolved": "https://registry.npmjs.org/@types/d3-zoom/-/d3-zoom-3.0.8.tgz", - "integrity": "sha512-iqMC4/YlFCSlO8+2Ii1GGGliCAY4XdeG748w5vQUbevlbDu0zSjH/+jojorQVBK/se0j6DUFNPBGSqD3YWYnDw==", - "license": "MIT", - "dependencies": { - "@types/d3-interpolate": "*", - 
"@types/d3-selection": "*" - } - }, - "node_modules/@types/debug": { - "version": "4.1.12", - "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.12.tgz", - "integrity": "sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==", - "license": "MIT", - "dependencies": { - "@types/ms": "*" - } - }, - "node_modules/@types/eslint": { - "version": "9.6.1", - "resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-9.6.1.tgz", - "integrity": "sha512-FXx2pKgId/WyYo2jXw63kk7/+TY7u7AziEJxJAnSFzHlqTAS3Ync6SvgYAN/k4/PQpnnVuzoMuVnByKK2qp0ag==", - "license": "MIT", - "dependencies": { - "@types/estree": "*", - "@types/json-schema": "*" - } - }, - "node_modules/@types/eslint-scope": { - "version": "3.7.7", - "resolved": "https://registry.npmjs.org/@types/eslint-scope/-/eslint-scope-3.7.7.tgz", - "integrity": "sha512-MzMFlSLBqNF2gcHWO0G1vP/YQyfvrxZ0bF+u7mzUdZ1/xK4A4sru+nraZz5i3iEIk1l1uyicaDVTB4QbbEkAYg==", - "license": "MIT", - "dependencies": { - "@types/eslint": "*", - "@types/estree": "*" - } - }, - "node_modules/@types/estree": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.6.tgz", - "integrity": "sha512-AYnb1nQyY49te+VRAVgmzfcgjYS91mY5P0TKUDCLEM+gNnA+3T6rWITXRLYCpahpqSQbN5cE+gHpnPyXjHWxcw==", - "license": "MIT" - }, - "node_modules/@types/estree-jsx": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/@types/estree-jsx/-/estree-jsx-1.0.5.tgz", - "integrity": "sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg==", - "license": "MIT", - "dependencies": { - "@types/estree": "*" - } - }, - "node_modules/@types/express": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/@types/express/-/express-4.17.21.tgz", - "integrity": "sha512-ejlPM315qwLpaQlQDTjPdsUFSc6ZsP4AN6AlWnogPjQ7CVi7PYF3YVz+CY3jE2pwYf7E/7HlDAN0rV2GxTG0HQ==", - "license": "MIT", - "dependencies": { - "@types/body-parser": "*", - "@types/express-serve-static-core": "^4.17.33", - "@types/qs": "*", - "@types/serve-static": "*" - } - }, - "node_modules/@types/express-serve-static-core": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-5.0.1.tgz", - "integrity": "sha512-CRICJIl0N5cXDONAdlTv5ShATZ4HEwk6kDDIW2/w9qOWKg+NU/5F8wYRWCrONad0/UKkloNSmmyN/wX4rtpbVA==", - "license": "MIT", - "dependencies": { - "@types/node": "*", - "@types/qs": "*", - "@types/range-parser": "*", - "@types/send": "*" - } - }, - "node_modules/@types/express/node_modules/@types/express-serve-static-core": { - "version": "4.19.6", - "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-4.19.6.tgz", - "integrity": "sha512-N4LZ2xG7DatVqhCZzOGb1Yi5lMbXSZcmdLDe9EzSndPV2HpWYWzRbaerl2n27irrm94EPpprqa8KpskPT085+A==", - "license": "MIT", - "dependencies": { - "@types/node": "*", - "@types/qs": "*", - "@types/range-parser": "*", - "@types/send": "*" - } - }, - "node_modules/@types/geojson": { - "version": "7946.0.14", - "resolved": "https://registry.npmjs.org/@types/geojson/-/geojson-7946.0.14.tgz", - "integrity": "sha512-WCfD5Ht3ZesJUsONdhvm84dmzWOiOzOAqOncN0++w0lBw1o8OuDNJF2McvvCef/yBqb/HYRahp1BYtODFQ8bRg==", - "license": "MIT" - }, - "node_modules/@types/gtag.js": { - "version": "0.0.12", - "resolved": "https://registry.npmjs.org/@types/gtag.js/-/gtag.js-0.0.12.tgz", - "integrity": "sha512-YQV9bUsemkzG81Ea295/nF/5GijnD2Af7QhEofh7xu+kvCN6RdodgNwwGWXB5GMI3NoyvQo0odNctoH/qLMIpg==", - 
"license": "MIT" - }, - "node_modules/@types/hast": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz", - "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==", - "license": "MIT", - "dependencies": { - "@types/unist": "*" - } - }, - "node_modules/@types/history": { - "version": "4.7.11", - "resolved": "https://registry.npmjs.org/@types/history/-/history-4.7.11.tgz", - "integrity": "sha512-qjDJRrmvBMiTx+jyLxvLfJU7UznFuokDv4f3WRuriHKERccVpFU+8XMQUAbDzoiJCsmexxRExQeMwwCdamSKDA==", - "license": "MIT" - }, - "node_modules/@types/html-minifier-terser": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/@types/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz", - "integrity": "sha512-oh/6byDPnL1zeNXFrDXFLyZjkr1MsBG667IM792caf1L2UPOOMf65NFzjUH/ltyfwjAGfs1rsX1eftK0jC/KIg==", - "license": "MIT" - }, - "node_modules/@types/http-cache-semantics": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/@types/http-cache-semantics/-/http-cache-semantics-4.0.4.tgz", - "integrity": "sha512-1m0bIFVc7eJWyve9S0RnuRgcQqF/Xd5QsUZAZeQFr1Q3/p9JWoQQEqmVy+DPTNpGXwhgIetAoYF8JSc33q29QA==", - "license": "MIT" - }, - "node_modules/@types/http-errors": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/@types/http-errors/-/http-errors-2.0.4.tgz", - "integrity": "sha512-D0CFMMtydbJAegzOyHjtiKPLlvnm3iTZyZRSZoLq2mRhDdmLfIWOCYPfQJ4cu2erKghU++QvjcUjp/5h7hESpA==", - "license": "MIT" - }, - "node_modules/@types/http-proxy": { - "version": "1.17.15", - "resolved": "https://registry.npmjs.org/@types/http-proxy/-/http-proxy-1.17.15.tgz", - "integrity": "sha512-25g5atgiVNTIv0LBDTg1H74Hvayx0ajtJPLLcYE3whFv75J0pWNtOBzaXJQgDTmrX1bx5U9YC2w/n65BN1HwRQ==", - "license": "MIT", - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/istanbul-lib-coverage": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.6.tgz", - "integrity": "sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w==", - "license": "MIT" - }, - "node_modules/@types/istanbul-lib-report": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.3.tgz", - "integrity": "sha512-NQn7AHQnk/RSLOxrBbGyJM/aVQ+pjj5HCgasFxc0K/KhoATfQ/47AyUl15I2yBUpihjmas+a+VJBOqecrFH+uA==", - "license": "MIT", - "dependencies": { - "@types/istanbul-lib-coverage": "*" - } - }, - "node_modules/@types/istanbul-reports": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.4.tgz", - "integrity": "sha512-pk2B1NWalF9toCRu6gjBzR69syFjP4Od8WRAX+0mmf9lAjCRicLOWc+ZrxZHx/0XRjotgkF9t6iaMJ+aXcOdZQ==", - "license": "MIT", - "dependencies": { - "@types/istanbul-lib-report": "*" - } - }, - "node_modules/@types/json-schema": { - "version": "7.0.15", - "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", - "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", - "license": "MIT" - }, - "node_modules/@types/mdast": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-4.0.4.tgz", - "integrity": "sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==", - "license": "MIT", - "dependencies": { - "@types/unist": "*" - } - }, - "node_modules/@types/mdx": { - "version": 
"2.0.13", - "resolved": "https://registry.npmjs.org/@types/mdx/-/mdx-2.0.13.tgz", - "integrity": "sha512-+OWZQfAYyio6YkJb3HLxDrvnx6SWWDbC0zVPfBRzUk0/nqoDyf6dNxQi3eArPe8rJ473nobTMQ/8Zk+LxJ+Yuw==", - "license": "MIT" - }, - "node_modules/@types/mime": { - "version": "1.3.5", - "resolved": "https://registry.npmjs.org/@types/mime/-/mime-1.3.5.tgz", - "integrity": "sha512-/pyBZWSLD2n0dcHE3hq8s8ZvcETHtEuF+3E7XVt0Ig2nvsVQXdghHVcEkIWjy9A0wKfTn97a/PSDYohKIlnP/w==", - "license": "MIT" - }, - "node_modules/@types/ms": { - "version": "0.7.34", - "resolved": "https://registry.npmjs.org/@types/ms/-/ms-0.7.34.tgz", - "integrity": "sha512-nG96G3Wp6acyAgJqGasjODb+acrI7KltPiRxzHPXnP3NgI28bpQDRv53olbqGXbfcgF5aiiHmO3xpwEpS5Ld9g==", - "license": "MIT" - }, - "node_modules/@types/node": { - "version": "22.9.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-22.9.1.tgz", - "integrity": "sha512-p8Yy/8sw1caA8CdRIQBG5tiLHmxtQKObCijiAa9Ez+d4+PRffM4054xbju0msf+cvhJpnFEeNjxmVT/0ipktrg==", - "license": "MIT", - "dependencies": { - "undici-types": "~6.19.8" - } - }, - "node_modules/@types/node-forge": { - "version": "1.3.11", - "resolved": "https://registry.npmjs.org/@types/node-forge/-/node-forge-1.3.11.tgz", - "integrity": "sha512-FQx220y22OKNTqaByeBGqHWYz4cl94tpcxeFdvBo3wjG6XPBuZ0BNgNZRV5J5TFmmcsJ4IzsLkmGRiQbnYsBEQ==", - "license": "MIT", - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/prismjs": { - "version": "1.26.5", - "resolved": "https://registry.npmjs.org/@types/prismjs/-/prismjs-1.26.5.tgz", - "integrity": "sha512-AUZTa7hQ2KY5L7AmtSiqxlhWxb4ina0yd8hNbl4TWuqnv/pFP0nDMb3YrfSBf4hJVGLh2YEIBfKaBW/9UEl6IQ==", - "license": "MIT" - }, - "node_modules/@types/prop-types": { - "version": "15.7.13", - "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.13.tgz", - "integrity": "sha512-hCZTSvwbzWGvhqxp/RqVqwU999pBf2vp7hzIjiYOsl8wqOmUxkQ6ddw1cV3l8811+kdUFus/q4d1Y3E3SyEifA==", - "license": "MIT" - }, - "node_modules/@types/qs": { - "version": "6.9.17", - "resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.9.17.tgz", - "integrity": "sha512-rX4/bPcfmvxHDv0XjfJELTTr+iB+tn032nPILqHm5wbthUUUuVtNGGqzhya9XUxjTP8Fpr0qYgSZZKxGY++svQ==", - "license": "MIT" - }, - "node_modules/@types/range-parser": { - "version": "1.2.7", - "resolved": "https://registry.npmjs.org/@types/range-parser/-/range-parser-1.2.7.tgz", - "integrity": "sha512-hKormJbkJqzQGhziax5PItDUTMAM9uE2XXQmM37dyd4hVM+5aVl7oVxMVUiVQn2oCQFN/LKCZdvSM0pFRqbSmQ==", - "license": "MIT" - }, - "node_modules/@types/react": { - "version": "18.3.12", - "resolved": "https://registry.npmjs.org/@types/react/-/react-18.3.12.tgz", - "integrity": "sha512-D2wOSq/d6Agt28q7rSI3jhU7G6aiuzljDGZ2hTZHIkrTLUI+AF3WMeKkEZ9nN2fkBAlcktT6vcZjDFiIhMYEQw==", - "license": "MIT", - "dependencies": { - "@types/prop-types": "*", - "csstype": "^3.0.2" - } - }, - "node_modules/@types/react-router": { - "version": "5.1.20", - "resolved": "https://registry.npmjs.org/@types/react-router/-/react-router-5.1.20.tgz", - "integrity": "sha512-jGjmu/ZqS7FjSH6owMcD5qpq19+1RS9DeVRqfl1FeBMxTDQAGwlMWOcs52NDoXaNKyG3d1cYQFMs9rCrb88o9Q==", - "license": "MIT", - "dependencies": { - "@types/history": "^4.7.11", - "@types/react": "*" - } - }, - "node_modules/@types/react-router-config": { - "version": "5.0.11", - "resolved": "https://registry.npmjs.org/@types/react-router-config/-/react-router-config-5.0.11.tgz", - "integrity": "sha512-WmSAg7WgqW7m4x8Mt4N6ZyKz0BubSj/2tVUMsAHp+Yd2AMwcSbeFq9WympT19p5heCFmF97R9eD5uUR/t4HEqw==", - "license": "MIT", - "dependencies": { 
- "@types/history": "^4.7.11", - "@types/react": "*", - "@types/react-router": "^5.1.0" - } - }, - "node_modules/@types/react-router-dom": { - "version": "5.3.3", - "resolved": "https://registry.npmjs.org/@types/react-router-dom/-/react-router-dom-5.3.3.tgz", - "integrity": "sha512-kpqnYK4wcdm5UaWI3fLcELopqLrHgLqNsdpHauzlQktfkHL3npOSwtj1Uz9oKBAzs7lFtVkV8j83voAz2D8fhw==", - "license": "MIT", - "dependencies": { - "@types/history": "^4.7.11", - "@types/react": "*", - "@types/react-router": "*" - } - }, - "node_modules/@types/retry": { - "version": "0.12.0", - "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz", - "integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==", - "license": "MIT" - }, - "node_modules/@types/sax": { - "version": "1.2.7", - "resolved": "https://registry.npmjs.org/@types/sax/-/sax-1.2.7.tgz", - "integrity": "sha512-rO73L89PJxeYM3s3pPPjiPgVVcymqU490g0YO5n5By0k2Erzj6tay/4lr1CHAAU4JyOWd1rpQ8bCf6cZfHU96A==", - "license": "MIT", - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/send": { - "version": "0.17.4", - "resolved": "https://registry.npmjs.org/@types/send/-/send-0.17.4.tgz", - "integrity": "sha512-x2EM6TJOybec7c52BX0ZspPodMsQUd5L6PRwOunVyVUhXiBSKf3AezDL8Dgvgt5o0UfKNfuA0eMLr2wLT4AiBA==", - "license": "MIT", - "dependencies": { - "@types/mime": "^1", - "@types/node": "*" - } - }, - "node_modules/@types/serve-index": { - "version": "1.9.4", - "resolved": "https://registry.npmjs.org/@types/serve-index/-/serve-index-1.9.4.tgz", - "integrity": "sha512-qLpGZ/c2fhSs5gnYsQxtDEq3Oy8SXPClIXkW5ghvAvsNuVSA8k+gCONcUCS/UjLEYvYps+e8uBtfgXgvhwfNug==", - "license": "MIT", - "dependencies": { - "@types/express": "*" - } - }, - "node_modules/@types/serve-static": { - "version": "1.15.7", - "resolved": "https://registry.npmjs.org/@types/serve-static/-/serve-static-1.15.7.tgz", - "integrity": "sha512-W8Ym+h8nhuRwaKPaDw34QUkwsGi6Rc4yYqvKFo5rm2FUEhCFbzVWrxXUxuKK8TASjWsysJY0nsmNCGhCOIsrOw==", - "license": "MIT", - "dependencies": { - "@types/http-errors": "*", - "@types/node": "*", - "@types/send": "*" - } - }, - "node_modules/@types/sockjs": { - "version": "0.3.36", - "resolved": "https://registry.npmjs.org/@types/sockjs/-/sockjs-0.3.36.tgz", - "integrity": "sha512-MK9V6NzAS1+Ud7JV9lJLFqW85VbC9dq3LmwZCuBe4wBDgKC0Kj/jd8Xl+nSviU+Qc3+m7umHHyHg//2KSa0a0Q==", - "license": "MIT", - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/trusted-types": { - "version": "2.0.7", - "resolved": "https://registry.npmjs.org/@types/trusted-types/-/trusted-types-2.0.7.tgz", - "integrity": "sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw==", - "license": "MIT", - "optional": true - }, - "node_modules/@types/unist": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", - "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==", - "license": "MIT" - }, - "node_modules/@types/ws": { - "version": "8.5.13", - "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.13.tgz", - "integrity": "sha512-osM/gWBTPKgHV8XkTunnegTRIsvF6owmf5w+JtAfOw472dptdm0dlGv4xCt6GwQRcC2XVOvvRE/0bAoQcL2QkA==", - "license": "MIT", - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/yargs": { - "version": "17.0.33", - "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.33.tgz", - "integrity": 
"sha512-WpxBCKWPLr4xSsHgz511rFJAM+wS28w2zEO1QDNY5zM/S8ok70NNfztH0xwhqKyaK0OHCbN98LDAZuy1ctxDkA==", - "license": "MIT", - "dependencies": { - "@types/yargs-parser": "*" - } - }, - "node_modules/@types/yargs-parser": { - "version": "21.0.3", - "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.3.tgz", - "integrity": "sha512-I4q9QU9MQv4oEOz4tAHJtNz1cwuLxn2F3xcc2iV5WdqLPpUnj30aUuxt1mAxYTG+oe8CZMV/+6rU4S4gRDzqtQ==", - "license": "MIT" - }, - "node_modules/@ungap/structured-clone": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.2.0.tgz", - "integrity": "sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==", - "license": "ISC" - }, - "node_modules/@webassemblyjs/ast": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.14.1.tgz", - "integrity": "sha512-nuBEDgQfm1ccRp/8bCQrx1frohyufl4JlbMMZ4P1wpeOfDhF6FQkxZJ1b/e+PLwr6X1Nhw6OLme5usuBWYBvuQ==", - "license": "MIT", - "dependencies": { - "@webassemblyjs/helper-numbers": "1.13.2", - "@webassemblyjs/helper-wasm-bytecode": "1.13.2" - } - }, - "node_modules/@webassemblyjs/floating-point-hex-parser": { - "version": "1.13.2", - "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.13.2.tgz", - "integrity": "sha512-6oXyTOzbKxGH4steLbLNOu71Oj+C8Lg34n6CqRvqfS2O71BxY6ByfMDRhBytzknj9yGUPVJ1qIKhRlAwO1AovA==", - "license": "MIT" - }, - "node_modules/@webassemblyjs/helper-api-error": { - "version": "1.13.2", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.13.2.tgz", - "integrity": "sha512-U56GMYxy4ZQCbDZd6JuvvNV/WFildOjsaWD3Tzzvmw/mas3cXzRJPMjP83JqEsgSbyrmaGjBfDtV7KDXV9UzFQ==", - "license": "MIT" - }, - "node_modules/@webassemblyjs/helper-buffer": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.14.1.tgz", - "integrity": "sha512-jyH7wtcHiKssDtFPRB+iQdxlDf96m0E39yb0k5uJVhFGleZFoNw1c4aeIcVUPPbXUVJ94wwnMOAqUHyzoEPVMA==", - "license": "MIT" - }, - "node_modules/@webassemblyjs/helper-numbers": { - "version": "1.13.2", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-numbers/-/helper-numbers-1.13.2.tgz", - "integrity": "sha512-FE8aCmS5Q6eQYcV3gI35O4J789wlQA+7JrqTTpJqn5emA4U2hvwJmvFRC0HODS+3Ye6WioDklgd6scJ3+PLnEA==", - "license": "MIT", - "dependencies": { - "@webassemblyjs/floating-point-hex-parser": "1.13.2", - "@webassemblyjs/helper-api-error": "1.13.2", - "@xtuc/long": "4.2.2" - } - }, - "node_modules/@webassemblyjs/helper-wasm-bytecode": { - "version": "1.13.2", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.13.2.tgz", - "integrity": "sha512-3QbLKy93F0EAIXLh0ogEVR6rOubA9AoZ+WRYhNbFyuB70j3dRdwH9g+qXhLAO0kiYGlg3TxDV+I4rQTr/YNXkA==", - "license": "MIT" - }, - "node_modules/@webassemblyjs/helper-wasm-section": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.14.1.tgz", - "integrity": "sha512-ds5mXEqTJ6oxRoqjhWDU83OgzAYjwsCV8Lo/N+oRsNDmx/ZDpqalmrtgOMkHwxsG0iI//3BwWAErYRHtgn0dZw==", - "license": "MIT", - "dependencies": { - "@webassemblyjs/ast": "1.14.1", - "@webassemblyjs/helper-buffer": "1.14.1", - "@webassemblyjs/helper-wasm-bytecode": "1.13.2", - "@webassemblyjs/wasm-gen": "1.14.1" - } - }, - "node_modules/@webassemblyjs/ieee754": { - "version": "1.13.2", - "resolved": 
"https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.13.2.tgz", - "integrity": "sha512-4LtOzh58S/5lX4ITKxnAK2USuNEvpdVV9AlgGQb8rJDHaLeHciwG4zlGr0j/SNWlr7x3vO1lDEsuePvtcDNCkw==", - "license": "MIT", - "dependencies": { - "@xtuc/ieee754": "^1.2.0" - } - }, - "node_modules/@webassemblyjs/leb128": { - "version": "1.13.2", - "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.13.2.tgz", - "integrity": "sha512-Lde1oNoIdzVzdkNEAWZ1dZ5orIbff80YPdHx20mrHwHrVNNTjNr8E3xz9BdpcGqRQbAEa+fkrCb+fRFTl/6sQw==", - "license": "Apache-2.0", - "dependencies": { - "@xtuc/long": "4.2.2" - } - }, - "node_modules/@webassemblyjs/utf8": { - "version": "1.13.2", - "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.13.2.tgz", - "integrity": "sha512-3NQWGjKTASY1xV5m7Hr0iPeXD9+RDobLll3T9d2AO+g3my8xy5peVyjSag4I50mR1bBSN/Ct12lo+R9tJk0NZQ==", - "license": "MIT" - }, - "node_modules/@webassemblyjs/wasm-edit": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.14.1.tgz", - "integrity": "sha512-RNJUIQH/J8iA/1NzlE4N7KtyZNHi3w7at7hDjvRNm5rcUXa00z1vRz3glZoULfJ5mpvYhLybmVcwcjGrC1pRrQ==", - "license": "MIT", - "dependencies": { - "@webassemblyjs/ast": "1.14.1", - "@webassemblyjs/helper-buffer": "1.14.1", - "@webassemblyjs/helper-wasm-bytecode": "1.13.2", - "@webassemblyjs/helper-wasm-section": "1.14.1", - "@webassemblyjs/wasm-gen": "1.14.1", - "@webassemblyjs/wasm-opt": "1.14.1", - "@webassemblyjs/wasm-parser": "1.14.1", - "@webassemblyjs/wast-printer": "1.14.1" - } - }, - "node_modules/@webassemblyjs/wasm-gen": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.14.1.tgz", - "integrity": "sha512-AmomSIjP8ZbfGQhumkNvgC33AY7qtMCXnN6bL2u2Js4gVCg8fp735aEiMSBbDR7UQIj90n4wKAFUSEd0QN2Ukg==", - "license": "MIT", - "dependencies": { - "@webassemblyjs/ast": "1.14.1", - "@webassemblyjs/helper-wasm-bytecode": "1.13.2", - "@webassemblyjs/ieee754": "1.13.2", - "@webassemblyjs/leb128": "1.13.2", - "@webassemblyjs/utf8": "1.13.2" - } - }, - "node_modules/@webassemblyjs/wasm-opt": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.14.1.tgz", - "integrity": "sha512-PTcKLUNvBqnY2U6E5bdOQcSM+oVP/PmrDY9NzowJjislEjwP/C4an2303MCVS2Mg9d3AJpIGdUFIQQWbPds0Sw==", - "license": "MIT", - "dependencies": { - "@webassemblyjs/ast": "1.14.1", - "@webassemblyjs/helper-buffer": "1.14.1", - "@webassemblyjs/wasm-gen": "1.14.1", - "@webassemblyjs/wasm-parser": "1.14.1" - } - }, - "node_modules/@webassemblyjs/wasm-parser": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.14.1.tgz", - "integrity": "sha512-JLBl+KZ0R5qB7mCnud/yyX08jWFw5MsoalJ1pQ4EdFlgj9VdXKGuENGsiCIjegI1W7p91rUlcB/LB5yRJKNTcQ==", - "license": "MIT", - "dependencies": { - "@webassemblyjs/ast": "1.14.1", - "@webassemblyjs/helper-api-error": "1.13.2", - "@webassemblyjs/helper-wasm-bytecode": "1.13.2", - "@webassemblyjs/ieee754": "1.13.2", - "@webassemblyjs/leb128": "1.13.2", - "@webassemblyjs/utf8": "1.13.2" - } - }, - "node_modules/@webassemblyjs/wast-printer": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.14.1.tgz", - "integrity": "sha512-kPSSXE6De1XOR820C90RIo2ogvZG+c3KiHzqUoO/F34Y2shGzesfqv7o57xrxovZJH/MetF5UjroJ/R/3isoiw==", - "license": "MIT", - "dependencies": { - "@webassemblyjs/ast": "1.14.1", - "@xtuc/long": "4.2.2" - } - }, - "node_modules/@xtuc/ieee754": { - 
"version": "1.2.0", - "resolved": "https://registry.npmjs.org/@xtuc/ieee754/-/ieee754-1.2.0.tgz", - "integrity": "sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==", - "license": "BSD-3-Clause" - }, - "node_modules/@xtuc/long": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/@xtuc/long/-/long-4.2.2.tgz", - "integrity": "sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==", - "license": "Apache-2.0" - }, - "node_modules/accepts": { - "version": "1.3.8", - "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", - "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==", - "license": "MIT", - "dependencies": { - "mime-types": "~2.1.34", - "negotiator": "0.6.3" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/accepts/node_modules/mime-db": { - "version": "1.52.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", - "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/accepts/node_modules/mime-types": { - "version": "2.1.35", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", - "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", - "license": "MIT", - "dependencies": { - "mime-db": "1.52.0" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/accepts/node_modules/negotiator": { - "version": "0.6.3", - "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", - "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/acorn": { - "version": "8.14.0", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.14.0.tgz", - "integrity": "sha512-cl669nCJTZBsL97OF4kUQm5g5hC2uihk0NxY3WENAC0TYdILVkAyHymAntgxGkl7K+t0cXIrH5siy5S4XkFycA==", - "license": "MIT", - "bin": { - "acorn": "bin/acorn" - }, - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/acorn-jsx": { - "version": "5.3.2", - "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", - "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", - "license": "MIT", - "peerDependencies": { - "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" - } - }, - "node_modules/acorn-walk": { - "version": "8.3.4", - "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.4.tgz", - "integrity": "sha512-ueEepnujpqee2o5aIYnvHU6C0A42MNdsIDeqy5BydrkuC5R1ZuUFnm27EeFJGoEHJQgn3uleRvmTXaJgfXbt4g==", - "license": "MIT", - "dependencies": { - "acorn": "^8.11.0" - }, - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/address": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/address/-/address-1.2.2.tgz", - "integrity": "sha512-4B/qKCfeE/ODUaAUpSwfzazo5x29WD4r3vXiWsB7I2mSDAihwEqKO+g8GELZUQSSAo5e1XTYh3ZVfLyxBc12nA==", - "license": "MIT", - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/aggregate-error": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz", - "integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==", - "license": "MIT", - "dependencies": { - 
"clean-stack": "^2.0.0", - "indent-string": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/ajv": { - "version": "8.17.1", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.17.1.tgz", - "integrity": "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==", - "license": "MIT", - "dependencies": { - "fast-deep-equal": "^3.1.3", - "fast-uri": "^3.0.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/ajv-formats": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ajv-formats/-/ajv-formats-2.1.1.tgz", - "integrity": "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==", - "license": "MIT", - "dependencies": { - "ajv": "^8.0.0" - }, - "peerDependencies": { - "ajv": "^8.0.0" - }, - "peerDependenciesMeta": { - "ajv": { - "optional": true - } - } - }, - "node_modules/ajv-keywords": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz", - "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", - "license": "MIT", - "dependencies": { - "fast-deep-equal": "^3.1.3" - }, - "peerDependencies": { - "ajv": "^8.8.2" - } - }, - "node_modules/algoliasearch": { - "version": "4.24.0", - "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-4.24.0.tgz", - "integrity": "sha512-bf0QV/9jVejssFBmz2HQLxUadxk574t4iwjCKp5E7NBzwKkrDEhKPISIIjAU/p6K5qDx3qoeh4+26zWN1jmw3g==", - "license": "MIT", - "dependencies": { - "@algolia/cache-browser-local-storage": "4.24.0", - "@algolia/cache-common": "4.24.0", - "@algolia/cache-in-memory": "4.24.0", - "@algolia/client-account": "4.24.0", - "@algolia/client-analytics": "4.24.0", - "@algolia/client-common": "4.24.0", - "@algolia/client-personalization": "4.24.0", - "@algolia/client-search": "4.24.0", - "@algolia/logger-common": "4.24.0", - "@algolia/logger-console": "4.24.0", - "@algolia/recommend": "4.24.0", - "@algolia/requester-browser-xhr": "4.24.0", - "@algolia/requester-common": "4.24.0", - "@algolia/requester-node-http": "4.24.0", - "@algolia/transporter": "4.24.0" - } - }, - "node_modules/algoliasearch-helper": { - "version": "3.23.0", - "resolved": "https://registry.npmjs.org/algoliasearch-helper/-/algoliasearch-helper-3.23.0.tgz", - "integrity": "sha512-8CK4Gb/ju4OesAYcS+mjBpNiVA7ILWpg7D2vhBZohh0YkG8QT1KZ9LG+8+EntQBUGoKtPy06OFhiwP4f5zzAQg==", - "license": "MIT", - "dependencies": { - "@algolia/events": "^4.0.1" - }, - "peerDependencies": { - "algoliasearch": ">= 3.1 < 6" - } - }, - "node_modules/ansi-align": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-3.0.1.tgz", - "integrity": "sha512-IOfwwBF5iczOjp/WeY4YxyjqAFMQoZufdQWDd19SEExbVLNXqvpzSJ/M7Za4/sCPmQ0+GRquoA7bGcINcxew6w==", - "license": "ISC", - "dependencies": { - "string-width": "^4.1.0" - } - }, - "node_modules/ansi-align/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "license": "MIT" - }, - "node_modules/ansi-align/node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": 
"sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "license": "MIT", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/ansi-escapes": { - "version": "4.3.2", - "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", - "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", - "license": "MIT", - "dependencies": { - "type-fest": "^0.21.3" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/ansi-escapes/node_modules/type-fest": { - "version": "0.21.3", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz", - "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==", - "license": "(MIT OR CC0-1.0)", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/ansi-html-community": { - "version": "0.0.8", - "resolved": "https://registry.npmjs.org/ansi-html-community/-/ansi-html-community-0.0.8.tgz", - "integrity": "sha512-1APHAyr3+PCamwNw3bXCPp4HFLONZt/yIH0sZp0/469KWNTEy+qN5jQ3GVX6DMZ1UXAi34yVwtTeaG/HpBuuzw==", - "engines": [ - "node >= 0.8.0" - ], - "license": "Apache-2.0", - "bin": { - "ansi-html": "bin/ansi-html" - } - }, - "node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "license": "MIT", - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/anymatch": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", - "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", - "license": "ISC", - "dependencies": { - "normalize-path": "^3.0.0", - "picomatch": "^2.0.4" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/arg": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz", - "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==", - "license": "MIT" - }, - "node_modules/argparse": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", - "license": "Python-2.0" - }, - "node_modules/array-flatten": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", - "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==", - "license": "MIT" - }, - "node_modules/array-union": { - "version": "2.1.0", - "resolved": 
"https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", - "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/asciinema-player": { - "version": "3.10.0", - "resolved": "https://registry.npmjs.org/asciinema-player/-/asciinema-player-3.10.0.tgz", - "integrity": "sha512-shoOK6F606nDKZxDVM7JuGSCAyWLePoGRFNlV+FqiP5Sqvyn0BlE7wlbjZyd2X4P1iRhv/HKfVNtnQIxmgphRA==", - "license": "Apache-2.0", - "dependencies": { - "@babel/runtime": "^7.21.0", - "solid-js": "^1.3.0" - } - }, - "node_modules/astring": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/astring/-/astring-1.9.0.tgz", - "integrity": "sha512-LElXdjswlqjWrPpJFg1Fx4wpkOCxj1TDHlSV4PlaRxHGWko024xICaa97ZkMfs6DRKlCguiAI+rbXv5GWwXIkg==", - "license": "MIT", - "bin": { - "astring": "bin/astring" - } - }, - "node_modules/autoprefixer": { - "version": "10.4.20", - "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.20.tgz", - "integrity": "sha512-XY25y5xSv/wEoqzDyXXME4AFfkZI0P23z6Fs3YgymDnKJkCGOnkL0iTxCa85UTqaSgfcqyf3UA6+c7wUvx/16g==", - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/autoprefixer" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "license": "MIT", - "dependencies": { - "browserslist": "^4.23.3", - "caniuse-lite": "^1.0.30001646", - "fraction.js": "^4.3.7", - "normalize-range": "^0.1.2", - "picocolors": "^1.0.1", - "postcss-value-parser": "^4.2.0" - }, - "bin": { - "autoprefixer": "bin/autoprefixer" - }, - "engines": { - "node": "^10 || ^12 || >=14" - }, - "peerDependencies": { - "postcss": "^8.1.0" - } - }, - "node_modules/babel-loader": { - "version": "9.2.1", - "resolved": "https://registry.npmjs.org/babel-loader/-/babel-loader-9.2.1.tgz", - "integrity": "sha512-fqe8naHt46e0yIdkjUZYqddSXfej3AHajX+CSO5X7oy0EmPc6o5Xh+RClNoHjnieWz9AW4kZxW9yyFMhVB1QLA==", - "license": "MIT", - "dependencies": { - "find-cache-dir": "^4.0.0", - "schema-utils": "^4.0.0" - }, - "engines": { - "node": ">= 14.15.0" - }, - "peerDependencies": { - "@babel/core": "^7.12.0", - "webpack": ">=5" - } - }, - "node_modules/babel-plugin-dynamic-import-node": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.3.tgz", - "integrity": "sha512-jZVI+s9Zg3IqA/kdi0i6UDCybUI3aSBLnglhYbSSjKlV7yF1F/5LWv8MakQmvYpnbJDS6fcBL2KzHSxNCMtWSQ==", - "license": "MIT", - "dependencies": { - "object.assign": "^4.1.0" - } - }, - "node_modules/babel-plugin-polyfill-corejs2": { - "version": "0.4.12", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.4.12.tgz", - "integrity": "sha512-CPWT6BwvhrTO2d8QVorhTCQw9Y43zOu7G9HigcfxvepOU6b8o3tcWad6oVgZIsZCTt42FFv97aA7ZJsbM4+8og==", - "license": "MIT", - "dependencies": { - "@babel/compat-data": "^7.22.6", - "@babel/helper-define-polyfill-provider": "^0.6.3", - "semver": "^6.3.1" - }, - "peerDependencies": { - "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" - } - }, - "node_modules/babel-plugin-polyfill-corejs2/node_modules/semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", - "license": "ISC", - "bin": { - "semver": 
"bin/semver.js" - } - }, - "node_modules/babel-plugin-polyfill-corejs3": { - "version": "0.10.6", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.10.6.tgz", - "integrity": "sha512-b37+KR2i/khY5sKmWNVQAnitvquQbNdWy6lJdsr0kmquCKEEUgMKK4SboVM3HtfnZilfjr4MMQ7vY58FVWDtIA==", - "license": "MIT", - "dependencies": { - "@babel/helper-define-polyfill-provider": "^0.6.2", - "core-js-compat": "^3.38.0" - }, - "peerDependencies": { - "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" - } - }, - "node_modules/babel-plugin-polyfill-regenerator": { - "version": "0.6.3", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.6.3.tgz", - "integrity": "sha512-LiWSbl4CRSIa5x/JAU6jZiG9eit9w6mz+yVMFwDE83LAWvt0AfGBoZ7HS/mkhrKuh2ZlzfVZYKoLjXdqw6Yt7Q==", - "license": "MIT", - "dependencies": { - "@babel/helper-define-polyfill-provider": "^0.6.3" - }, - "peerDependencies": { - "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" - } - }, - "node_modules/bail": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz", - "integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/balanced-match": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", - "license": "MIT" - }, - "node_modules/batch": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/batch/-/batch-0.6.1.tgz", - "integrity": "sha512-x+VAiMRL6UPkx+kudNvxTl6hB2XNNCG2r+7wixVfIYwu/2HKRXimwQyaumLjMveWvT2Hkd/cAJw+QBMfJ/EKVw==", - "license": "MIT" - }, - "node_modules/big.js": { - "version": "5.2.2", - "resolved": "https://registry.npmjs.org/big.js/-/big.js-5.2.2.tgz", - "integrity": "sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ==", - "license": "MIT", - "engines": { - "node": "*" - } - }, - "node_modules/binary-extensions": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", - "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", - "license": "MIT", - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/body-parser": { - "version": "1.20.3", - "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.3.tgz", - "integrity": "sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==", - "license": "MIT", - "dependencies": { - "bytes": "3.1.2", - "content-type": "~1.0.5", - "debug": "2.6.9", - "depd": "2.0.0", - "destroy": "1.2.0", - "http-errors": "2.0.0", - "iconv-lite": "0.4.24", - "on-finished": "2.4.1", - "qs": "6.13.0", - "raw-body": "2.5.2", - "type-is": "~1.6.18", - "unpipe": "1.0.0" - }, - "engines": { - "node": ">= 0.8", - "npm": "1.2.8000 || >= 1.4.16" - } - }, - "node_modules/body-parser/node_modules/bytes": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", - "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - 
"node_modules/body-parser/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "license": "MIT", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/body-parser/node_modules/iconv-lite": { - "version": "0.4.24", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", - "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", - "license": "MIT", - "dependencies": { - "safer-buffer": ">= 2.1.2 < 3" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/body-parser/node_modules/ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", - "license": "MIT" - }, - "node_modules/bonjour-service": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/bonjour-service/-/bonjour-service-1.3.0.tgz", - "integrity": "sha512-3YuAUiSkWykd+2Azjgyxei8OWf8thdn8AITIog2M4UICzoqfjlqr64WIjEXZllf/W6vK1goqleSR6brGomxQqA==", - "license": "MIT", - "dependencies": { - "fast-deep-equal": "^3.1.3", - "multicast-dns": "^7.2.5" - } - }, - "node_modules/boolbase": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz", - "integrity": "sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==", - "license": "ISC" - }, - "node_modules/boxen": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/boxen/-/boxen-6.2.1.tgz", - "integrity": "sha512-H4PEsJXfFI/Pt8sjDWbHlQPx4zL/bvSQjcilJmaulGt5mLDorHOHpmdXAJcBcmru7PhYSp/cDMWRko4ZUMFkSw==", - "license": "MIT", - "dependencies": { - "ansi-align": "^3.0.1", - "camelcase": "^6.2.0", - "chalk": "^4.1.2", - "cli-boxes": "^3.0.0", - "string-width": "^5.0.1", - "type-fest": "^2.5.0", - "widest-line": "^4.0.1", - "wrap-ansi": "^8.0.1" - }, - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "license": "MIT", - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "node_modules/braces": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", - "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", - "license": "MIT", - "dependencies": { - "fill-range": "^7.1.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/browserslist": { - "version": "4.24.2", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.24.2.tgz", - "integrity": "sha512-ZIc+Q62revdMcqC6aChtW4jz3My3klmCO1fEmINZY/8J3EpBg5/A/D0AKmBveUh6pgoeycoMkVMko84tuYS+Gg==", - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/browserslist" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/browserslist" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "license": "MIT", - "dependencies": { - "caniuse-lite": "^1.0.30001669", - 
"electron-to-chromium": "^1.5.41", - "node-releases": "^2.0.18", - "update-browserslist-db": "^1.1.1" - }, - "bin": { - "browserslist": "cli.js" - }, - "engines": { - "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" - } - }, - "node_modules/buffer-from": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", - "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", - "license": "MIT" - }, - "node_modules/bytes": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz", - "integrity": "sha512-pMhOfFDPiv9t5jjIXkHosWmkSyQbvsgEVNkz0ERHbuLh2T/7j4Mqqpz523Fe8MVY89KC6Sh/QfS2sM+SjgFDcw==", - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/cacheable-lookup": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/cacheable-lookup/-/cacheable-lookup-7.0.0.tgz", - "integrity": "sha512-+qJyx4xiKra8mZrcwhjMRMUhD5NR1R8esPkzIYxX96JiecFoxAXFuz/GpR3+ev4PE1WamHip78wV0vcmPQtp8w==", - "license": "MIT", - "engines": { - "node": ">=14.16" - } - }, - "node_modules/cacheable-request": { - "version": "10.2.14", - "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-10.2.14.tgz", - "integrity": "sha512-zkDT5WAF4hSSoUgyfg5tFIxz8XQK+25W/TLVojJTMKBaxevLBBtLxgqguAuVQB8PVW79FVjHcU+GJ9tVbDZ9mQ==", - "license": "MIT", - "dependencies": { - "@types/http-cache-semantics": "^4.0.2", - "get-stream": "^6.0.1", - "http-cache-semantics": "^4.1.1", - "keyv": "^4.5.3", - "mimic-response": "^4.0.0", - "normalize-url": "^8.0.0", - "responselike": "^3.0.0" - }, - "engines": { - "node": ">=14.16" - } - }, - "node_modules/call-bind": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz", - "integrity": "sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==", - "license": "MIT", - "dependencies": { - "es-define-property": "^1.0.0", - "es-errors": "^1.3.0", - "function-bind": "^1.1.2", - "get-intrinsic": "^1.2.4", - "set-function-length": "^1.2.1" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/callsites": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", - "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/camel-case": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/camel-case/-/camel-case-4.1.2.tgz", - "integrity": "sha512-gxGWBrTT1JuMx6R+o5PTXMmUnhnVzLQ9SNutD4YqKtI6ap897t3tKECYla6gCWEkplXnlNybEkZg9GEGxKFCgw==", - "license": "MIT", - "dependencies": { - "pascal-case": "^3.1.2", - "tslib": "^2.0.3" - } - }, - "node_modules/camelcase": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", - "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/caniuse-api": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/caniuse-api/-/caniuse-api-3.0.0.tgz", - "integrity": "sha512-bsTwuIg/BZZK/vreVTYYbSWoe2F+71P7K5QGEX+pT250DZbfU1MQ5prOKpPR+LL6uWKK3KMwMCAS74QB3Um1uw==", - "license": "MIT", - "dependencies": { - 
"browserslist": "^4.0.0", - "caniuse-lite": "^1.0.0", - "lodash.memoize": "^4.1.2", - "lodash.uniq": "^4.5.0" - } - }, - "node_modules/caniuse-lite": { - "version": "1.0.30001680", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001680.tgz", - "integrity": "sha512-rPQy70G6AGUMnbwS1z6Xg+RkHYPAi18ihs47GH0jcxIG7wArmPgY3XbS2sRdBbxJljp3thdT8BIqv9ccCypiPA==", - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/browserslist" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/caniuse-lite" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "license": "CC-BY-4.0" - }, - "node_modules/ccount": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/ccount/-/ccount-2.0.1.tgz", - "integrity": "sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "license": "MIT", - "dependencies": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/chalk?sponsor=1" - } - }, - "node_modules/char-regex": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/char-regex/-/char-regex-1.0.2.tgz", - "integrity": "sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==", - "license": "MIT", - "engines": { - "node": ">=10" - } - }, - "node_modules/character-entities": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-2.0.2.tgz", - "integrity": "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/character-entities-html4": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/character-entities-html4/-/character-entities-html4-2.1.0.tgz", - "integrity": "sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA==", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/character-entities-legacy": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-3.0.0.tgz", - "integrity": "sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ==", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/character-reference-invalid": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-2.0.1.tgz", - "integrity": "sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw==", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/cheerio": { - "version": "1.0.0-rc.12", - "resolved": "https://registry.npmjs.org/cheerio/-/cheerio-1.0.0-rc.12.tgz", - "integrity": 
"sha512-VqR8m68vM46BNnuZ5NtnGBKIE/DfN0cRIzg9n40EIq9NOv90ayxLBXA8fXC5gquFRGJSTRqBq25Jt2ECLR431Q==", - "license": "MIT", - "dependencies": { - "cheerio-select": "^2.1.0", - "dom-serializer": "^2.0.0", - "domhandler": "^5.0.3", - "domutils": "^3.0.1", - "htmlparser2": "^8.0.1", - "parse5": "^7.0.0", - "parse5-htmlparser2-tree-adapter": "^7.0.0" - }, - "engines": { - "node": ">= 6" - }, - "funding": { - "url": "https://github.com/cheeriojs/cheerio?sponsor=1" - } - }, - "node_modules/cheerio-select": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/cheerio-select/-/cheerio-select-2.1.0.tgz", - "integrity": "sha512-9v9kG0LvzrlcungtnJtpGNxY+fzECQKhK4EGJX2vByejiMX84MFNQw4UxPJl3bFbTMw+Dfs37XaIkCwTZfLh4g==", - "license": "BSD-2-Clause", - "dependencies": { - "boolbase": "^1.0.0", - "css-select": "^5.1.0", - "css-what": "^6.1.0", - "domelementtype": "^2.3.0", - "domhandler": "^5.0.3", - "domutils": "^3.0.1" - }, - "funding": { - "url": "https://github.com/sponsors/fb55" - } - }, - "node_modules/chevrotain": { - "version": "11.0.3", - "resolved": "https://registry.npmjs.org/chevrotain/-/chevrotain-11.0.3.tgz", - "integrity": "sha512-ci2iJH6LeIkvP9eJW6gpueU8cnZhv85ELY8w8WiFtNjMHA5ad6pQLaJo9mEly/9qUyCpvqX8/POVUTf18/HFdw==", - "license": "Apache-2.0", - "dependencies": { - "@chevrotain/cst-dts-gen": "11.0.3", - "@chevrotain/gast": "11.0.3", - "@chevrotain/regexp-to-ast": "11.0.3", - "@chevrotain/types": "11.0.3", - "@chevrotain/utils": "11.0.3", - "lodash-es": "4.17.21" - } - }, - "node_modules/chevrotain-allstar": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/chevrotain-allstar/-/chevrotain-allstar-0.3.1.tgz", - "integrity": "sha512-b7g+y9A0v4mxCW1qUhf3BSVPg+/NvGErk/dOkrDaHA0nQIQGAtrOjlX//9OQtRlSCy+x9rfB5N8yC71lH1nvMw==", - "license": "MIT", - "dependencies": { - "lodash-es": "^4.17.21" - }, - "peerDependencies": { - "chevrotain": "^11.0.0" - } - }, - "node_modules/chokidar": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", - "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", - "license": "MIT", - "dependencies": { - "anymatch": "~3.1.2", - "braces": "~3.0.2", - "glob-parent": "~5.1.2", - "is-binary-path": "~2.1.0", - "is-glob": "~4.0.1", - "normalize-path": "~3.0.0", - "readdirp": "~3.6.0" - }, - "engines": { - "node": ">= 8.10.0" - }, - "funding": { - "url": "https://paulmillr.com/funding/" - }, - "optionalDependencies": { - "fsevents": "~2.3.2" - } - }, - "node_modules/chrome-trace-event": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/chrome-trace-event/-/chrome-trace-event-1.0.4.tgz", - "integrity": "sha512-rNjApaLzuwaOTjCiT8lSDdGN1APCiqkChLMJxJPWLunPAt5fy8xgU9/jNOchV84wfIxrA0lRQB7oCT8jrn/wrQ==", - "license": "MIT", - "engines": { - "node": ">=6.0" - } - }, - "node_modules/ci-info": { - "version": "3.9.0", - "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz", - "integrity": "sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/sibiraj-s" - } - ], - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/clean-css": { - "version": "5.3.3", - "resolved": "https://registry.npmjs.org/clean-css/-/clean-css-5.3.3.tgz", - "integrity": "sha512-D5J+kHaVb/wKSFcyyV75uCn8fiY4sV38XJoe4CUyGQ+mOU/fMVYUdH1hJC+CJQ5uY3EnW27SbJYS4X8BiLrAFg==", - "license": "MIT", - "dependencies": { - "source-map": 
"~0.6.0" - }, - "engines": { - "node": ">= 10.0" - } - }, - "node_modules/clean-css/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "license": "BSD-3-Clause", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/clean-stack": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", - "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/cli-boxes": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-3.0.0.tgz", - "integrity": "sha512-/lzGpEWL/8PfI0BmBOPRwp0c/wFNX1RdUML3jK/RcSBA9T8mZDdQpqYBKtCFTOfQbwPqWEOpjqW+Fnayc0969g==", - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/cli-table3": { - "version": "0.6.5", - "resolved": "https://registry.npmjs.org/cli-table3/-/cli-table3-0.6.5.tgz", - "integrity": "sha512-+W/5efTR7y5HRD7gACw9yQjqMVvEMLBHmboM/kPWam+H+Hmyrgjh6YncVKK122YZkXrLudzTuAukUw9FnMf7IQ==", - "license": "MIT", - "dependencies": { - "string-width": "^4.2.0" - }, - "engines": { - "node": "10.* || >= 12.*" - }, - "optionalDependencies": { - "@colors/colors": "1.5.0" - } - }, - "node_modules/cli-table3/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "license": "MIT" - }, - "node_modules/cli-table3/node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "license": "MIT", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/clone-deep": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/clone-deep/-/clone-deep-4.0.1.tgz", - "integrity": "sha512-neHB9xuzh/wk0dIHweyAXv2aPGZIVk3pLMe+/RNzINf17fe0OG96QroktYAUm7SM1PBnzTabaLboqqxDyMU+SQ==", - "license": "MIT", - "dependencies": { - "is-plain-object": "^2.0.4", - "kind-of": "^6.0.2", - "shallow-clone": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/clsx": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz", - "integrity": "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/collapse-white-space": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/collapse-white-space/-/collapse-white-space-2.1.0.tgz", - "integrity": "sha512-loKTxY1zCOuG4j9f6EPnuyyYkf58RnhhWTvRoZEokgB+WbdXehfjFviyOVYkqzEWz1Q5kRiZdBYS5SwxbQYwzw==", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": 
"sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "license": "MIT", - "dependencies": { - "color-name": "~1.1.4" - }, - "engines": { - "node": ">=7.0.0" - } - }, - "node_modules/color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "license": "MIT" - }, - "node_modules/colord": { - "version": "2.9.3", - "resolved": "https://registry.npmjs.org/colord/-/colord-2.9.3.tgz", - "integrity": "sha512-jeC1axXpnb0/2nn/Y1LPuLdgXBLH7aDcHu4KEKfqw3CUhX7ZpfBSlPKyqXE6btIgEzfWtrX3/tyBCaCvXvMkOw==", - "license": "MIT" - }, - "node_modules/colorette": { - "version": "2.0.20", - "resolved": "https://registry.npmjs.org/colorette/-/colorette-2.0.20.tgz", - "integrity": "sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w==", - "license": "MIT" - }, - "node_modules/combine-promises": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/combine-promises/-/combine-promises-1.2.0.tgz", - "integrity": "sha512-VcQB1ziGD0NXrhKxiwyNbCDmRzs/OShMs2GqW2DlU2A/Sd0nQxE1oWDAE5O0ygSx5mgQOn9eIFh7yKPgFRVkPQ==", - "license": "MIT", - "engines": { - "node": ">=10" - } - }, - "node_modules/comma-separated-tokens": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-2.0.3.tgz", - "integrity": "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/commander": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-5.1.0.tgz", - "integrity": "sha512-P0CysNDQ7rtVw4QIQtm+MRxV66vKFSvlsQvGYXZWR3qFU0jlMKHZZZgw8e+8DSah4UDKMqnknRDQz+xuQXQ/Zg==", - "license": "MIT", - "engines": { - "node": ">= 6" - } - }, - "node_modules/common-path-prefix": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/common-path-prefix/-/common-path-prefix-3.0.0.tgz", - "integrity": "sha512-QE33hToZseCH3jS0qN96O/bSh3kaw/h+Tq7ngyY9eWDUnTlTNUyqfqvCXioLe5Na5jFsL78ra/wuBU4iuEgd4w==", - "license": "ISC" - }, - "node_modules/compressible": { - "version": "2.0.18", - "resolved": "https://registry.npmjs.org/compressible/-/compressible-2.0.18.tgz", - "integrity": "sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==", - "license": "MIT", - "dependencies": { - "mime-db": ">= 1.43.0 < 2" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/compressible/node_modules/mime-db": { - "version": "1.53.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.53.0.tgz", - "integrity": "sha512-oHlN/w+3MQ3rba9rqFr6V/ypF10LSkdwUysQL7GkXoTgIWeV+tcXGA852TBxH+gsh8UWoyhR1hKcoMJTuWflpg==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/compression": { - "version": "1.7.5", - "resolved": "https://registry.npmjs.org/compression/-/compression-1.7.5.tgz", - "integrity": "sha512-bQJ0YRck5ak3LgtnpKkiabX5pNF7tMUh1BSy2ZBOTh0Dim0BUu6aPPwByIns6/A5Prh8PufSPerMDUklpzes2Q==", - "license": "MIT", - "dependencies": { - "bytes": "3.1.2", - "compressible": "~2.0.18", - "debug": "2.6.9", - "negotiator": "~0.6.4", - "on-headers": "~1.0.2", - "safe-buffer": "5.2.1", - "vary": "~1.1.2" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - 
"node_modules/compression/node_modules/bytes": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", - "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/compression/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "license": "MIT", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/compression/node_modules/ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", - "license": "MIT" - }, - "node_modules/concat-map": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", - "license": "MIT" - }, - "node_modules/confbox": { - "version": "0.1.8", - "resolved": "https://registry.npmjs.org/confbox/-/confbox-0.1.8.tgz", - "integrity": "sha512-RMtmw0iFkeR4YV+fUOSucriAQNb9g8zFR52MWCtl+cCZOFRNL6zeB395vPzFhEjjn4fMxXudmELnl/KF/WrK6w==", - "license": "MIT" - }, - "node_modules/config-chain": { - "version": "1.1.13", - "resolved": "https://registry.npmjs.org/config-chain/-/config-chain-1.1.13.tgz", - "integrity": "sha512-qj+f8APARXHrM0hraqXYb2/bOVSV4PvJQlNZ/DVj0QrmNM2q2euizkeuVckQ57J+W0mRH6Hvi+k50M4Jul2VRQ==", - "license": "MIT", - "dependencies": { - "ini": "^1.3.4", - "proto-list": "~1.2.1" - } - }, - "node_modules/configstore": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/configstore/-/configstore-6.0.0.tgz", - "integrity": "sha512-cD31W1v3GqUlQvbBCGcXmd2Nj9SvLDOP1oQ0YFuLETufzSPaKp11rYBsSOm7rCsW3OnIRAFM3OxRhceaXNYHkA==", - "license": "BSD-2-Clause", - "dependencies": { - "dot-prop": "^6.0.1", - "graceful-fs": "^4.2.6", - "unique-string": "^3.0.0", - "write-file-atomic": "^3.0.3", - "xdg-basedir": "^5.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/yeoman/configstore?sponsor=1" - } - }, - "node_modules/connect-history-api-fallback": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/connect-history-api-fallback/-/connect-history-api-fallback-2.0.0.tgz", - "integrity": "sha512-U73+6lQFmfiNPrYbXqr6kZ1i1wiRqXnp2nhMsINseWXO8lDau0LGEffJ8kQi4EjLZympVgRdvqjAgiZ1tgzDDA==", - "license": "MIT", - "engines": { - "node": ">=0.8" - } - }, - "node_modules/consola": { - "version": "3.2.3", - "resolved": "https://registry.npmjs.org/consola/-/consola-3.2.3.tgz", - "integrity": "sha512-I5qxpzLv+sJhTVEoLYNcTW+bThDCPsit0vLNKShZx6rLtpilNpmmeTPaeqJb9ZE9dV3DGaeby6Vuhrw38WjeyQ==", - "license": "MIT", - "engines": { - "node": "^14.18.0 || >=16.10.0" - } - }, - "node_modules/content-disposition": { - "version": "0.5.2", - "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.2.tgz", - "integrity": "sha512-kRGRZw3bLlFISDBgwTSA1TMBFN6J6GWDeubmDE3AF+3+yXL8hTWv8r5rkLbqYXY4RjPk/EzHnClI3zQf1cFmHA==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/content-type": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", - "integrity": 
"sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/convert-source-map": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", - "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", - "license": "MIT" - }, - "node_modules/cookie": { - "version": "0.7.1", - "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.1.tgz", - "integrity": "sha512-6DnInpx7SJ2AK3+CTUE/ZM0vWTUboZCegxhC2xiIydHR9jNuTAASBrfEpHhiGOZw/nX51bHt6YQl8jsGo4y/0w==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/cookie-signature": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", - "integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==", - "license": "MIT" - }, - "node_modules/copy-text-to-clipboard": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/copy-text-to-clipboard/-/copy-text-to-clipboard-3.2.0.tgz", - "integrity": "sha512-RnJFp1XR/LOBDckxTib5Qjr/PMfkatD0MUCQgdpqS8MdKiNUzBjAQBEN6oUy+jW7LI93BBG3DtMB2KOOKpGs2Q==", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/copy-webpack-plugin": { - "version": "11.0.0", - "resolved": "https://registry.npmjs.org/copy-webpack-plugin/-/copy-webpack-plugin-11.0.0.tgz", - "integrity": "sha512-fX2MWpamkW0hZxMEg0+mYnA40LTosOSa5TqZ9GYIBzyJa9C3QUaMPSE2xAi/buNr8u89SfD9wHSQVBzrRa/SOQ==", - "license": "MIT", - "dependencies": { - "fast-glob": "^3.2.11", - "glob-parent": "^6.0.1", - "globby": "^13.1.1", - "normalize-path": "^3.0.0", - "schema-utils": "^4.0.0", - "serialize-javascript": "^6.0.0" - }, - "engines": { - "node": ">= 14.15.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "webpack": "^5.1.0" - } - }, - "node_modules/copy-webpack-plugin/node_modules/glob-parent": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", - "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", - "license": "ISC", - "dependencies": { - "is-glob": "^4.0.3" - }, - "engines": { - "node": ">=10.13.0" - } - }, - "node_modules/copy-webpack-plugin/node_modules/globby": { - "version": "13.2.2", - "resolved": "https://registry.npmjs.org/globby/-/globby-13.2.2.tgz", - "integrity": "sha512-Y1zNGV+pzQdh7H39l9zgB4PJqjRNqydvdYCDG4HFXM4XuvSaQQlEc91IU1yALL8gUTDomgBAfz3XJdmUS+oo0w==", - "license": "MIT", - "dependencies": { - "dir-glob": "^3.0.1", - "fast-glob": "^3.3.0", - "ignore": "^5.2.4", - "merge2": "^1.4.1", - "slash": "^4.0.0" - }, - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/copy-webpack-plugin/node_modules/slash": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-4.0.0.tgz", - "integrity": "sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew==", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/core-js": { - "version": "3.39.0", - "resolved": 
"https://registry.npmjs.org/core-js/-/core-js-3.39.0.tgz", - "integrity": "sha512-raM0ew0/jJUqkJ0E6e8UDtl+y/7ktFivgWvqw8dNSQeNWoSDLvQ1H/RN3aPXB9tBd4/FhyR4RDPGhsNIMsAn7g==", - "hasInstallScript": true, - "license": "MIT", - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/core-js" - } - }, - "node_modules/core-js-compat": { - "version": "3.39.0", - "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.39.0.tgz", - "integrity": "sha512-VgEUx3VwlExr5no0tXlBt+silBvhTryPwCXRI2Id1PN8WTKu7MreethvddqOubrYxkFdv/RnYrqlv1sFNAUelw==", - "license": "MIT", - "dependencies": { - "browserslist": "^4.24.2" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/core-js" - } - }, - "node_modules/core-js-pure": { - "version": "3.39.0", - "resolved": "https://registry.npmjs.org/core-js-pure/-/core-js-pure-3.39.0.tgz", - "integrity": "sha512-7fEcWwKI4rJinnK+wLTezeg2smbFFdSBP6E2kQZNbnzM2s1rpKQ6aaRteZSSg7FLU3P0HGGVo/gbpfanU36urg==", - "hasInstallScript": true, - "license": "MIT", - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/core-js" - } - }, - "node_modules/core-util-is": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", - "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==", - "license": "MIT" - }, - "node_modules/cose-base": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/cose-base/-/cose-base-1.0.3.tgz", - "integrity": "sha512-s9whTXInMSgAp/NVXVNuVxVKzGH2qck3aQlVHxDCdAEPgtMKwc4Wq6/QKhgdEdgbLSi9rBTAcPoRa6JpiG4ksg==", - "license": "MIT", - "dependencies": { - "layout-base": "^1.0.0" - } - }, - "node_modules/cosmiconfig": { - "version": "8.3.6", - "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-8.3.6.tgz", - "integrity": "sha512-kcZ6+W5QzcJ3P1Mt+83OUv/oHFqZHIx8DuxG6eZ5RGMERoLqp4BuGjhHLYGK+Kf5XVkQvqBSmAy/nGWN3qDgEA==", - "license": "MIT", - "dependencies": { - "import-fresh": "^3.3.0", - "js-yaml": "^4.1.0", - "parse-json": "^5.2.0", - "path-type": "^4.0.0" - }, - "engines": { - "node": ">=14" - }, - "funding": { - "url": "https://github.com/sponsors/d-fischer" - }, - "peerDependencies": { - "typescript": ">=4.9.5" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } - } - }, - "node_modules/cross-spawn": { - "version": "7.0.6", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", - "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", - "license": "MIT", - "dependencies": { - "path-key": "^3.1.0", - "shebang-command": "^2.0.0", - "which": "^2.0.1" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/crypto-random-string": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/crypto-random-string/-/crypto-random-string-4.0.0.tgz", - "integrity": "sha512-x8dy3RnvYdlUcPOjkEHqozhiwzKNSq7GcPuXFbnyMOCHxX8V3OgIg/pYuabl2sbUPfIJaeAQB7PMOK8DFIdoRA==", - "license": "MIT", - "dependencies": { - "type-fest": "^1.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/crypto-random-string/node_modules/type-fest": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-1.4.0.tgz", - "integrity": "sha512-yGSza74xk0UG8k+pLh5oeoYirvIiWo5t0/o3zHHAO2tRDiZcxWP7fywNlXhqb6/r6sWvwi+RsyQMWhVLe4BVuA==", - "license": "(MIT OR CC0-1.0)", - 
"engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/css-blank-pseudo": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/css-blank-pseudo/-/css-blank-pseudo-7.0.1.tgz", - "integrity": "sha512-jf+twWGDf6LDoXDUode+nc7ZlrqfaNphrBIBrcmeP3D8yw1uPaix1gCC8LUQUGQ6CycuK2opkbFFWFuq/a94ag==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "dependencies": { - "postcss-selector-parser": "^7.0.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/css-blank-pseudo/node_modules/postcss-selector-parser": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.0.0.tgz", - "integrity": "sha512-9RbEr1Y7FFfptd/1eEdntyjMwLeghW1bHX9GWjXo19vx4ytPQhANltvVxDggzJl7mnWM+dX28kb6cyS/4iQjlQ==", - "license": "MIT", - "dependencies": { - "cssesc": "^3.0.0", - "util-deprecate": "^1.0.2" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/css-declaration-sorter": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/css-declaration-sorter/-/css-declaration-sorter-7.2.0.tgz", - "integrity": "sha512-h70rUM+3PNFuaBDTLe8wF/cdWu+dOZmb7pJt8Z2sedYbAcQVQV/tEchueg3GWxwqS0cxtbxmaHEdkNACqcvsow==", - "license": "ISC", - "engines": { - "node": "^14 || ^16 || >=18" - }, - "peerDependencies": { - "postcss": "^8.0.9" - } - }, - "node_modules/css-has-pseudo": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/css-has-pseudo/-/css-has-pseudo-7.0.1.tgz", - "integrity": "sha512-EOcoyJt+OsuKfCADgLT7gADZI5jMzIe/AeI6MeAYKiFBDmNmM7kk46DtSfMj5AohUJisqVzopBpnQTlvbyaBWg==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "dependencies": { - "@csstools/selector-specificity": "^5.0.0", - "postcss-selector-parser": "^7.0.0", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/css-has-pseudo/node_modules/@csstools/selector-specificity": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/@csstools/selector-specificity/-/selector-specificity-5.0.0.tgz", - "integrity": "sha512-PCqQV3c4CoVm3kdPhyeZ07VmBRdH2EpMFA/pd9OASpOEC3aXNGoqPDAZ80D0cLpMBxnmk0+yNhGsEx31hq7Gtw==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss-selector-parser": "^7.0.0" - } - }, - "node_modules/css-has-pseudo/node_modules/postcss-selector-parser": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.0.0.tgz", - "integrity": "sha512-9RbEr1Y7FFfptd/1eEdntyjMwLeghW1bHX9GWjXo19vx4ytPQhANltvVxDggzJl7mnWM+dX28kb6cyS/4iQjlQ==", - "license": "MIT", - "dependencies": { - "cssesc": "^3.0.0", - "util-deprecate": "^1.0.2" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/css-loader": { - "version": "6.11.0", - "resolved": "https://registry.npmjs.org/css-loader/-/css-loader-6.11.0.tgz", - "integrity": 
"sha512-CTJ+AEQJjq5NzLga5pE39qdiSV56F8ywCIsqNIRF0r7BDgWsN25aazToqAFg7ZrtA/U016xudB3ffgweORxX7g==", - "license": "MIT", - "dependencies": { - "icss-utils": "^5.1.0", - "postcss": "^8.4.33", - "postcss-modules-extract-imports": "^3.1.0", - "postcss-modules-local-by-default": "^4.0.5", - "postcss-modules-scope": "^3.2.0", - "postcss-modules-values": "^4.0.0", - "postcss-value-parser": "^4.2.0", - "semver": "^7.5.4" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "@rspack/core": "0.x || 1.x", - "webpack": "^5.0.0" - }, - "peerDependenciesMeta": { - "@rspack/core": { - "optional": true - }, - "webpack": { - "optional": true - } - } - }, - "node_modules/css-minimizer-webpack-plugin": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/css-minimizer-webpack-plugin/-/css-minimizer-webpack-plugin-5.0.1.tgz", - "integrity": "sha512-3caImjKFQkS+ws1TGcFn0V1HyDJFq1Euy589JlD6/3rV2kj+w7r5G9WDMgSHvpvXHNZ2calVypZWuEDQd9wfLg==", - "license": "MIT", - "dependencies": { - "@jridgewell/trace-mapping": "^0.3.18", - "cssnano": "^6.0.1", - "jest-worker": "^29.4.3", - "postcss": "^8.4.24", - "schema-utils": "^4.0.1", - "serialize-javascript": "^6.0.1" - }, - "engines": { - "node": ">= 14.15.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "webpack": "^5.0.0" - }, - "peerDependenciesMeta": { - "@parcel/css": { - "optional": true - }, - "@swc/css": { - "optional": true - }, - "clean-css": { - "optional": true - }, - "csso": { - "optional": true - }, - "esbuild": { - "optional": true - }, - "lightningcss": { - "optional": true - } - } - }, - "node_modules/css-prefers-color-scheme": { - "version": "10.0.0", - "resolved": "https://registry.npmjs.org/css-prefers-color-scheme/-/css-prefers-color-scheme-10.0.0.tgz", - "integrity": "sha512-VCtXZAWivRglTZditUfB4StnsWr6YVZ2PRtuxQLKTNRdtAf8tpzaVPE9zXIF3VaSc7O70iK/j1+NXxyQCqdPjQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/css-select": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/css-select/-/css-select-5.1.0.tgz", - "integrity": "sha512-nwoRF1rvRRnnCqqY7updORDsuqKzqYJ28+oSMaJMMgOauh3fvwHqMS7EZpIPqK8GL+g9mKxF1vP/ZjSeNjEVHg==", - "license": "BSD-2-Clause", - "dependencies": { - "boolbase": "^1.0.0", - "css-what": "^6.1.0", - "domhandler": "^5.0.2", - "domutils": "^3.0.1", - "nth-check": "^2.0.1" - }, - "funding": { - "url": "https://github.com/sponsors/fb55" - } - }, - "node_modules/css-tree": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-2.3.1.tgz", - "integrity": "sha512-6Fv1DV/TYw//QF5IzQdqsNDjx/wc8TrMBZsqjL9eW01tWb7R7k/mq+/VXfJCl7SoD5emsJop9cOByJZfs8hYIw==", - "license": "MIT", - "dependencies": { - "mdn-data": "2.0.30", - "source-map-js": "^1.0.1" - }, - "engines": { - "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0" - } - }, - "node_modules/css-what": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/css-what/-/css-what-6.1.0.tgz", - "integrity": "sha512-HTUrgRJ7r4dsZKU6GjmpfRK1O76h97Z8MfS1G0FozR+oF2kG6Vfe8JE6zwrkbxigziPHinCJ+gCPjA9EaBDtRw==", - "license": "BSD-2-Clause", - "engines": { - "node": ">= 6" - }, - "funding": { - 
"url": "https://github.com/sponsors/fb55" - } - }, - "node_modules/cssdb": { - "version": "8.2.1", - "resolved": "https://registry.npmjs.org/cssdb/-/cssdb-8.2.1.tgz", - "integrity": "sha512-KwEPys7lNsC8OjASI8RrmwOYYDcm0JOW9zQhcV83ejYcQkirTEyeAGui8aO2F5PiS6SLpxuTzl6qlMElIdsgIg==", - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - } - ], - "license": "MIT-0" - }, - "node_modules/cssesc": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", - "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", - "license": "MIT", - "bin": { - "cssesc": "bin/cssesc" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/cssnano": { - "version": "6.1.2", - "resolved": "https://registry.npmjs.org/cssnano/-/cssnano-6.1.2.tgz", - "integrity": "sha512-rYk5UeX7VAM/u0lNqewCdasdtPK81CgX8wJFLEIXHbV2oldWRgJAsZrdhRXkV1NJzA2g850KiFm9mMU2HxNxMA==", - "license": "MIT", - "dependencies": { - "cssnano-preset-default": "^6.1.2", - "lilconfig": "^3.1.1" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/cssnano" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/cssnano-preset-advanced": { - "version": "6.1.2", - "resolved": "https://registry.npmjs.org/cssnano-preset-advanced/-/cssnano-preset-advanced-6.1.2.tgz", - "integrity": "sha512-Nhao7eD8ph2DoHolEzQs5CfRpiEP0xa1HBdnFZ82kvqdmbwVBUr2r1QuQ4t1pi+D1ZpqpcO4T+wy/7RxzJ/WPQ==", - "license": "MIT", - "dependencies": { - "autoprefixer": "^10.4.19", - "browserslist": "^4.23.0", - "cssnano-preset-default": "^6.1.2", - "postcss-discard-unused": "^6.0.5", - "postcss-merge-idents": "^6.0.3", - "postcss-reduce-idents": "^6.0.3", - "postcss-zindex": "^6.0.2" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/cssnano-preset-default": { - "version": "6.1.2", - "resolved": "https://registry.npmjs.org/cssnano-preset-default/-/cssnano-preset-default-6.1.2.tgz", - "integrity": "sha512-1C0C+eNaeN8OcHQa193aRgYexyJtU8XwbdieEjClw+J9d94E41LwT6ivKH0WT+fYwYWB0Zp3I3IZ7tI/BbUbrg==", - "license": "MIT", - "dependencies": { - "browserslist": "^4.23.0", - "css-declaration-sorter": "^7.2.0", - "cssnano-utils": "^4.0.2", - "postcss-calc": "^9.0.1", - "postcss-colormin": "^6.1.0", - "postcss-convert-values": "^6.1.0", - "postcss-discard-comments": "^6.0.2", - "postcss-discard-duplicates": "^6.0.3", - "postcss-discard-empty": "^6.0.3", - "postcss-discard-overridden": "^6.0.2", - "postcss-merge-longhand": "^6.0.5", - "postcss-merge-rules": "^6.1.1", - "postcss-minify-font-values": "^6.1.0", - "postcss-minify-gradients": "^6.0.3", - "postcss-minify-params": "^6.1.0", - "postcss-minify-selectors": "^6.0.4", - "postcss-normalize-charset": "^6.0.2", - "postcss-normalize-display-values": "^6.0.2", - "postcss-normalize-positions": "^6.0.2", - "postcss-normalize-repeat-style": "^6.0.2", - "postcss-normalize-string": "^6.0.2", - "postcss-normalize-timing-functions": "^6.0.2", - "postcss-normalize-unicode": "^6.1.0", - "postcss-normalize-url": "^6.0.2", - "postcss-normalize-whitespace": "^6.0.2", - "postcss-ordered-values": "^6.0.2", - "postcss-reduce-initial": "^6.1.0", - "postcss-reduce-transforms": "^6.0.2", - "postcss-svgo": "^6.0.3", - "postcss-unique-selectors": "^6.0.4" - }, - "engines": { - "node": 
"^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/cssnano-utils": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/cssnano-utils/-/cssnano-utils-4.0.2.tgz", - "integrity": "sha512-ZR1jHg+wZ8o4c3zqf1SIUSTIvm/9mU343FMR6Obe/unskbvpGhZOo1J6d/r8D1pzkRQYuwbcH3hToOuoA2G7oQ==", - "license": "MIT", - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/csso": { - "version": "5.0.5", - "resolved": "https://registry.npmjs.org/csso/-/csso-5.0.5.tgz", - "integrity": "sha512-0LrrStPOdJj+SPCCrGhzryycLjwcgUSHBtxNA8aIDxf0GLsRh1cKYhB00Gd1lDOS4yGH69+SNn13+TWbVHETFQ==", - "license": "MIT", - "dependencies": { - "css-tree": "~2.2.0" - }, - "engines": { - "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0", - "npm": ">=7.0.0" - } - }, - "node_modules/csso/node_modules/css-tree": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-2.2.1.tgz", - "integrity": "sha512-OA0mILzGc1kCOCSJerOeqDxDQ4HOh+G8NbOJFOTgOCzpw7fCBubk0fEyxp8AgOL/jvLgYA/uV0cMbe43ElF1JA==", - "license": "MIT", - "dependencies": { - "mdn-data": "2.0.28", - "source-map-js": "^1.0.1" - }, - "engines": { - "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0", - "npm": ">=7.0.0" - } - }, - "node_modules/csso/node_modules/mdn-data": { - "version": "2.0.28", - "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.28.tgz", - "integrity": "sha512-aylIc7Z9y4yzHYAJNuESG3hfhC+0Ibp/MAMiaOZgNv4pmEdFyfZhhhny4MNiAfWdBQ1RQ2mfDWmM1x8SvGyp8g==", - "license": "CC0-1.0" - }, - "node_modules/csstype": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", - "integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==", - "license": "MIT" - }, - "node_modules/cytoscape": { - "version": "3.30.3", - "resolved": "https://registry.npmjs.org/cytoscape/-/cytoscape-3.30.3.tgz", - "integrity": "sha512-HncJ9gGJbVtw7YXtIs3+6YAFSSiKsom0amWc33Z7QbylbY2JGMrA0yz4EwrdTScZxnwclXeEZHzO5pxoy0ZE4g==", - "license": "MIT", - "engines": { - "node": ">=0.10" - } - }, - "node_modules/cytoscape-cose-bilkent": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/cytoscape-cose-bilkent/-/cytoscape-cose-bilkent-4.1.0.tgz", - "integrity": "sha512-wgQlVIUJF13Quxiv5e1gstZ08rnZj2XaLHGoFMYXz7SkNfCDOOteKBE6SYRfA9WxxI/iBc3ajfDoc6hb/MRAHQ==", - "license": "MIT", - "dependencies": { - "cose-base": "^1.0.0" - }, - "peerDependencies": { - "cytoscape": "^3.2.0" - } - }, - "node_modules/cytoscape-fcose": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/cytoscape-fcose/-/cytoscape-fcose-2.2.0.tgz", - "integrity": "sha512-ki1/VuRIHFCzxWNrsshHYPs6L7TvLu3DL+TyIGEsRcvVERmxokbf5Gdk7mFxZnTdiGtnA4cfSmjZJMviqSuZrQ==", - "license": "MIT", - "dependencies": { - "cose-base": "^2.2.0" - }, - "peerDependencies": { - "cytoscape": "^3.2.0" - } - }, - "node_modules/cytoscape-fcose/node_modules/cose-base": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/cose-base/-/cose-base-2.2.0.tgz", - "integrity": "sha512-AzlgcsCbUMymkADOJtQm3wO9S3ltPfYOFD5033keQn9NJzIbtnZj+UdBJe7DYml/8TdbtHJW3j58SOnKhWY/5g==", - "license": "MIT", - "dependencies": { - "layout-base": "^2.0.0" - } - }, - "node_modules/cytoscape-fcose/node_modules/layout-base": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/layout-base/-/layout-base-2.0.1.tgz", - "integrity": 
"sha512-dp3s92+uNI1hWIpPGH3jK2kxE2lMjdXdr+DH8ynZHpd6PUlH6x6cbuXnoMmiNumznqaNO31xu9e79F0uuZ0JFg==", - "license": "MIT" - }, - "node_modules/d3": { - "version": "7.9.0", - "resolved": "https://registry.npmjs.org/d3/-/d3-7.9.0.tgz", - "integrity": "sha512-e1U46jVP+w7Iut8Jt8ri1YsPOvFpg46k+K8TpCb0P+zjCkjkPnV7WzfDJzMHy1LnA+wj5pLT1wjO901gLXeEhA==", - "license": "ISC", - "dependencies": { - "d3-array": "3", - "d3-axis": "3", - "d3-brush": "3", - "d3-chord": "3", - "d3-color": "3", - "d3-contour": "4", - "d3-delaunay": "6", - "d3-dispatch": "3", - "d3-drag": "3", - "d3-dsv": "3", - "d3-ease": "3", - "d3-fetch": "3", - "d3-force": "3", - "d3-format": "3", - "d3-geo": "3", - "d3-hierarchy": "3", - "d3-interpolate": "3", - "d3-path": "3", - "d3-polygon": "3", - "d3-quadtree": "3", - "d3-random": "3", - "d3-scale": "4", - "d3-scale-chromatic": "3", - "d3-selection": "3", - "d3-shape": "3", - "d3-time": "3", - "d3-time-format": "4", - "d3-timer": "3", - "d3-transition": "3", - "d3-zoom": "3" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/d3-array": { - "version": "3.2.4", - "resolved": "https://registry.npmjs.org/d3-array/-/d3-array-3.2.4.tgz", - "integrity": "sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg==", - "license": "ISC", - "dependencies": { - "internmap": "1 - 2" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/d3-axis": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/d3-axis/-/d3-axis-3.0.0.tgz", - "integrity": "sha512-IH5tgjV4jE/GhHkRV0HiVYPDtvfjHQlQfJHs0usq7M30XcSBvOotpmH1IgkcXsO/5gEQZD43B//fc7SRT5S+xw==", - "license": "ISC", - "engines": { - "node": ">=12" - } - }, - "node_modules/d3-brush": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/d3-brush/-/d3-brush-3.0.0.tgz", - "integrity": "sha512-ALnjWlVYkXsVIGlOsuWH1+3udkYFI48Ljihfnh8FZPF2QS9o+PzGLBslO0PjzVoHLZ2KCVgAM8NVkXPJB2aNnQ==", - "license": "ISC", - "dependencies": { - "d3-dispatch": "1 - 3", - "d3-drag": "2 - 3", - "d3-interpolate": "1 - 3", - "d3-selection": "3", - "d3-transition": "3" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/d3-chord": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/d3-chord/-/d3-chord-3.0.1.tgz", - "integrity": "sha512-VE5S6TNa+j8msksl7HwjxMHDM2yNK3XCkusIlpX5kwauBfXuyLAtNg9jCp/iHH61tgI4sb6R/EIMWCqEIdjT/g==", - "license": "ISC", - "dependencies": { - "d3-path": "1 - 3" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/d3-color": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/d3-color/-/d3-color-3.1.0.tgz", - "integrity": "sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==", - "license": "ISC", - "engines": { - "node": ">=12" - } - }, - "node_modules/d3-contour": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/d3-contour/-/d3-contour-4.0.2.tgz", - "integrity": "sha512-4EzFTRIikzs47RGmdxbeUvLWtGedDUNkTcmzoeyg4sP/dvCexO47AaQL7VKy/gul85TOxw+IBgA8US2xwbToNA==", - "license": "ISC", - "dependencies": { - "d3-array": "^3.2.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/d3-delaunay": { - "version": "6.0.4", - "resolved": "https://registry.npmjs.org/d3-delaunay/-/d3-delaunay-6.0.4.tgz", - "integrity": "sha512-mdjtIZ1XLAM8bm/hx3WwjfHt6Sggek7qH043O8KEjDXN40xi3vx/6pYSVTwLjEgiXQTbvaouWKynLBiUZ6SK6A==", - "license": "ISC", - "dependencies": { - "delaunator": "5" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/d3-dispatch": { - "version": "3.0.1", - "resolved": 
"https://registry.npmjs.org/d3-dispatch/-/d3-dispatch-3.0.1.tgz", - "integrity": "sha512-rzUyPU/S7rwUflMyLc1ETDeBj0NRuHKKAcvukozwhshr6g6c5d8zh4c2gQjY2bZ0dXeGLWc1PF174P2tVvKhfg==", - "license": "ISC", - "engines": { - "node": ">=12" - } - }, - "node_modules/d3-drag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/d3-drag/-/d3-drag-3.0.0.tgz", - "integrity": "sha512-pWbUJLdETVA8lQNJecMxoXfH6x+mO2UQo8rSmZ+QqxcbyA3hfeprFgIT//HW2nlHChWeIIMwS2Fq+gEARkhTkg==", - "license": "ISC", - "dependencies": { - "d3-dispatch": "1 - 3", - "d3-selection": "3" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/d3-dsv": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/d3-dsv/-/d3-dsv-3.0.1.tgz", - "integrity": "sha512-UG6OvdI5afDIFP9w4G0mNq50dSOsXHJaRE8arAS5o9ApWnIElp8GZw1Dun8vP8OyHOZ/QJUKUJwxiiCCnUwm+Q==", - "license": "ISC", - "dependencies": { - "commander": "7", - "iconv-lite": "0.6", - "rw": "1" - }, - "bin": { - "csv2json": "bin/dsv2json.js", - "csv2tsv": "bin/dsv2dsv.js", - "dsv2dsv": "bin/dsv2dsv.js", - "dsv2json": "bin/dsv2json.js", - "json2csv": "bin/json2dsv.js", - "json2dsv": "bin/json2dsv.js", - "json2tsv": "bin/json2dsv.js", - "tsv2csv": "bin/dsv2dsv.js", - "tsv2json": "bin/dsv2json.js" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/d3-dsv/node_modules/commander": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz", - "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==", - "license": "MIT", - "engines": { - "node": ">= 10" - } - }, - "node_modules/d3-ease": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/d3-ease/-/d3-ease-3.0.1.tgz", - "integrity": "sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w==", - "license": "BSD-3-Clause", - "engines": { - "node": ">=12" - } - }, - "node_modules/d3-fetch": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/d3-fetch/-/d3-fetch-3.0.1.tgz", - "integrity": "sha512-kpkQIM20n3oLVBKGg6oHrUchHM3xODkTzjMoj7aWQFq5QEM+R6E4WkzT5+tojDY7yjez8KgCBRoj4aEr99Fdqw==", - "license": "ISC", - "dependencies": { - "d3-dsv": "1 - 3" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/d3-force": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/d3-force/-/d3-force-3.0.0.tgz", - "integrity": "sha512-zxV/SsA+U4yte8051P4ECydjD/S+qeYtnaIyAs9tgHCqfguma/aAQDjo85A9Z6EKhBirHRJHXIgJUlffT4wdLg==", - "license": "ISC", - "dependencies": { - "d3-dispatch": "1 - 3", - "d3-quadtree": "1 - 3", - "d3-timer": "1 - 3" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/d3-format": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/d3-format/-/d3-format-3.1.0.tgz", - "integrity": "sha512-YyUI6AEuY/Wpt8KWLgZHsIU86atmikuoOmCfommt0LYHiQSPjvX2AcFc38PX0CBpr2RCyZhjex+NS/LPOv6YqA==", - "license": "ISC", - "engines": { - "node": ">=12" - } - }, - "node_modules/d3-geo": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/d3-geo/-/d3-geo-3.1.1.tgz", - "integrity": "sha512-637ln3gXKXOwhalDzinUgY83KzNWZRKbYubaG+fGVuc/dxO64RRljtCTnf5ecMyE1RIdtqpkVcq0IbtU2S8j2Q==", - "license": "ISC", - "dependencies": { - "d3-array": "2.5.0 - 3" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/d3-hierarchy": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/d3-hierarchy/-/d3-hierarchy-3.1.2.tgz", - "integrity": 
"sha512-FX/9frcub54beBdugHjDCdikxThEqjnR93Qt7PvQTOHxyiNCAlvMrHhclk3cD5VeAaq9fxmfRp+CnWw9rEMBuA==", - "license": "ISC", - "engines": { - "node": ">=12" - } - }, - "node_modules/d3-interpolate": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/d3-interpolate/-/d3-interpolate-3.0.1.tgz", - "integrity": "sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==", - "license": "ISC", - "dependencies": { - "d3-color": "1 - 3" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/d3-path": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/d3-path/-/d3-path-3.1.0.tgz", - "integrity": "sha512-p3KP5HCf/bvjBSSKuXid6Zqijx7wIfNW+J/maPs+iwR35at5JCbLUT0LzF1cnjbCHWhqzQTIN2Jpe8pRebIEFQ==", - "license": "ISC", - "engines": { - "node": ">=12" - } - }, - "node_modules/d3-polygon": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/d3-polygon/-/d3-polygon-3.0.1.tgz", - "integrity": "sha512-3vbA7vXYwfe1SYhED++fPUQlWSYTTGmFmQiany/gdbiWgU/iEyQzyymwL9SkJjFFuCS4902BSzewVGsHHmHtXg==", - "license": "ISC", - "engines": { - "node": ">=12" - } - }, - "node_modules/d3-quadtree": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/d3-quadtree/-/d3-quadtree-3.0.1.tgz", - "integrity": "sha512-04xDrxQTDTCFwP5H6hRhsRcb9xxv2RzkcsygFzmkSIOJy3PeRJP7sNk3VRIbKXcog561P9oU0/rVH6vDROAgUw==", - "license": "ISC", - "engines": { - "node": ">=12" - } - }, - "node_modules/d3-random": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/d3-random/-/d3-random-3.0.1.tgz", - "integrity": "sha512-FXMe9GfxTxqd5D6jFsQ+DJ8BJS4E/fT5mqqdjovykEB2oFbTMDVdg1MGFxfQW+FBOGoB++k8swBrgwSHT1cUXQ==", - "license": "ISC", - "engines": { - "node": ">=12" - } - }, - "node_modules/d3-sankey": { - "version": "0.12.3", - "resolved": "https://registry.npmjs.org/d3-sankey/-/d3-sankey-0.12.3.tgz", - "integrity": "sha512-nQhsBRmM19Ax5xEIPLMY9ZmJ/cDvd1BG3UVvt5h3WRxKg5zGRbvnteTyWAbzeSvlh3tW7ZEmq4VwR5mB3tutmQ==", - "license": "BSD-3-Clause", - "dependencies": { - "d3-array": "1 - 2", - "d3-shape": "^1.2.0" - } - }, - "node_modules/d3-sankey/node_modules/d3-array": { - "version": "2.12.1", - "resolved": "https://registry.npmjs.org/d3-array/-/d3-array-2.12.1.tgz", - "integrity": "sha512-B0ErZK/66mHtEsR1TkPEEkwdy+WDesimkM5gpZr5Dsg54BiTA5RXtYW5qTLIAcekaS9xfZrzBLF/OAkB3Qn1YQ==", - "license": "BSD-3-Clause", - "dependencies": { - "internmap": "^1.0.0" - } - }, - "node_modules/d3-sankey/node_modules/d3-path": { - "version": "1.0.9", - "resolved": "https://registry.npmjs.org/d3-path/-/d3-path-1.0.9.tgz", - "integrity": "sha512-VLaYcn81dtHVTjEHd8B+pbe9yHWpXKZUC87PzoFmsFrJqgFwDe/qxfp5MlfsfM1V5E/iVt0MmEbWQ7FVIXh/bg==", - "license": "BSD-3-Clause" - }, - "node_modules/d3-sankey/node_modules/d3-shape": { - "version": "1.3.7", - "resolved": "https://registry.npmjs.org/d3-shape/-/d3-shape-1.3.7.tgz", - "integrity": "sha512-EUkvKjqPFUAZyOlhY5gzCxCeI0Aep04LwIRpsZ/mLFelJiUfnK56jo5JMDSE7yyP2kLSb6LtF+S5chMk7uqPqw==", - "license": "BSD-3-Clause", - "dependencies": { - "d3-path": "1" - } - }, - "node_modules/d3-sankey/node_modules/internmap": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/internmap/-/internmap-1.0.1.tgz", - "integrity": "sha512-lDB5YccMydFBtasVtxnZ3MRBHuaoE8GKsppq+EchKL2U4nK/DmEpPHNH8MZe5HkMtpSiTSOZwfN0tzYjO/lJEw==", - "license": "ISC" - }, - "node_modules/d3-scale": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/d3-scale/-/d3-scale-4.0.2.tgz", - "integrity": 
"sha512-GZW464g1SH7ag3Y7hXjf8RoUuAFIqklOAq3MRl4OaWabTFJY9PN/E1YklhXLh+OQ3fM9yS2nOkCoS+WLZ6kvxQ==", - "license": "ISC", - "dependencies": { - "d3-array": "2.10.0 - 3", - "d3-format": "1 - 3", - "d3-interpolate": "1.2.0 - 3", - "d3-time": "2.1.1 - 3", - "d3-time-format": "2 - 4" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/d3-scale-chromatic": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/d3-scale-chromatic/-/d3-scale-chromatic-3.1.0.tgz", - "integrity": "sha512-A3s5PWiZ9YCXFye1o246KoscMWqf8BsD9eRiJ3He7C9OBaxKhAd5TFCdEx/7VbKtxxTsu//1mMJFrEt572cEyQ==", - "license": "ISC", - "dependencies": { - "d3-color": "1 - 3", - "d3-interpolate": "1 - 3" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/d3-selection": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/d3-selection/-/d3-selection-3.0.0.tgz", - "integrity": "sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ==", - "license": "ISC", - "engines": { - "node": ">=12" - } - }, - "node_modules/d3-shape": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/d3-shape/-/d3-shape-3.2.0.tgz", - "integrity": "sha512-SaLBuwGm3MOViRq2ABk3eLoxwZELpH6zhl3FbAoJ7Vm1gofKx6El1Ib5z23NUEhF9AsGl7y+dzLe5Cw2AArGTA==", - "license": "ISC", - "dependencies": { - "d3-path": "^3.1.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/d3-time": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/d3-time/-/d3-time-3.1.0.tgz", - "integrity": "sha512-VqKjzBLejbSMT4IgbmVgDjpkYrNWUYJnbCGo874u7MMKIWsILRX+OpX/gTk8MqjpT1A/c6HY2dCA77ZN0lkQ2Q==", - "license": "ISC", - "dependencies": { - "d3-array": "2 - 3" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/d3-time-format": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/d3-time-format/-/d3-time-format-4.1.0.tgz", - "integrity": "sha512-dJxPBlzC7NugB2PDLwo9Q8JiTR3M3e4/XANkreKSUxF8vvXKqm1Yfq4Q5dl8budlunRVlUUaDUgFt7eA8D6NLg==", - "license": "ISC", - "dependencies": { - "d3-time": "1 - 3" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/d3-timer": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/d3-timer/-/d3-timer-3.0.1.tgz", - "integrity": "sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA==", - "license": "ISC", - "engines": { - "node": ">=12" - } - }, - "node_modules/d3-transition": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/d3-transition/-/d3-transition-3.0.1.tgz", - "integrity": "sha512-ApKvfjsSR6tg06xrL434C0WydLr7JewBB3V+/39RMHsaXTOG0zmt/OAXeng5M5LBm0ojmxJrpomQVZ1aPvBL4w==", - "license": "ISC", - "dependencies": { - "d3-color": "1 - 3", - "d3-dispatch": "1 - 3", - "d3-ease": "1 - 3", - "d3-interpolate": "1 - 3", - "d3-timer": "1 - 3" - }, - "engines": { - "node": ">=12" - }, - "peerDependencies": { - "d3-selection": "2 - 3" - } - }, - "node_modules/d3-zoom": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/d3-zoom/-/d3-zoom-3.0.0.tgz", - "integrity": "sha512-b8AmV3kfQaqWAuacbPuNbL6vahnOJflOhexLzMMNLga62+/nh0JzvJ0aO/5a5MVgUFGS7Hu1P9P03o3fJkDCyw==", - "license": "ISC", - "dependencies": { - "d3-dispatch": "1 - 3", - "d3-drag": "2 - 3", - "d3-interpolate": "1 - 3", - "d3-selection": "2 - 3", - "d3-transition": "2 - 3" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/dagre-d3-es": { - "version": "7.0.11", - "resolved": "https://registry.npmjs.org/dagre-d3-es/-/dagre-d3-es-7.0.11.tgz", - "integrity": 
"sha512-tvlJLyQf834SylNKax8Wkzco/1ias1OPw8DcUMDE7oUIoSEW25riQVuiu/0OWEFqT0cxHT3Pa9/D82Jr47IONw==", - "license": "MIT", - "dependencies": { - "d3": "^7.9.0", - "lodash-es": "^4.17.21" - } - }, - "node_modules/dayjs": { - "version": "1.11.13", - "resolved": "https://registry.npmjs.org/dayjs/-/dayjs-1.11.13.tgz", - "integrity": "sha512-oaMBel6gjolK862uaPQOVTA7q3TZhuSvuMQAAglQDOWYO9A91IrAOUJEyKVlqJlHE0vq5p5UXxzdPfMH/x6xNg==", - "license": "MIT" - }, - "node_modules/debounce": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/debounce/-/debounce-1.2.1.tgz", - "integrity": "sha512-XRRe6Glud4rd/ZGQfiV1ruXSfbvfJedlV9Y6zOlP+2K04vBYiJEte6stfFkCP03aMnY5tsipamumUjL14fofug==", - "license": "MIT" - }, - "node_modules/debug": { - "version": "4.3.7", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.7.tgz", - "integrity": "sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==", - "license": "MIT", - "dependencies": { - "ms": "^2.1.3" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/decode-named-character-reference": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.0.2.tgz", - "integrity": "sha512-O8x12RzrUF8xyVcY0KJowWsmaJxQbmy0/EtnNtHRpsOcT7dFk5W598coHqBVpmWo1oQQfsCqfCmkZN5DJrZVdg==", - "license": "MIT", - "dependencies": { - "character-entities": "^2.0.0" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/decompress-response": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz", - "integrity": "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==", - "license": "MIT", - "dependencies": { - "mimic-response": "^3.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/decompress-response/node_modules/mimic-response": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz", - "integrity": "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==", - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/deep-extend": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", - "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", - "license": "MIT", - "engines": { - "node": ">=4.0.0" - } - }, - "node_modules/deepmerge": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", - "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/default-gateway": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/default-gateway/-/default-gateway-6.0.3.tgz", - "integrity": "sha512-fwSOJsbbNzZ/CUFpqFBqYfYNLj1NbMPm8MMCIzHjC83iSJRBEGmDUxU+WP661BaBQImeC2yHwXtz+P/O9o+XEg==", - "license": "BSD-2-Clause", - "dependencies": { - "execa": "^5.0.0" - }, - "engines": { - "node": ">= 10" - } - }, - "node_modules/defer-to-connect": { - "version": "2.0.1", - "resolved": 
"https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-2.0.1.tgz", - "integrity": "sha512-4tvttepXG1VaYGrRibk5EwJd1t4udunSOVMdLSAL6mId1ix438oPwPZMALY41FCijukO1L0twNcGsdzS7dHgDg==", - "license": "MIT", - "engines": { - "node": ">=10" - } - }, - "node_modules/define-data-property": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", - "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", - "license": "MIT", - "dependencies": { - "es-define-property": "^1.0.0", - "es-errors": "^1.3.0", - "gopd": "^1.0.1" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/define-lazy-prop": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz", - "integrity": "sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/define-properties": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.1.tgz", - "integrity": "sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==", - "license": "MIT", - "dependencies": { - "define-data-property": "^1.0.1", - "has-property-descriptors": "^1.0.0", - "object-keys": "^1.1.1" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/delaunator": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/delaunator/-/delaunator-5.0.1.tgz", - "integrity": "sha512-8nvh+XBe96aCESrGOqMp/84b13H9cdKbG5P2ejQCh4d4sK9RL4371qou9drQjMhvnPmhWl5hnmqbEE0fXr9Xnw==", - "license": "ISC", - "dependencies": { - "robust-predicates": "^3.0.2" - } - }, - "node_modules/depd": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", - "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/dequal": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz", - "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/destroy": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz", - "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==", - "license": "MIT", - "engines": { - "node": ">= 0.8", - "npm": "1.2.8000 || >= 1.4.16" - } - }, - "node_modules/detect-node": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/detect-node/-/detect-node-2.1.0.tgz", - "integrity": "sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g==", - "license": "MIT" - }, - "node_modules/detect-port": { - "version": "1.6.1", - "resolved": "https://registry.npmjs.org/detect-port/-/detect-port-1.6.1.tgz", - "integrity": "sha512-CmnVc+Hek2egPx1PeTFVta2W78xy2K/9Rkf6cC4T59S50tVnzKj+tnx5mmx5lwvCkujZ4uRrpRSuV+IVs3f90Q==", - "license": "MIT", - "dependencies": { - "address": "^1.0.1", - "debug": "4" - }, - "bin": { - "detect": "bin/detect-port.js", - "detect-port": "bin/detect-port.js" - }, - "engines": { - 
"node": ">= 4.0.0" - } - }, - "node_modules/devlop": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/devlop/-/devlop-1.1.0.tgz", - "integrity": "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==", - "license": "MIT", - "dependencies": { - "dequal": "^2.0.0" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/dir-glob": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", - "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", - "license": "MIT", - "dependencies": { - "path-type": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/dns-packet": { - "version": "5.6.1", - "resolved": "https://registry.npmjs.org/dns-packet/-/dns-packet-5.6.1.tgz", - "integrity": "sha512-l4gcSouhcgIKRvyy99RNVOgxXiicE+2jZoNmaNmZ6JXiGajBOJAesk1OBlJuM5k2c+eudGdLxDqXuPCKIj6kpw==", - "license": "MIT", - "dependencies": { - "@leichtgewicht/ip-codec": "^2.0.1" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/dom-converter": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/dom-converter/-/dom-converter-0.2.0.tgz", - "integrity": "sha512-gd3ypIPfOMr9h5jIKq8E3sHOTCjeirnl0WK5ZdS1AW0Odt0b1PaWaHdJ4Qk4klv+YB9aJBS7mESXjFoDQPu6DA==", - "license": "MIT", - "dependencies": { - "utila": "~0.4" - } - }, - "node_modules/dom-serializer": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-2.0.0.tgz", - "integrity": "sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg==", - "license": "MIT", - "dependencies": { - "domelementtype": "^2.3.0", - "domhandler": "^5.0.2", - "entities": "^4.2.0" - }, - "funding": { - "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" - } - }, - "node_modules/domelementtype": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.3.0.tgz", - "integrity": "sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/fb55" - } - ], - "license": "BSD-2-Clause" - }, - "node_modules/domhandler": { - "version": "5.0.3", - "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-5.0.3.tgz", - "integrity": "sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==", - "license": "BSD-2-Clause", - "dependencies": { - "domelementtype": "^2.3.0" - }, - "engines": { - "node": ">= 4" - }, - "funding": { - "url": "https://github.com/fb55/domhandler?sponsor=1" - } - }, - "node_modules/dompurify": { - "version": "3.2.6", - "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.2.6.tgz", - "integrity": "sha512-/2GogDQlohXPZe6D6NOgQvXLPSYBqIWMnZ8zzOhn09REE4eyAzb+Hed3jhoM9OkuaJ8P6ZGTTVWQKAi8ieIzfQ==", - "license": "(MPL-2.0 OR Apache-2.0)", - "optionalDependencies": { - "@types/trusted-types": "^2.0.7" - } - }, - "node_modules/domutils": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-3.1.0.tgz", - "integrity": "sha512-H78uMmQtI2AhgDJjWeQmHwJJ2bLPD3GMmO7Zja/ZZh84wkm+4ut+IUnUdRa8uCGX88DiVx1j6FRe1XfxEgjEZA==", - "license": "BSD-2-Clause", - "dependencies": { - "dom-serializer": "^2.0.0", - "domelementtype": "^2.3.0", - "domhandler": "^5.0.3" - }, - "funding": { - "url": "https://github.com/fb55/domutils?sponsor=1" - 
} - }, - "node_modules/dot-case": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/dot-case/-/dot-case-3.0.4.tgz", - "integrity": "sha512-Kv5nKlh6yRrdrGvxeJ2e5y2eRUpkUosIW4A2AS38zwSz27zu7ufDwQPi5Jhs3XAlGNetl3bmnGhQsMtkKJnj3w==", - "license": "MIT", - "dependencies": { - "no-case": "^3.0.4", - "tslib": "^2.0.3" - } - }, - "node_modules/dot-prop": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-6.0.1.tgz", - "integrity": "sha512-tE7ztYzXHIeyvc7N+hR3oi7FIbf/NIjVP9hmAt3yMXzrQ072/fpjGLx2GxNxGxUl5V73MEqYzioOMoVhGMJ5cA==", - "license": "MIT", - "dependencies": { - "is-obj": "^2.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/dot-prop/node_modules/is-obj": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-2.0.0.tgz", - "integrity": "sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/duplexer": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/duplexer/-/duplexer-0.1.2.tgz", - "integrity": "sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg==", - "license": "MIT" - }, - "node_modules/eastasianwidth": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", - "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==", - "license": "MIT" - }, - "node_modules/ee-first": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", - "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==", - "license": "MIT" - }, - "node_modules/electron-to-chromium": { - "version": "1.5.63", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.63.tgz", - "integrity": "sha512-ddeXKuY9BHo/mw145axlyWjlJ1UBt4WK3AlvkT7W2AbqfRQoacVoRUCF6wL3uIx/8wT9oLKXzI+rFqHHscByaA==", - "license": "ISC" - }, - "node_modules/emoji-regex": { - "version": "9.2.2", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", - "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", - "license": "MIT" - }, - "node_modules/emojilib": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/emojilib/-/emojilib-2.4.0.tgz", - "integrity": "sha512-5U0rVMU5Y2n2+ykNLQqMoqklN9ICBT/KsvC1Gz6vqHbz2AXXGkG+Pm5rMWk/8Vjrr/mY9985Hi8DYzn1F09Nyw==", - "license": "MIT" - }, - "node_modules/emojis-list": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-3.0.0.tgz", - "integrity": "sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q==", - "license": "MIT", - "engines": { - "node": ">= 4" - } - }, - "node_modules/emoticon": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/emoticon/-/emoticon-4.1.0.tgz", - "integrity": "sha512-VWZfnxqwNcc51hIy/sbOdEem6D+cVtpPzEEtVAFdaas30+1dgkyaOQ4sQ6Bp0tOMqWO1v+HQfYaoodOkdhK6SQ==", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/encodeurl": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz", - "integrity": 
"sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==", - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/enhanced-resolve": { - "version": "5.17.1", - "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.17.1.tgz", - "integrity": "sha512-LMHl3dXhTcfv8gM4kEzIUeTQ+7fpdA0l2tUf34BddXPkz2A5xJ5L/Pchd5BL6rdccM9QGvu0sWZzK1Z1t4wwyg==", - "license": "MIT", - "dependencies": { - "graceful-fs": "^4.2.4", - "tapable": "^2.2.0" - }, - "engines": { - "node": ">=10.13.0" - } - }, - "node_modules/entities": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", - "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==", - "license": "BSD-2-Clause", - "engines": { - "node": ">=0.12" - }, - "funding": { - "url": "https://github.com/fb55/entities?sponsor=1" - } - }, - "node_modules/error-ex": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", - "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", - "license": "MIT", - "dependencies": { - "is-arrayish": "^0.2.1" - } - }, - "node_modules/es-define-property": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz", - "integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==", - "license": "MIT", - "dependencies": { - "get-intrinsic": "^1.2.4" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/es-errors": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", - "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/es-module-lexer": { - "version": "1.5.4", - "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.5.4.tgz", - "integrity": "sha512-MVNK56NiMrOwitFB7cqDwq0CQutbw+0BvLshJSse0MUNU+y1FC3bUS/AQg7oUng+/wKrrki7JfmwtVHkVfPLlw==", - "license": "MIT" - }, - "node_modules/esast-util-from-estree": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/esast-util-from-estree/-/esast-util-from-estree-2.0.0.tgz", - "integrity": "sha512-4CyanoAudUSBAn5K13H4JhsMH6L9ZP7XbLVe/dKybkxMO7eDyLsT8UHl9TRNrU2Gr9nz+FovfSIjuXWJ81uVwQ==", - "license": "MIT", - "dependencies": { - "@types/estree-jsx": "^1.0.0", - "devlop": "^1.0.0", - "estree-util-visit": "^2.0.0", - "unist-util-position-from-estree": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/esast-util-from-js": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/esast-util-from-js/-/esast-util-from-js-2.0.1.tgz", - "integrity": "sha512-8Ja+rNJ0Lt56Pcf3TAmpBZjmx8ZcK5Ts4cAzIOjsjevg9oSXJnl6SUQ2EevU8tv3h6ZLWmoKL5H4fgWvdvfETw==", - "license": "MIT", - "dependencies": { - "@types/estree-jsx": "^1.0.0", - "acorn": "^8.0.0", - "esast-util-from-estree": "^2.0.0", - "vfile-message": "^4.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/escalade": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", - "integrity": 
"sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/escape-goat": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/escape-goat/-/escape-goat-4.0.0.tgz", - "integrity": "sha512-2Sd4ShcWxbx6OY1IHyla/CVNwvg7XwZVoXZHcSu9w9SReNP1EzzD5T8NWKIR38fIqEns9kDWKUQTXXAmlDrdPg==", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/escape-html": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", - "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==", - "license": "MIT" - }, - "node_modules/escape-string-regexp": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", - "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/eslint-scope": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", - "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", - "license": "BSD-2-Clause", - "dependencies": { - "esrecurse": "^4.3.0", - "estraverse": "^4.1.1" - }, - "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/esprima": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", - "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", - "license": "BSD-2-Clause", - "bin": { - "esparse": "bin/esparse.js", - "esvalidate": "bin/esvalidate.js" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/esrecurse": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", - "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", - "license": "BSD-2-Clause", - "dependencies": { - "estraverse": "^5.2.0" - }, - "engines": { - "node": ">=4.0" - } - }, - "node_modules/esrecurse/node_modules/estraverse": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", - "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", - "license": "BSD-2-Clause", - "engines": { - "node": ">=4.0" - } - }, - "node_modules/estraverse": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", - "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", - "license": "BSD-2-Clause", - "engines": { - "node": ">=4.0" - } - }, - "node_modules/estree-util-attach-comments": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/estree-util-attach-comments/-/estree-util-attach-comments-3.0.0.tgz", - "integrity": "sha512-cKUwm/HUcTDsYh/9FgnuFqpfquUbwIqwKM26BVCGDPVgvaCl/nDCCjUfiLlx6lsEZ3Z4RFxNbOQ60pkaEwFxGw==", - "license": "MIT", - "dependencies": { - "@types/estree": "^1.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/estree-util-build-jsx": { - "version": 
"3.0.1", - "resolved": "https://registry.npmjs.org/estree-util-build-jsx/-/estree-util-build-jsx-3.0.1.tgz", - "integrity": "sha512-8U5eiL6BTrPxp/CHbs2yMgP8ftMhR5ww1eIKoWRMlqvltHF8fZn5LRDvTKuxD3DUn+shRbLGqXemcP51oFCsGQ==", - "license": "MIT", - "dependencies": { - "@types/estree-jsx": "^1.0.0", - "devlop": "^1.0.0", - "estree-util-is-identifier-name": "^3.0.0", - "estree-walker": "^3.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/estree-util-is-identifier-name": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/estree-util-is-identifier-name/-/estree-util-is-identifier-name-3.0.0.tgz", - "integrity": "sha512-hFtqIDZTIUZ9BXLb8y4pYGyk6+wekIivNVTcmvk8NoOh+VeRn5y6cEHzbURrWbfp1fIqdVipilzj+lfaadNZmg==", - "license": "MIT", - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/estree-util-scope": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/estree-util-scope/-/estree-util-scope-1.0.0.tgz", - "integrity": "sha512-2CAASclonf+JFWBNJPndcOpA8EMJwa0Q8LUFJEKqXLW6+qBvbFZuF5gItbQOs/umBUkjviCSDCbBwU2cXbmrhQ==", - "license": "MIT", - "dependencies": { - "@types/estree": "^1.0.0", - "devlop": "^1.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/estree-util-to-js": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/estree-util-to-js/-/estree-util-to-js-2.0.0.tgz", - "integrity": "sha512-WDF+xj5rRWmD5tj6bIqRi6CkLIXbbNQUcxQHzGysQzvHmdYG2G7p/Tf0J0gpxGgkeMZNTIjT/AoSvC9Xehcgdg==", - "license": "MIT", - "dependencies": { - "@types/estree-jsx": "^1.0.0", - "astring": "^1.8.0", - "source-map": "^0.7.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/estree-util-value-to-estree": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/estree-util-value-to-estree/-/estree-util-value-to-estree-3.2.1.tgz", - "integrity": "sha512-Vt2UOjyPbNQQgT5eJh+K5aATti0OjCIAGc9SgMdOFYbohuifsWclR74l0iZTJwePMgWYdX1hlVS+dedH9XV8kw==", - "license": "MIT", - "dependencies": { - "@types/estree": "^1.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/remcohaszing" - } - }, - "node_modules/estree-util-visit": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/estree-util-visit/-/estree-util-visit-2.0.0.tgz", - "integrity": "sha512-m5KgiH85xAhhW8Wta0vShLcUvOsh3LLPI2YVwcbio1l7E09NTLL1EyMZFM1OyWowoH0skScNbhOPl4kcBgzTww==", - "license": "MIT", - "dependencies": { - "@types/estree-jsx": "^1.0.0", - "@types/unist": "^3.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/estree-walker": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", - "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", - "license": "MIT", - "dependencies": { - "@types/estree": "^1.0.0" - } - }, - "node_modules/esutils": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", - "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", - "license": "BSD-2-Clause", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/eta": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/eta/-/eta-2.2.0.tgz", - "integrity": 
"sha512-UVQ72Rqjy/ZKQalzV5dCCJP80GrmPrMxh6NlNf+erV6ObL0ZFkhCstWRawS85z3smdr3d2wXPsZEY7rDPfGd2g==", - "license": "MIT", - "engines": { - "node": ">=6.0.0" - }, - "funding": { - "url": "https://github.com/eta-dev/eta?sponsor=1" - } - }, - "node_modules/etag": { - "version": "1.8.1", - "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", - "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/eval": { - "version": "0.1.8", - "resolved": "https://registry.npmjs.org/eval/-/eval-0.1.8.tgz", - "integrity": "sha512-EzV94NYKoO09GLXGjXj9JIlXijVck4ONSr5wiCWDvhsvj5jxSrzTmRU/9C1DyB6uToszLs8aifA6NQ7lEQdvFw==", - "dependencies": { - "@types/node": "*", - "require-like": ">= 0.1.1" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/eventemitter3": { - "version": "4.0.7", - "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", - "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==", - "license": "MIT" - }, - "node_modules/events": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", - "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==", - "license": "MIT", - "engines": { - "node": ">=0.8.x" - } - }, - "node_modules/execa": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", - "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", - "license": "MIT", - "dependencies": { - "cross-spawn": "^7.0.3", - "get-stream": "^6.0.0", - "human-signals": "^2.1.0", - "is-stream": "^2.0.0", - "merge-stream": "^2.0.0", - "npm-run-path": "^4.0.1", - "onetime": "^5.1.2", - "signal-exit": "^3.0.3", - "strip-final-newline": "^2.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sindresorhus/execa?sponsor=1" - } - }, - "node_modules/express": { - "version": "4.21.1", - "resolved": "https://registry.npmjs.org/express/-/express-4.21.1.tgz", - "integrity": "sha512-YSFlK1Ee0/GC8QaO91tHcDxJiE/X4FbpAyQWkxAvG6AXCuR65YzK8ua6D9hvi/TzUfZMpc+BwuM1IPw8fmQBiQ==", - "license": "MIT", - "dependencies": { - "accepts": "~1.3.8", - "array-flatten": "1.1.1", - "body-parser": "1.20.3", - "content-disposition": "0.5.4", - "content-type": "~1.0.4", - "cookie": "0.7.1", - "cookie-signature": "1.0.6", - "debug": "2.6.9", - "depd": "2.0.0", - "encodeurl": "~2.0.0", - "escape-html": "~1.0.3", - "etag": "~1.8.1", - "finalhandler": "1.3.1", - "fresh": "0.5.2", - "http-errors": "2.0.0", - "merge-descriptors": "1.0.3", - "methods": "~1.1.2", - "on-finished": "2.4.1", - "parseurl": "~1.3.3", - "path-to-regexp": "0.1.10", - "proxy-addr": "~2.0.7", - "qs": "6.13.0", - "range-parser": "~1.2.1", - "safe-buffer": "5.2.1", - "send": "0.19.0", - "serve-static": "1.16.2", - "setprototypeof": "1.2.0", - "statuses": "2.0.1", - "type-is": "~1.6.18", - "utils-merge": "1.0.1", - "vary": "~1.1.2" - }, - "engines": { - "node": ">= 0.10.0" - } - }, - "node_modules/express/node_modules/content-disposition": { - "version": "0.5.4", - "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz", - "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==", - "license": "MIT", - "dependencies": { - "safe-buffer": "5.2.1" - 
}, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/express/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "license": "MIT", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/express/node_modules/ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", - "license": "MIT" - }, - "node_modules/express/node_modules/path-to-regexp": { - "version": "0.1.10", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.10.tgz", - "integrity": "sha512-7lf7qcQidTku0Gu3YDPc8DJ1q7OOucfa/BSsIwjuh56VU7katFvuM8hULfkwB3Fns/rsVF7PwPKVw1sl5KQS9w==", - "license": "MIT" - }, - "node_modules/express/node_modules/range-parser": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", - "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/extend": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", - "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==", - "license": "MIT" - }, - "node_modules/extend-shallow": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", - "integrity": "sha512-zCnTtlxNoAiDc3gqY2aYAWFx7XWWiasuF2K8Me5WbN8otHKTUKBwjPtNpRs/rbUZm7KxWAaNj7P1a/p52GbVug==", - "license": "MIT", - "dependencies": { - "is-extendable": "^0.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/fast-deep-equal": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", - "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", - "license": "MIT" - }, - "node_modules/fast-glob": { - "version": "3.3.2", - "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.2.tgz", - "integrity": "sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==", - "license": "MIT", - "dependencies": { - "@nodelib/fs.stat": "^2.0.2", - "@nodelib/fs.walk": "^1.2.3", - "glob-parent": "^5.1.2", - "merge2": "^1.3.0", - "micromatch": "^4.0.4" - }, - "engines": { - "node": ">=8.6.0" - } - }, - "node_modules/fast-json-stable-stringify": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", - "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", - "license": "MIT" - }, - "node_modules/fast-uri": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/fast-uri/-/fast-uri-3.0.3.tgz", - "integrity": "sha512-aLrHthzCjH5He4Z2H9YZ+v6Ujb9ocRuW6ZzkJQOrTxleEijANq4v1TsaPaVG1PZcuurEzrLcWRyYBYXD5cEiaw==", - "license": "BSD-3-Clause" - }, - "node_modules/fastq": { - "version": "1.17.1", - "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.17.1.tgz", - "integrity": "sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==", - "license": "ISC", - "dependencies": { - "reusify": "^1.0.4" - } - }, - 
"node_modules/fault": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/fault/-/fault-2.0.1.tgz", - "integrity": "sha512-WtySTkS4OKev5JtpHXnib4Gxiurzh5NCGvWrFaZ34m6JehfTUhKZvn9njTfw48t6JumVQOmrKqpmGcdwxnhqBQ==", - "license": "MIT", - "dependencies": { - "format": "^0.2.0" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/faye-websocket": { - "version": "0.11.4", - "resolved": "https://registry.npmjs.org/faye-websocket/-/faye-websocket-0.11.4.tgz", - "integrity": "sha512-CzbClwlXAuiRQAlUyfqPgvPoNKTckTPGfwZV4ZdAhVcP2lh9KUxJg2b5GkE7XbjKQ3YJnQ9z6D9ntLAlB+tP8g==", - "license": "Apache-2.0", - "dependencies": { - "websocket-driver": ">=0.5.1" - }, - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/feed": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/feed/-/feed-4.2.2.tgz", - "integrity": "sha512-u5/sxGfiMfZNtJ3OvQpXcvotFpYkL0n9u9mM2vkui2nGo8b4wvDkJ8gAkYqbA8QpGyFCv3RK0Z+Iv+9veCS9bQ==", - "license": "MIT", - "dependencies": { - "xml-js": "^1.6.11" - }, - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/figures": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/figures/-/figures-3.2.0.tgz", - "integrity": "sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg==", - "license": "MIT", - "dependencies": { - "escape-string-regexp": "^1.0.5" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/figures/node_modules/escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", - "license": "MIT", - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/file-loader": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/file-loader/-/file-loader-6.2.0.tgz", - "integrity": "sha512-qo3glqyTa61Ytg4u73GultjHGjdRyig3tG6lPtyX/jOEJvHif9uB0/OCI2Kif6ctF3caQTW2G5gym21oAsI4pw==", - "license": "MIT", - "dependencies": { - "loader-utils": "^2.0.0", - "schema-utils": "^3.0.0" - }, - "engines": { - "node": ">= 10.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "webpack": "^4.0.0 || ^5.0.0" - } - }, - "node_modules/file-loader/node_modules/ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", - "license": "MIT", - "dependencies": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/file-loader/node_modules/ajv-keywords": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", - "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", - "license": "MIT", - "peerDependencies": { - "ajv": "^6.9.1" - } - }, - "node_modules/file-loader/node_modules/json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": 
"sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", - "license": "MIT" - }, - "node_modules/file-loader/node_modules/schema-utils": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", - "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", - "license": "MIT", - "dependencies": { - "@types/json-schema": "^7.0.8", - "ajv": "^6.12.5", - "ajv-keywords": "^3.5.2" - }, - "engines": { - "node": ">= 10.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, - "node_modules/fill-range": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", - "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", - "license": "MIT", - "dependencies": { - "to-regex-range": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/finalhandler": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.3.1.tgz", - "integrity": "sha512-6BN9trH7bp3qvnrRyzsBz+g3lZxTNZTbVO2EV1CS0WIcDbawYVdYvGflME/9QP0h0pYlCDBCTjYa9nZzMDpyxQ==", - "license": "MIT", - "dependencies": { - "debug": "2.6.9", - "encodeurl": "~2.0.0", - "escape-html": "~1.0.3", - "on-finished": "2.4.1", - "parseurl": "~1.3.3", - "statuses": "2.0.1", - "unpipe": "~1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/finalhandler/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "license": "MIT", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/finalhandler/node_modules/ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", - "license": "MIT" - }, - "node_modules/find-cache-dir": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-4.0.0.tgz", - "integrity": "sha512-9ZonPT4ZAK4a+1pUPVPZJapbi7O5qbbJPdYw/NOQWZZbVLdDTYM3A4R9z/DpAM08IDaFGsvPgiGZ82WEwUDWjg==", - "license": "MIT", - "dependencies": { - "common-path-prefix": "^3.0.0", - "pkg-dir": "^7.0.0" - }, - "engines": { - "node": ">=14.16" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/find-up": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-6.3.0.tgz", - "integrity": "sha512-v2ZsoEuVHYy8ZIlYqwPe/39Cy+cFDzp4dXPaxNvkEuouymu+2Jbz0PxpKarJHYJTmv2HWT3O382qY8l4jMWthw==", - "license": "MIT", - "dependencies": { - "locate-path": "^7.1.0", - "path-exists": "^5.0.0" - }, - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/flat": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/flat/-/flat-5.0.2.tgz", - "integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==", - "license": "BSD-3-Clause", - "bin": { - "flat": "cli.js" - } - }, - "node_modules/follow-redirects": { - "version": "1.15.9", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.9.tgz", - "integrity": 
"sha512-gew4GsXizNgdoRyqmyfMHyAmXsZDk6mHkSxZFCzW9gwlbtOW44CDtYavM+y+72qD/Vq2l550kMF52DT8fOLJqQ==", - "funding": [ - { - "type": "individual", - "url": "https://github.com/sponsors/RubenVerborgh" - } - ], - "license": "MIT", - "engines": { - "node": ">=4.0" - }, - "peerDependenciesMeta": { - "debug": { - "optional": true - } - } - }, - "node_modules/form-data-encoder": { - "version": "2.1.4", - "resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-2.1.4.tgz", - "integrity": "sha512-yDYSgNMraqvnxiEXO4hi88+YZxaHC6QKzb5N84iRCTDeRO7ZALpir/lVmf/uXUhnwUr2O4HU8s/n6x+yNjQkHw==", - "license": "MIT", - "engines": { - "node": ">= 14.17" - } - }, - "node_modules/format": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/format/-/format-0.2.2.tgz", - "integrity": "sha512-wzsgA6WOq+09wrU1tsJ09udeR/YZRaeArL9e1wPbFg3GG2yDnC2ldKpxs4xunpFF9DgqCqOIra3bc1HWrJ37Ww==", - "engines": { - "node": ">=0.4.x" - } - }, - "node_modules/forwarded": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", - "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/fraction.js": { - "version": "4.3.7", - "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.3.7.tgz", - "integrity": "sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew==", - "license": "MIT", - "engines": { - "node": "*" - }, - "funding": { - "type": "patreon", - "url": "https://github.com/sponsors/rawify" - } - }, - "node_modules/fresh": { - "version": "0.5.2", - "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", - "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/fs-extra": { - "version": "11.2.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.2.0.tgz", - "integrity": "sha512-PmDi3uwK5nFuXh7XDTlVnS17xJS7vW36is2+w3xcv8SVxiB4NyATf4ctkVY5bkSjX0Y4nbvZCq1/EjtEyr9ktw==", - "license": "MIT", - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=14.14" - } - }, - "node_modules/fs-monkey": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/fs-monkey/-/fs-monkey-1.0.6.tgz", - "integrity": "sha512-b1FMfwetIKymC0eioW7mTywihSQE4oLzQn1dB6rZB5fx/3NpNEdAWeCSMB+60/AeT0TCXsxzAlcYVEFCTAksWg==", - "license": "Unlicense" - }, - "node_modules/fs.realpath": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", - "license": "ISC" - }, - "node_modules/fsevents": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", - "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", - "hasInstallScript": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": "^8.16.0 || ^10.6.0 || >=11.0.0" - } - }, - "node_modules/function-bind": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", - "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", - "license": 
"MIT", - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/gensync": { - "version": "1.0.0-beta.2", - "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", - "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", - "license": "MIT", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/get-intrinsic": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", - "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", - "license": "MIT", - "dependencies": { - "es-errors": "^1.3.0", - "function-bind": "^1.1.2", - "has-proto": "^1.0.1", - "has-symbols": "^1.0.3", - "hasown": "^2.0.0" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/get-own-enumerable-property-symbols": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/get-own-enumerable-property-symbols/-/get-own-enumerable-property-symbols-3.0.2.tgz", - "integrity": "sha512-I0UBV/XOz1XkIJHEUDMZAbzCThU/H8DxmSfmdGcKPnVhu2VfFqr34jr9777IyaTYvxjedWhqVIilEDsCdP5G6g==", - "license": "ISC" - }, - "node_modules/get-stream": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", - "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/github-slugger": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/github-slugger/-/github-slugger-1.5.0.tgz", - "integrity": "sha512-wIh+gKBI9Nshz2o46B0B3f5k/W+WI9ZAv6y5Dn5WJ5SK1t0TnDimB4WE5rmTD05ZAIn8HALCZVmCsvj0w0v0lw==", - "license": "ISC" - }, - "node_modules/glob": { - "version": "7.2.3", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", - "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", - "deprecated": "Glob versions prior to v9 are no longer supported", - "license": "ISC", - "dependencies": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.1.1", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - }, - "engines": { - "node": "*" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/glob-parent": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", - "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", - "license": "ISC", - "dependencies": { - "is-glob": "^4.0.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/glob-to-regexp": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz", - "integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==", - "license": "BSD-2-Clause" - }, - "node_modules/global-dirs": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/global-dirs/-/global-dirs-3.0.1.tgz", - "integrity": "sha512-NBcGGFbBA9s1VzD41QXDG+3++t9Mn5t1FpLdhESY6oKY4gYTFpX4wO3sqGUa0Srjtbfj3szX0RnemmrVRUdULA==", - "license": "MIT", - "dependencies": { - "ini": "2.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": 
"https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/global-dirs/node_modules/ini": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ini/-/ini-2.0.0.tgz", - "integrity": "sha512-7PnF4oN3CvZF23ADhA5wRaYEQpJ8qygSkbtTXWBeXWXmEVRXK+1ITciHWwHhsjv1TmW0MgacIv6hEi5pX5NQdA==", - "license": "ISC", - "engines": { - "node": ">=10" - } - }, - "node_modules/globals": { - "version": "11.12.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", - "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", - "license": "MIT", - "engines": { - "node": ">=4" - } - }, - "node_modules/globby": { - "version": "11.1.0", - "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", - "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", - "license": "MIT", - "dependencies": { - "array-union": "^2.1.0", - "dir-glob": "^3.0.1", - "fast-glob": "^3.2.9", - "ignore": "^5.2.0", - "merge2": "^1.4.1", - "slash": "^3.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/gopd": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz", - "integrity": "sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==", - "license": "MIT", - "dependencies": { - "get-intrinsic": "^1.1.3" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/got": { - "version": "12.6.1", - "resolved": "https://registry.npmjs.org/got/-/got-12.6.1.tgz", - "integrity": "sha512-mThBblvlAF1d4O5oqyvN+ZxLAYwIJK7bpMxgYqPD9okW0C3qm5FFn7k811QrcuEBwaogR3ngOFoCfs6mRv7teQ==", - "license": "MIT", - "dependencies": { - "@sindresorhus/is": "^5.2.0", - "@szmarczak/http-timer": "^5.0.1", - "cacheable-lookup": "^7.0.0", - "cacheable-request": "^10.2.8", - "decompress-response": "^6.0.0", - "form-data-encoder": "^2.1.2", - "get-stream": "^6.0.1", - "http2-wrapper": "^2.1.10", - "lowercase-keys": "^3.0.0", - "p-cancelable": "^3.0.0", - "responselike": "^3.0.0" - }, - "engines": { - "node": ">=14.16" - }, - "funding": { - "url": "https://github.com/sindresorhus/got?sponsor=1" - } - }, - "node_modules/got/node_modules/@sindresorhus/is": { - "version": "5.6.0", - "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-5.6.0.tgz", - "integrity": "sha512-TV7t8GKYaJWsn00tFDqBw8+Uqmr8A0fRU1tvTQhyZzGv0sJCGRQL3JGMI3ucuKo3XIZdUP+Lx7/gh2t3lewy7g==", - "license": "MIT", - "engines": { - "node": ">=14.16" - }, - "funding": { - "url": "https://github.com/sindresorhus/is?sponsor=1" - } - }, - "node_modules/graceful-fs": { - "version": "4.2.11", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", - "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", - "license": "ISC" - }, - "node_modules/gray-matter": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/gray-matter/-/gray-matter-4.0.3.tgz", - "integrity": "sha512-5v6yZd4JK3eMI3FqqCouswVqwugaA9r4dNZB1wwcmrD02QkV5H0y7XBQW8QwQqEaZY1pM9aqORSORhJRdNK44Q==", - "license": "MIT", - "dependencies": { - "js-yaml": "^3.13.1", - "kind-of": "^6.0.2", - "section-matter": "^1.0.0", - "strip-bom-string": "^1.0.0" - }, - "engines": { - "node": ">=6.0" - } - }, - "node_modules/gray-matter/node_modules/argparse": { - "version": "1.0.10", - "resolved": 
"https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", - "license": "MIT", - "dependencies": { - "sprintf-js": "~1.0.2" - } - }, - "node_modules/gray-matter/node_modules/js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", - "license": "MIT", - "dependencies": { - "argparse": "^1.0.7", - "esprima": "^4.0.0" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, - "node_modules/gzip-size": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/gzip-size/-/gzip-size-6.0.0.tgz", - "integrity": "sha512-ax7ZYomf6jqPTQ4+XCpUGyXKHk5WweS+e05MBO4/y3WJ5RkmPXNKvX+bx1behVILVwr6JSQvZAku021CHPXG3Q==", - "license": "MIT", - "dependencies": { - "duplexer": "^0.1.2" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/hachure-fill": { - "version": "0.5.2", - "resolved": "https://registry.npmjs.org/hachure-fill/-/hachure-fill-0.5.2.tgz", - "integrity": "sha512-3GKBOn+m2LX9iq+JC1064cSFprJY4jL1jCXTcpnfER5HYE2l/4EfWSGzkPa/ZDBmYI0ZOEj5VHV/eKnPGkHuOg==", - "license": "MIT" - }, - "node_modules/handle-thing": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/handle-thing/-/handle-thing-2.0.1.tgz", - "integrity": "sha512-9Qn4yBxelxoh2Ow62nP+Ka/kMnOXRi8BXnRaUwezLNhqelnN49xKz4F/dPP8OYLxLxq6JDtZb2i9XznUQbNPTg==", - "license": "MIT" - }, - "node_modules/has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/has-property-descriptors": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", - "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", - "license": "MIT", - "dependencies": { - "es-define-property": "^1.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/has-proto": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.3.tgz", - "integrity": "sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/has-symbols": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", - "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/has-yarn": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-yarn/-/has-yarn-3.0.0.tgz", - "integrity": "sha512-IrsVwUHhEULx3R8f/aA8AHuEzAorplsab/v8HBzEiIukwq5i/EC+xmOW+HfP1OaDP+2JkgT1yILHN2O3UFIbcA==", - "license": "MIT", - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/hasown": { - "version": 
"2.0.2", - "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", - "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", - "license": "MIT", - "dependencies": { - "function-bind": "^1.1.2" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/hast-util-from-parse5": { - "version": "8.0.2", - "resolved": "https://registry.npmjs.org/hast-util-from-parse5/-/hast-util-from-parse5-8.0.2.tgz", - "integrity": "sha512-SfMzfdAi/zAoZ1KkFEyyeXBn7u/ShQrfd675ZEE9M3qj+PMFX05xubzRyF76CCSJu8au9jgVxDV1+okFvgZU4A==", - "license": "MIT", - "dependencies": { - "@types/hast": "^3.0.0", - "@types/unist": "^3.0.0", - "devlop": "^1.0.0", - "hastscript": "^9.0.0", - "property-information": "^6.0.0", - "vfile": "^6.0.0", - "vfile-location": "^5.0.0", - "web-namespaces": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/hast-util-parse-selector": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/hast-util-parse-selector/-/hast-util-parse-selector-4.0.0.tgz", - "integrity": "sha512-wkQCkSYoOGCRKERFWcxMVMOcYE2K1AaNLU8DXS9arxnLOUEWbOXKXiJUNzEpqZ3JOKpnha3jkFrumEjVliDe7A==", - "license": "MIT", - "dependencies": { - "@types/hast": "^3.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/hast-util-raw": { - "version": "9.1.0", - "resolved": "https://registry.npmjs.org/hast-util-raw/-/hast-util-raw-9.1.0.tgz", - "integrity": "sha512-Y8/SBAHkZGoNkpzqqfCldijcuUKh7/su31kEBp67cFY09Wy0mTRgtsLYsiIxMJxlu0f6AA5SUTbDR8K0rxnbUw==", - "license": "MIT", - "dependencies": { - "@types/hast": "^3.0.0", - "@types/unist": "^3.0.0", - "@ungap/structured-clone": "^1.0.0", - "hast-util-from-parse5": "^8.0.0", - "hast-util-to-parse5": "^8.0.0", - "html-void-elements": "^3.0.0", - "mdast-util-to-hast": "^13.0.0", - "parse5": "^7.0.0", - "unist-util-position": "^5.0.0", - "unist-util-visit": "^5.0.0", - "vfile": "^6.0.0", - "web-namespaces": "^2.0.0", - "zwitch": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/hast-util-to-estree": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/hast-util-to-estree/-/hast-util-to-estree-3.1.0.tgz", - "integrity": "sha512-lfX5g6hqVh9kjS/B9E2gSkvHH4SZNiQFiqWS0x9fENzEl+8W12RqdRxX6d/Cwxi30tPQs3bIO+aolQJNp1bIyw==", - "license": "MIT", - "dependencies": { - "@types/estree": "^1.0.0", - "@types/estree-jsx": "^1.0.0", - "@types/hast": "^3.0.0", - "comma-separated-tokens": "^2.0.0", - "devlop": "^1.0.0", - "estree-util-attach-comments": "^3.0.0", - "estree-util-is-identifier-name": "^3.0.0", - "hast-util-whitespace": "^3.0.0", - "mdast-util-mdx-expression": "^2.0.0", - "mdast-util-mdx-jsx": "^3.0.0", - "mdast-util-mdxjs-esm": "^2.0.0", - "property-information": "^6.0.0", - "space-separated-tokens": "^2.0.0", - "style-to-object": "^0.4.0", - "unist-util-position": "^5.0.0", - "zwitch": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/hast-util-to-estree/node_modules/inline-style-parser": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.1.1.tgz", - "integrity": "sha512-7NXolsK4CAS5+xvdj5OMMbI962hU/wvwoxk+LWR9Ek9bVtyuuYScDN6eS0rUm6TxApFpw7CX1o4uJzcd4AyD3Q==", - "license": "MIT" - }, - "node_modules/hast-util-to-estree/node_modules/style-to-object": 
{ - "version": "0.4.4", - "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-0.4.4.tgz", - "integrity": "sha512-HYNoHZa2GorYNyqiCaBgsxvcJIn7OHq6inEga+E6Ke3m5JkoqpQbnFssk4jwe+K7AhGa2fcha4wSOf1Kn01dMg==", - "license": "MIT", - "dependencies": { - "inline-style-parser": "0.1.1" - } - }, - "node_modules/hast-util-to-jsx-runtime": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/hast-util-to-jsx-runtime/-/hast-util-to-jsx-runtime-2.3.2.tgz", - "integrity": "sha512-1ngXYb+V9UT5h+PxNRa1O1FYguZK/XL+gkeqvp7EdHlB9oHUG0eYRo/vY5inBdcqo3RkPMC58/H94HvkbfGdyg==", - "license": "MIT", - "dependencies": { - "@types/estree": "^1.0.0", - "@types/hast": "^3.0.0", - "@types/unist": "^3.0.0", - "comma-separated-tokens": "^2.0.0", - "devlop": "^1.0.0", - "estree-util-is-identifier-name": "^3.0.0", - "hast-util-whitespace": "^3.0.0", - "mdast-util-mdx-expression": "^2.0.0", - "mdast-util-mdx-jsx": "^3.0.0", - "mdast-util-mdxjs-esm": "^2.0.0", - "property-information": "^6.0.0", - "space-separated-tokens": "^2.0.0", - "style-to-object": "^1.0.0", - "unist-util-position": "^5.0.0", - "vfile-message": "^4.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/hast-util-to-parse5": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/hast-util-to-parse5/-/hast-util-to-parse5-8.0.0.tgz", - "integrity": "sha512-3KKrV5ZVI8if87DVSi1vDeByYrkGzg4mEfeu4alwgmmIeARiBLKCZS2uw5Gb6nU9x9Yufyj3iudm6i7nl52PFw==", - "license": "MIT", - "dependencies": { - "@types/hast": "^3.0.0", - "comma-separated-tokens": "^2.0.0", - "devlop": "^1.0.0", - "property-information": "^6.0.0", - "space-separated-tokens": "^2.0.0", - "web-namespaces": "^2.0.0", - "zwitch": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/hast-util-whitespace": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/hast-util-whitespace/-/hast-util-whitespace-3.0.0.tgz", - "integrity": "sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw==", - "license": "MIT", - "dependencies": { - "@types/hast": "^3.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/hastscript": { - "version": "9.0.0", - "resolved": "https://registry.npmjs.org/hastscript/-/hastscript-9.0.0.tgz", - "integrity": "sha512-jzaLBGavEDKHrc5EfFImKN7nZKKBdSLIdGvCwDZ9TfzbF2ffXiov8CKE445L2Z1Ek2t/m4SKQ2j6Ipv7NyUolw==", - "license": "MIT", - "dependencies": { - "@types/hast": "^3.0.0", - "comma-separated-tokens": "^2.0.0", - "hast-util-parse-selector": "^4.0.0", - "property-information": "^6.0.0", - "space-separated-tokens": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/he": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", - "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", - "license": "MIT", - "bin": { - "he": "bin/he" - } - }, - "node_modules/history": { - "version": "4.10.1", - "resolved": "https://registry.npmjs.org/history/-/history-4.10.1.tgz", - "integrity": "sha512-36nwAD620w12kuzPAsyINPWJqlNbij+hpK1k9XRloDtym8mxzGYl2c17LnV6IAGB2Dmg4tEa7G7DlawS0+qjew==", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.1.2", - "loose-envify": "^1.2.0", - "resolve-pathname": "^3.0.0", - "tiny-invariant": 
"^1.0.2", - "tiny-warning": "^1.0.0", - "value-equal": "^1.0.1" - } - }, - "node_modules/hoist-non-react-statics": { - "version": "3.3.2", - "resolved": "https://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz", - "integrity": "sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==", - "license": "BSD-3-Clause", - "dependencies": { - "react-is": "^16.7.0" - } - }, - "node_modules/hpack.js": { - "version": "2.1.6", - "resolved": "https://registry.npmjs.org/hpack.js/-/hpack.js-2.1.6.tgz", - "integrity": "sha512-zJxVehUdMGIKsRaNt7apO2Gqp0BdqW5yaiGHXXmbpvxgBYVZnAql+BJb4RO5ad2MgpbZKn5G6nMnegrH1FcNYQ==", - "license": "MIT", - "dependencies": { - "inherits": "^2.0.1", - "obuf": "^1.0.0", - "readable-stream": "^2.0.1", - "wbuf": "^1.1.0" - } - }, - "node_modules/hpack.js/node_modules/isarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==", - "license": "MIT" - }, - "node_modules/hpack.js/node_modules/readable-stream": { - "version": "2.3.8", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", - "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", - "license": "MIT", - "dependencies": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.3", - "isarray": "~1.0.0", - "process-nextick-args": "~2.0.0", - "safe-buffer": "~5.1.1", - "string_decoder": "~1.1.1", - "util-deprecate": "~1.0.1" - } - }, - "node_modules/hpack.js/node_modules/safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", - "license": "MIT" - }, - "node_modules/hpack.js/node_modules/string_decoder": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", - "license": "MIT", - "dependencies": { - "safe-buffer": "~5.1.0" - } - }, - "node_modules/htm": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/htm/-/htm-3.1.1.tgz", - "integrity": "sha512-983Vyg8NwUE7JkZ6NmOqpCZ+sh1bKv2iYTlUkzlWmA5JD2acKoxd4KVxbMmxX/85mtfdnDmTFoNKcg5DGAvxNQ==", - "license": "Apache-2.0" - }, - "node_modules/html-entities": { - "version": "2.5.2", - "resolved": "https://registry.npmjs.org/html-entities/-/html-entities-2.5.2.tgz", - "integrity": "sha512-K//PSRMQk4FZ78Kyau+mZurHn3FH0Vwr+H36eE0rPbeYkRRi9YxceYPhuN60UwWorxyKHhqoAJl2OFKa4BVtaA==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/mdevils" - }, - { - "type": "patreon", - "url": "https://patreon.com/mdevils" - } - ], - "license": "MIT" - }, - "node_modules/html-escaper": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", - "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==", - "license": "MIT" - }, - "node_modules/html-minifier-terser": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/html-minifier-terser/-/html-minifier-terser-7.2.0.tgz", - "integrity": "sha512-tXgn3QfqPIpGl9o+K5tpcj3/MN4SfLtsx2GWwBC3SSd0tXQGyF3gsSqad8loJgKZGM3ZxbYDd5yhiBIdWpmvLA==", - "license": "MIT", - 
"dependencies": { - "camel-case": "^4.1.2", - "clean-css": "~5.3.2", - "commander": "^10.0.0", - "entities": "^4.4.0", - "param-case": "^3.0.4", - "relateurl": "^0.2.7", - "terser": "^5.15.1" - }, - "bin": { - "html-minifier-terser": "cli.js" - }, - "engines": { - "node": "^14.13.1 || >=16.0.0" - } - }, - "node_modules/html-minifier-terser/node_modules/commander": { - "version": "10.0.1", - "resolved": "https://registry.npmjs.org/commander/-/commander-10.0.1.tgz", - "integrity": "sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==", - "license": "MIT", - "engines": { - "node": ">=14" - } - }, - "node_modules/html-tags": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-3.3.1.tgz", - "integrity": "sha512-ztqyC3kLto0e9WbNp0aeP+M3kTt+nbaIveGmUxAtZa+8iFgKLUOD4YKM5j+f3QD89bra7UeumolZHKuOXnTmeQ==", - "license": "MIT", - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/html-void-elements": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/html-void-elements/-/html-void-elements-3.0.0.tgz", - "integrity": "sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg==", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/html-webpack-plugin": { - "version": "5.6.3", - "resolved": "https://registry.npmjs.org/html-webpack-plugin/-/html-webpack-plugin-5.6.3.tgz", - "integrity": "sha512-QSf1yjtSAsmf7rYBV7XX86uua4W/vkhIt0xNXKbsi2foEeW7vjJQz4bhnpL3xH+l1ryl1680uNv968Z+X6jSYg==", - "license": "MIT", - "dependencies": { - "@types/html-minifier-terser": "^6.0.0", - "html-minifier-terser": "^6.0.2", - "lodash": "^4.17.21", - "pretty-error": "^4.0.0", - "tapable": "^2.0.0" - }, - "engines": { - "node": ">=10.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/html-webpack-plugin" - }, - "peerDependencies": { - "@rspack/core": "0.x || 1.x", - "webpack": "^5.20.0" - }, - "peerDependenciesMeta": { - "@rspack/core": { - "optional": true - }, - "webpack": { - "optional": true - } - } - }, - "node_modules/html-webpack-plugin/node_modules/commander": { - "version": "8.3.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-8.3.0.tgz", - "integrity": "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==", - "license": "MIT", - "engines": { - "node": ">= 12" - } - }, - "node_modules/html-webpack-plugin/node_modules/html-minifier-terser": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz", - "integrity": "sha512-YXxSlJBZTP7RS3tWnQw74ooKa6L9b9i9QYXY21eUEvhZ3u9XLfv6OnFsQq6RxkhHygsaUMvYsZRV5rU/OVNZxw==", - "license": "MIT", - "dependencies": { - "camel-case": "^4.1.2", - "clean-css": "^5.2.2", - "commander": "^8.3.0", - "he": "^1.2.0", - "param-case": "^3.0.4", - "relateurl": "^0.2.7", - "terser": "^5.10.0" - }, - "bin": { - "html-minifier-terser": "cli.js" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/htmlparser2": { - "version": "8.0.2", - "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-8.0.2.tgz", - "integrity": "sha512-GYdjWKDkbRLkZ5geuHs5NY1puJ+PXwP7+fHPRz06Eirsb9ugf6d8kkXav6ADhcODhFFPMIXyxkxSuMf3D6NCFA==", - "funding": [ - "https://github.com/fb55/htmlparser2?sponsor=1", - { - "type": "github", - "url": "https://github.com/sponsors/fb55" - } - 
], - "license": "MIT", - "dependencies": { - "domelementtype": "^2.3.0", - "domhandler": "^5.0.3", - "domutils": "^3.0.1", - "entities": "^4.4.0" - } - }, - "node_modules/http-cache-semantics": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.1.tgz", - "integrity": "sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ==", - "license": "BSD-2-Clause" - }, - "node_modules/http-deceiver": { - "version": "1.2.7", - "resolved": "https://registry.npmjs.org/http-deceiver/-/http-deceiver-1.2.7.tgz", - "integrity": "sha512-LmpOGxTfbpgtGVxJrj5k7asXHCgNZp5nLfp+hWc8QQRqtb7fUy6kRY3BO1h9ddF6yIPYUARgxGOwB42DnxIaNw==", - "license": "MIT" - }, - "node_modules/http-errors": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz", - "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==", - "license": "MIT", - "dependencies": { - "depd": "2.0.0", - "inherits": "2.0.4", - "setprototypeof": "1.2.0", - "statuses": "2.0.1", - "toidentifier": "1.0.1" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/http-parser-js": { - "version": "0.5.8", - "resolved": "https://registry.npmjs.org/http-parser-js/-/http-parser-js-0.5.8.tgz", - "integrity": "sha512-SGeBX54F94Wgu5RH3X5jsDtf4eHyRogWX1XGT3b4HuW3tQPM4AaBzoUji/4AAJNXCEOWZ5O0DgZmJw1947gD5Q==", - "license": "MIT" - }, - "node_modules/http-proxy": { - "version": "1.18.1", - "resolved": "https://registry.npmjs.org/http-proxy/-/http-proxy-1.18.1.tgz", - "integrity": "sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==", - "license": "MIT", - "dependencies": { - "eventemitter3": "^4.0.0", - "follow-redirects": "^1.0.0", - "requires-port": "^1.0.0" - }, - "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/http-proxy-middleware": { - "version": "2.0.7", - "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-2.0.7.tgz", - "integrity": "sha512-fgVY8AV7qU7z/MmXJ/rxwbrtQH4jBQ9m7kp3llF0liB7glmFeVZFBepQb32T3y8n8k2+AEYuMPCpinYW+/CuRA==", - "license": "MIT", - "dependencies": { - "@types/http-proxy": "^1.17.8", - "http-proxy": "^1.18.1", - "is-glob": "^4.0.1", - "is-plain-obj": "^3.0.0", - "micromatch": "^4.0.2" - }, - "engines": { - "node": ">=12.0.0" - }, - "peerDependencies": { - "@types/express": "^4.17.13" - }, - "peerDependenciesMeta": { - "@types/express": { - "optional": true - } - } - }, - "node_modules/http-proxy-middleware/node_modules/is-plain-obj": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-3.0.0.tgz", - "integrity": "sha512-gwsOE28k+23GP1B6vFl1oVh/WOzmawBrKwo5Ev6wMKzPkaXaCDIQKzLnvsA42DRlbVTWorkgTKIviAKCWkfUwA==", - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/http2-wrapper": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/http2-wrapper/-/http2-wrapper-2.2.1.tgz", - "integrity": "sha512-V5nVw1PAOgfI3Lmeaj2Exmeg7fenjhRUgz1lPSezy1CuhPYbgQtbQj4jZfEAEMlaL+vupsvhjqCyjzob0yxsmQ==", - "license": "MIT", - "dependencies": { - "quick-lru": "^5.1.1", - "resolve-alpn": "^1.2.0" - }, - "engines": { - "node": ">=10.19.0" - } - }, - "node_modules/human-signals": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", - "integrity": 
"sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", - "license": "Apache-2.0", - "engines": { - "node": ">=10.17.0" - } - }, - "node_modules/iconv-lite": { - "version": "0.6.3", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", - "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", - "license": "MIT", - "dependencies": { - "safer-buffer": ">= 2.1.2 < 3.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/icss-utils": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/icss-utils/-/icss-utils-5.1.0.tgz", - "integrity": "sha512-soFhflCVWLfRNOPU3iv5Z9VUdT44xFRbzjLsEzSr5AQmgqPMTHdU3PMT1Cf1ssx8fLNJDA1juftYl+PUcv3MqA==", - "license": "ISC", - "engines": { - "node": "^10 || ^12 || >= 14" - }, - "peerDependencies": { - "postcss": "^8.1.0" - } - }, - "node_modules/ignore": { - "version": "5.3.2", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", - "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", - "license": "MIT", - "engines": { - "node": ">= 4" - } - }, - "node_modules/image-size": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/image-size/-/image-size-2.0.2.tgz", - "integrity": "sha512-IRqXKlaXwgSMAMtpNzZa1ZAe8m+Sa1770Dhk8VkSsP9LS+iHD62Zd8FQKs8fbPiagBE7BzoFX23cxFnwshpV6w==", - "license": "MIT", - "bin": { - "image-size": "bin/image-size.js" - }, - "engines": { - "node": ">=16.x" - } - }, - "node_modules/import-fresh": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", - "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==", - "license": "MIT", - "dependencies": { - "parent-module": "^1.0.0", - "resolve-from": "^4.0.0" - }, - "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/import-lazy": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/import-lazy/-/import-lazy-4.0.0.tgz", - "integrity": "sha512-rKtvo6a868b5Hu3heneU+L4yEQ4jYKLtjpnPeUdK7h0yzXGmyBTypknlkCvHFBqfX9YlorEiMM6Dnq/5atfHkw==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/imurmurhash": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", - "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", - "license": "MIT", - "engines": { - "node": ">=0.8.19" - } - }, - "node_modules/indent-string": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", - "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/infima": { - "version": "0.2.0-alpha.45", - "resolved": "https://registry.npmjs.org/infima/-/infima-0.2.0-alpha.45.tgz", - "integrity": "sha512-uyH0zfr1erU1OohLk0fT4Rrb94AOhguWNOcD9uGrSpRvNB+6gZXUoJX5J0NtvzBO10YZ9PgvA4NFgt+fYg8ojw==", - "license": "MIT", - "engines": { - "node": ">=12" - } - }, - "node_modules/inflight": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", - "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", - "deprecated": "This module is 
not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", - "license": "ISC", - "dependencies": { - "once": "^1.3.0", - "wrappy": "1" - } - }, - "node_modules/inherits": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", - "license": "ISC" - }, - "node_modules/ini": { - "version": "1.3.8", - "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", - "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==", - "license": "ISC" - }, - "node_modules/inline-style-parser": { - "version": "0.2.4", - "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.2.4.tgz", - "integrity": "sha512-0aO8FkhNZlj/ZIbNi7Lxxr12obT7cL1moPfE4tg1LkX7LlLfC6DeX4l2ZEud1ukP9jNQyNnfzQVqwbwmAATY4Q==", - "license": "MIT" - }, - "node_modules/internmap": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/internmap/-/internmap-2.0.3.tgz", - "integrity": "sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg==", - "license": "ISC", - "engines": { - "node": ">=12" - } - }, - "node_modules/invariant": { - "version": "2.2.4", - "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz", - "integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==", - "license": "MIT", - "dependencies": { - "loose-envify": "^1.0.0" - } - }, - "node_modules/ipaddr.js": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-2.2.0.tgz", - "integrity": "sha512-Ag3wB2o37wslZS19hZqorUnrnzSkpOVy+IiiDEiTqNubEYpYuHWIf6K4psgN2ZWKExS4xhVCrRVfb/wfW8fWJA==", - "license": "MIT", - "engines": { - "node": ">= 10" - } - }, - "node_modules/is-alphabetical": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-2.0.1.tgz", - "integrity": "sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ==", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/is-alphanumerical": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-2.0.1.tgz", - "integrity": "sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==", - "license": "MIT", - "dependencies": { - "is-alphabetical": "^2.0.0", - "is-decimal": "^2.0.0" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/is-arrayish": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", - "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==", - "license": "MIT" - }, - "node_modules/is-binary-path": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", - "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", - "license": "MIT", - "dependencies": { - "binary-extensions": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/is-ci": { - "version": "3.0.1", - "resolved": 
"https://registry.npmjs.org/is-ci/-/is-ci-3.0.1.tgz", - "integrity": "sha512-ZYvCgrefwqoQ6yTyYUbQu64HsITZ3NfKX1lzaEYdkTDcfKzzCI/wthRRYKkdjHKFVgNiXKAKm65Zo1pk2as/QQ==", - "license": "MIT", - "dependencies": { - "ci-info": "^3.2.0" - }, - "bin": { - "is-ci": "bin.js" - } - }, - "node_modules/is-core-module": { - "version": "2.15.1", - "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.15.1.tgz", - "integrity": "sha512-z0vtXSwucUJtANQWldhbtbt7BnL0vxiFjIdDLAatwhDYty2bad6s+rijD6Ri4YuYJubLzIJLUidCh09e1djEVQ==", - "license": "MIT", - "dependencies": { - "hasown": "^2.0.2" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/is-decimal": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-2.0.1.tgz", - "integrity": "sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/is-docker": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz", - "integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==", - "license": "MIT", - "bin": { - "is-docker": "cli.js" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/is-extendable": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz", - "integrity": "sha512-5BMULNob1vgFX6EjQw5izWDxrecWK9AM72rugNr0TFldMOi0fj6Jk+zeKIt0xGj4cEfQIJth4w3OKWOJ4f+AFw==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-extglob": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", - "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/is-glob": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", - "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", - "license": "MIT", - "dependencies": { - "is-extglob": "^2.1.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-hexadecimal": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-2.0.1.tgz", - "integrity": "sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/is-installed-globally": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/is-installed-globally/-/is-installed-globally-0.4.0.tgz", - "integrity": "sha512-iwGqO3J21aaSkC7jWnHP/difazwS7SFeIqxv6wEtLU8Y5KlzFTjyqcSIT0d8s4+dDhKytsk9PJZ2BkS5eZwQRQ==", - "license": "MIT", - "dependencies": { - "global-dirs": "^3.0.0", - "is-path-inside": "^3.0.2" - }, - 
"engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/is-npm": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/is-npm/-/is-npm-6.0.0.tgz", - "integrity": "sha512-JEjxbSmtPSt1c8XTkVrlujcXdKV1/tvuQ7GwKcAlyiVLeYFQ2VHat8xfrDJsIkhCdF/tZ7CiIR3sy141c6+gPQ==", - "license": "MIT", - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/is-number": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", - "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", - "license": "MIT", - "engines": { - "node": ">=0.12.0" - } - }, - "node_modules/is-obj": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-1.0.1.tgz", - "integrity": "sha512-l4RyHgRqGN4Y3+9JHVrNqO+tN0rV5My76uW5/nuO4K1b6vw5G8d/cmFjP9tRfEsdhZNt0IFdZuK/c2Vr4Nb+Qg==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-path-inside": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", - "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/is-plain-obj": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz", - "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/is-plain-object": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", - "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", - "license": "MIT", - "dependencies": { - "isobject": "^3.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-regexp": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-regexp/-/is-regexp-1.0.0.tgz", - "integrity": "sha512-7zjFAPO4/gwyQAAgRRmqeEeyIICSdmCqa3tsVHMdBzaXXRiqopZL4Cyghg/XulGWrtABTpbnYYzzIRffLkP4oA==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-stream": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", - "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", - "license": "MIT", - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/is-typedarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", - "integrity": "sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA==", - "license": "MIT" - }, - "node_modules/is-wsl": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz", - "integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==", - "license": "MIT", - "dependencies": { - "is-docker": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/is-yarn-global": { - "version": "0.4.1", - 
"resolved": "https://registry.npmjs.org/is-yarn-global/-/is-yarn-global-0.4.1.tgz", - "integrity": "sha512-/kppl+R+LO5VmhYSEWARUFjodS25D68gvj8W7z0I7OWhUla5xWu8KL6CtB2V0R6yqhnRgbcaREMr4EEM6htLPQ==", - "license": "MIT", - "engines": { - "node": ">=12" - } - }, - "node_modules/isarray": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", - "integrity": "sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ==", - "license": "MIT" - }, - "node_modules/isexe": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", - "license": "ISC" - }, - "node_modules/isobject": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", - "integrity": "sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/jest-util": { - "version": "29.7.0", - "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", - "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", - "license": "MIT", - "dependencies": { - "@jest/types": "^29.6.3", - "@types/node": "*", - "chalk": "^4.0.0", - "ci-info": "^3.2.0", - "graceful-fs": "^4.2.9", - "picomatch": "^2.2.3" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-worker": { - "version": "29.7.0", - "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.7.0.tgz", - "integrity": "sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw==", - "license": "MIT", - "dependencies": { - "@types/node": "*", - "jest-util": "^29.7.0", - "merge-stream": "^2.0.0", - "supports-color": "^8.0.0" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-worker/node_modules/supports-color": { - "version": "8.1.1", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", - "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", - "license": "MIT", - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/supports-color?sponsor=1" - } - }, - "node_modules/jiti": { - "version": "1.21.6", - "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.6.tgz", - "integrity": "sha512-2yTgeWTWzMWkHu6Jp9NKgePDaYHbntiwvYuuJLbbN9vl7DC9DvXKOB2BC3ZZ92D3cvV/aflH0osDfwpHepQ53w==", - "license": "MIT", - "bin": { - "jiti": "bin/jiti.js" - } - }, - "node_modules/joi": { - "version": "17.13.3", - "resolved": "https://registry.npmjs.org/joi/-/joi-17.13.3.tgz", - "integrity": "sha512-otDA4ldcIx+ZXsKHWmp0YizCweVRZG96J10b0FevjfuncLO1oX59THoAmHkNubYJ+9gWsYsp5k8v4ib6oDv1fA==", - "license": "BSD-3-Clause", - "dependencies": { - "@hapi/hoek": "^9.3.0", - "@hapi/topo": "^5.1.0", - "@sideway/address": "^4.1.5", - "@sideway/formula": "^3.0.1", - "@sideway/pinpoint": "^2.0.0" - } - }, - "node_modules/js-tokens": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", - "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", - "license": "MIT" - }, - "node_modules/js-yaml": { - "version": 
"4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", - "license": "MIT", - "dependencies": { - "argparse": "^2.0.1" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, - "node_modules/jsesc": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.0.2.tgz", - "integrity": "sha512-xKqzzWXDttJuOcawBt4KnKHHIf5oQ/Cxax+0PWFG+DFDgHNAdi+TXECADI+RYiFUMmx8792xsMbbgXj4CwnP4g==", - "license": "MIT", - "bin": { - "jsesc": "bin/jsesc" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/json-buffer": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", - "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", - "license": "MIT" - }, - "node_modules/json-parse-even-better-errors": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", - "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", - "license": "MIT" - }, - "node_modules/json-schema-traverse": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", - "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", - "license": "MIT" - }, - "node_modules/json5": { - "version": "2.2.3", - "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", - "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", - "license": "MIT", - "bin": { - "json5": "lib/cli.js" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/jsonfile": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", - "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", - "license": "MIT", - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/katex": { - "version": "0.16.11", - "resolved": "https://registry.npmjs.org/katex/-/katex-0.16.11.tgz", - "integrity": "sha512-RQrI8rlHY92OLf3rho/Ts8i/XvjgguEjOkO1BEXcU3N8BqPpSzBNwV/G0Ukr+P/l3ivvJUE/Fa/CwbS6HesGNQ==", - "funding": [ - "https://opencollective.com/katex", - "https://github.com/sponsors/katex" - ], - "license": "MIT", - "dependencies": { - "commander": "^8.3.0" - }, - "bin": { - "katex": "cli.js" - } - }, - "node_modules/katex/node_modules/commander": { - "version": "8.3.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-8.3.0.tgz", - "integrity": "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==", - "license": "MIT", - "engines": { - "node": ">= 12" - } - }, - "node_modules/keyv": { - "version": "4.5.4", - "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", - "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", - "license": "MIT", - "dependencies": { - "json-buffer": "3.0.1" - } - }, - "node_modules/khroma": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/khroma/-/khroma-2.1.0.tgz", - "integrity": "sha512-Ls993zuzfayK269Svk9hzpeGUKob/sIgZzyHYdjQoAdQetRKpOLj+k/QQQ/6Qi0Yz65mlROrfd+Ev+1+7dz9Kw==" - }, - 
"node_modules/kind-of": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", - "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/kleur": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", - "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/kolorist": { - "version": "1.8.0", - "resolved": "https://registry.npmjs.org/kolorist/-/kolorist-1.8.0.tgz", - "integrity": "sha512-Y+60/zizpJ3HRH8DCss+q95yr6145JXZo46OTpFvDZWLfRCE4qChOyk1b26nMaNpfHHgxagk9dXT5OP0Tfe+dQ==", - "license": "MIT" - }, - "node_modules/langium": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/langium/-/langium-3.3.1.tgz", - "integrity": "sha512-QJv/h939gDpvT+9SiLVlY7tZC3xB2qK57v0J04Sh9wpMb6MP1q8gB21L3WIo8T5P1MSMg3Ep14L7KkDCFG3y4w==", - "license": "MIT", - "dependencies": { - "chevrotain": "~11.0.3", - "chevrotain-allstar": "~0.3.0", - "vscode-languageserver": "~9.0.1", - "vscode-languageserver-textdocument": "~1.0.11", - "vscode-uri": "~3.0.8" - }, - "engines": { - "node": ">=16.0.0" - } - }, - "node_modules/latest-version": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/latest-version/-/latest-version-7.0.0.tgz", - "integrity": "sha512-KvNT4XqAMzdcL6ka6Tl3i2lYeFDgXNCuIX+xNx6ZMVR1dFq+idXd9FLKNMOIx0t9mJ9/HudyX4oZWXZQ0UJHeg==", - "license": "MIT", - "dependencies": { - "package-json": "^8.1.0" - }, - "engines": { - "node": ">=14.16" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/launch-editor": { - "version": "2.9.1", - "resolved": "https://registry.npmjs.org/launch-editor/-/launch-editor-2.9.1.tgz", - "integrity": "sha512-Gcnl4Bd+hRO9P9icCP/RVVT2o8SFlPXofuCxvA2SaZuH45whSvf5p8x5oih5ftLiVhEI4sp5xDY+R+b3zJBh5w==", - "license": "MIT", - "dependencies": { - "picocolors": "^1.0.0", - "shell-quote": "^1.8.1" - } - }, - "node_modules/layout-base": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/layout-base/-/layout-base-1.0.2.tgz", - "integrity": "sha512-8h2oVEZNktL4BH2JCOI90iD1yXwL6iNW7KcCKT2QZgQJR2vbqDsldCTPRU9NifTCqHZci57XvQQ15YTu+sTYPg==", - "license": "MIT" - }, - "node_modules/leven": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", - "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/lilconfig": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-3.1.2.tgz", - "integrity": "sha512-eop+wDAvpItUys0FWkHIKeC9ybYrTGbU41U5K7+bttZZeohvnY7M9dZ5kB21GNWiFT2q1OoPTvncPCgSOVO5ow==", - "license": "MIT", - "engines": { - "node": ">=14" - }, - "funding": { - "url": "https://github.com/sponsors/antonk52" - } - }, - "node_modules/lines-and-columns": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", - "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", - "license": "MIT" - }, - "node_modules/loader-runner": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/loader-runner/-/loader-runner-4.3.0.tgz", - "integrity": 
"sha512-3R/1M+yS3j5ou80Me59j7F9IMs4PXs3VqRrm0TU3AbKPxlmpoY1TNscJV/oGJXo8qCatFGTfDbY6W6ipGOYXfg==", - "license": "MIT", - "engines": { - "node": ">=6.11.5" - } - }, - "node_modules/loader-utils": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.4.tgz", - "integrity": "sha512-xXqpXoINfFhgua9xiqD8fPFHgkoq1mmmpE92WlDbm9rNRd/EbRb+Gqf908T2DMfuHjjJlksiK2RbHVOdD/MqSw==", - "license": "MIT", - "dependencies": { - "big.js": "^5.2.2", - "emojis-list": "^3.0.0", - "json5": "^2.1.2" - }, - "engines": { - "node": ">=8.9.0" - } - }, - "node_modules/local-pkg": { - "version": "0.5.1", - "resolved": "https://registry.npmjs.org/local-pkg/-/local-pkg-0.5.1.tgz", - "integrity": "sha512-9rrA30MRRP3gBD3HTGnC6cDFpaE1kVDWxWgqWJUN0RvDNAo+Nz/9GxB+nHOH0ifbVFy0hSA1V6vFDvnx54lTEQ==", - "license": "MIT", - "dependencies": { - "mlly": "^1.7.3", - "pkg-types": "^1.2.1" - }, - "engines": { - "node": ">=14" - }, - "funding": { - "url": "https://github.com/sponsors/antfu" - } - }, - "node_modules/locate-path": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-7.2.0.tgz", - "integrity": "sha512-gvVijfZvn7R+2qyPX8mAuKcFGDf6Nc61GdvGafQsHL0sBIxfKzA+usWn4GFC/bk+QdwPUD4kWFJLhElipq+0VA==", - "license": "MIT", - "dependencies": { - "p-locate": "^6.0.0" - }, - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/lodash": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", - "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", - "license": "MIT" - }, - "node_modules/lodash-es": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.21.tgz", - "integrity": "sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==", - "license": "MIT" - }, - "node_modules/lodash.debounce": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz", - "integrity": "sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==", - "license": "MIT" - }, - "node_modules/lodash.memoize": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz", - "integrity": "sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==", - "license": "MIT" - }, - "node_modules/lodash.uniq": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz", - "integrity": "sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ==", - "license": "MIT" - }, - "node_modules/longest-streak": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-3.1.0.tgz", - "integrity": "sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/loose-envify": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", - "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", - "license": "MIT", - "dependencies": { - "js-tokens": "^3.0.0 || ^4.0.0" - }, - "bin": { - 
"loose-envify": "cli.js" - } - }, - "node_modules/lower-case": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/lower-case/-/lower-case-2.0.2.tgz", - "integrity": "sha512-7fm3l3NAF9WfN6W3JOmf5drwpVqX78JtoGJ3A6W0a6ZnldM41w2fV5D490psKFTpMds8TJse/eHLFFsNHHjHgg==", - "license": "MIT", - "dependencies": { - "tslib": "^2.0.3" - } - }, - "node_modules/lowercase-keys": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-3.0.0.tgz", - "integrity": "sha512-ozCC6gdQ+glXOQsveKD0YsDy8DSQFjDTz4zyzEHNV5+JP5D62LmfDZ6o1cycFx9ouG940M5dE8C8CTewdj2YWQ==", - "license": "MIT", - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/lru-cache": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", - "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", - "license": "ISC", - "dependencies": { - "yallist": "^3.0.2" - } - }, - "node_modules/lunr-languages": { - "version": "1.14.0", - "resolved": "https://registry.npmjs.org/lunr-languages/-/lunr-languages-1.14.0.tgz", - "integrity": "sha512-hWUAb2KqM3L7J5bcrngszzISY4BxrXn/Xhbb9TTCJYEGqlR1nG67/M14sp09+PTIRklobrn57IAxcdcO/ZFyNA==", - "license": "MPL-1.1" - }, - "node_modules/mark.js": { - "version": "8.11.1", - "resolved": "https://registry.npmjs.org/mark.js/-/mark.js-8.11.1.tgz", - "integrity": "sha512-1I+1qpDt4idfgLQG+BNWmrqku+7/2bi5nLf4YwF8y8zXvmfiTBY3PV3ZibfrjBueCByROpuBjLLFCajqkgYoLQ==", - "license": "MIT" - }, - "node_modules/markdown-extensions": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/markdown-extensions/-/markdown-extensions-2.0.0.tgz", - "integrity": "sha512-o5vL7aDWatOTX8LzaS1WMoaoxIiLRQJuIKKe2wAw6IeULDHaqbiqiggmx+pKvZDb1Sj+pE46Sn1T7lCqfFtg1Q==", - "license": "MIT", - "engines": { - "node": ">=16" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/markdown-table": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/markdown-table/-/markdown-table-3.0.4.tgz", - "integrity": "sha512-wiYz4+JrLyb/DqW2hkFJxP7Vd7JuTDm77fvbM8VfEQdmSMqcImWeeRbHwZjBjIFki/VaMK2BhFi7oUUZeM5bqw==", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/marked": { - "version": "15.0.12", - "resolved": "https://registry.npmjs.org/marked/-/marked-15.0.12.tgz", - "integrity": "sha512-8dD6FusOQSrpv9Z1rdNMdlSgQOIP880DHqnohobOmYLElGEqAL/JvxvuxZO16r4HtjTlfPRDC1hbvxC9dPN2nA==", - "license": "MIT", - "bin": { - "marked": "bin/marked.js" - }, - "engines": { - "node": ">= 18" - } - }, - "node_modules/mdast-util-directive": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/mdast-util-directive/-/mdast-util-directive-3.0.0.tgz", - "integrity": "sha512-JUpYOqKI4mM3sZcNxmF/ox04XYFFkNwr0CFlrQIkCwbvH0xzMCqkMqAde9wRd80VAhaUrwFwKm2nxretdT1h7Q==", - "license": "MIT", - "dependencies": { - "@types/mdast": "^4.0.0", - "@types/unist": "^3.0.0", - "devlop": "^1.0.0", - "mdast-util-from-markdown": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0", - "parse-entities": "^4.0.0", - "stringify-entities": "^4.0.0", - "unist-util-visit-parents": "^6.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/mdast-util-find-and-replace": { - "version": "3.0.1", - "resolved": 
"https://registry.npmjs.org/mdast-util-find-and-replace/-/mdast-util-find-and-replace-3.0.1.tgz", - "integrity": "sha512-SG21kZHGC3XRTSUhtofZkBzZTJNM5ecCi0SK2IMKmSXR8vO3peL+kb1O0z7Zl83jKtutG4k5Wv/W7V3/YHvzPA==", - "license": "MIT", - "dependencies": { - "@types/mdast": "^4.0.0", - "escape-string-regexp": "^5.0.0", - "unist-util-is": "^6.0.0", - "unist-util-visit-parents": "^6.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/mdast-util-find-and-replace/node_modules/escape-string-regexp": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", - "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/mdast-util-from-markdown": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.2.tgz", - "integrity": "sha512-uZhTV/8NBuw0WHkPTrCqDOl0zVe1BIng5ZtHoDk49ME1qqcjYmmLmOf0gELgcRMxN4w2iuIeVso5/6QymSrgmA==", - "license": "MIT", - "dependencies": { - "@types/mdast": "^4.0.0", - "@types/unist": "^3.0.0", - "decode-named-character-reference": "^1.0.0", - "devlop": "^1.0.0", - "mdast-util-to-string": "^4.0.0", - "micromark": "^4.0.0", - "micromark-util-decode-numeric-character-reference": "^2.0.0", - "micromark-util-decode-string": "^2.0.0", - "micromark-util-normalize-identifier": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0", - "unist-util-stringify-position": "^4.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/mdast-util-from-markdown/node_modules/micromark-util-symbol": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", - "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT" - }, - "node_modules/mdast-util-frontmatter": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/mdast-util-frontmatter/-/mdast-util-frontmatter-2.0.1.tgz", - "integrity": "sha512-LRqI9+wdgC25P0URIJY9vwocIzCcksduHQ9OF2joxQoyTNVduwLAFUzjoopuRJbJAReaKrNQKAZKL3uCMugWJA==", - "license": "MIT", - "dependencies": { - "@types/mdast": "^4.0.0", - "devlop": "^1.0.0", - "escape-string-regexp": "^5.0.0", - "mdast-util-from-markdown": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0", - "micromark-extension-frontmatter": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/mdast-util-frontmatter/node_modules/escape-string-regexp": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", - "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/mdast-util-gfm": { - "version": "3.0.0", - "resolved": 
"https://registry.npmjs.org/mdast-util-gfm/-/mdast-util-gfm-3.0.0.tgz", - "integrity": "sha512-dgQEX5Amaq+DuUqf26jJqSK9qgixgd6rYDHAv4aTBuA92cTknZlKpPfa86Z/s8Dj8xsAQpFfBmPUHWJBWqS4Bw==", - "license": "MIT", - "dependencies": { - "mdast-util-from-markdown": "^2.0.0", - "mdast-util-gfm-autolink-literal": "^2.0.0", - "mdast-util-gfm-footnote": "^2.0.0", - "mdast-util-gfm-strikethrough": "^2.0.0", - "mdast-util-gfm-table": "^2.0.0", - "mdast-util-gfm-task-list-item": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/mdast-util-gfm-autolink-literal": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-2.0.1.tgz", - "integrity": "sha512-5HVP2MKaP6L+G6YaxPNjuL0BPrq9orG3TsrZ9YXbA3vDw/ACI4MEsnoDpn6ZNm7GnZgtAcONJyPhOP8tNJQavQ==", - "license": "MIT", - "dependencies": { - "@types/mdast": "^4.0.0", - "ccount": "^2.0.0", - "devlop": "^1.0.0", - "mdast-util-find-and-replace": "^3.0.0", - "micromark-util-character": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/mdast-util-gfm-autolink-literal/node_modules/micromark-util-character": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", - "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/mdast-util-gfm-autolink-literal/node_modules/micromark-util-symbol": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", - "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT" - }, - "node_modules/mdast-util-gfm-footnote": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/mdast-util-gfm-footnote/-/mdast-util-gfm-footnote-2.0.0.tgz", - "integrity": "sha512-5jOT2boTSVkMnQ7LTrd6n/18kqwjmuYqo7JUPe+tRCY6O7dAuTFMtTPauYYrMPpox9hlN0uOx/FL8XvEfG9/mQ==", - "license": "MIT", - "dependencies": { - "@types/mdast": "^4.0.0", - "devlop": "^1.1.0", - "mdast-util-from-markdown": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0", - "micromark-util-normalize-identifier": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/mdast-util-gfm-strikethrough": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/mdast-util-gfm-strikethrough/-/mdast-util-gfm-strikethrough-2.0.0.tgz", - "integrity": "sha512-mKKb915TF+OC5ptj5bJ7WFRPdYtuHv0yTRxK2tJvi+BDqbkiG7h7u/9SI89nRAYcmap2xHQL9D+QG/6wSrTtXg==", - "license": "MIT", - "dependencies": { - "@types/mdast": "^4.0.0", - "mdast-util-from-markdown": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - 
"node_modules/mdast-util-gfm-table": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/mdast-util-gfm-table/-/mdast-util-gfm-table-2.0.0.tgz", - "integrity": "sha512-78UEvebzz/rJIxLvE7ZtDd/vIQ0RHv+3Mh5DR96p7cS7HsBhYIICDBCu8csTNWNO6tBWfqXPWekRuj2FNOGOZg==", - "license": "MIT", - "dependencies": { - "@types/mdast": "^4.0.0", - "devlop": "^1.0.0", - "markdown-table": "^3.0.0", - "mdast-util-from-markdown": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/mdast-util-gfm-task-list-item": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/mdast-util-gfm-task-list-item/-/mdast-util-gfm-task-list-item-2.0.0.tgz", - "integrity": "sha512-IrtvNvjxC1o06taBAVJznEnkiHxLFTzgonUdy8hzFVeDun0uTjxxrRGVaNFqkU1wJR3RBPEfsxmU6jDWPofrTQ==", - "license": "MIT", - "dependencies": { - "@types/mdast": "^4.0.0", - "devlop": "^1.0.0", - "mdast-util-from-markdown": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/mdast-util-mdx": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/mdast-util-mdx/-/mdast-util-mdx-3.0.0.tgz", - "integrity": "sha512-JfbYLAW7XnYTTbUsmpu0kdBUVe+yKVJZBItEjwyYJiDJuZ9w4eeaqks4HQO+R7objWgS2ymV60GYpI14Ug554w==", - "license": "MIT", - "dependencies": { - "mdast-util-from-markdown": "^2.0.0", - "mdast-util-mdx-expression": "^2.0.0", - "mdast-util-mdx-jsx": "^3.0.0", - "mdast-util-mdxjs-esm": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/mdast-util-mdx-expression": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/mdast-util-mdx-expression/-/mdast-util-mdx-expression-2.0.1.tgz", - "integrity": "sha512-J6f+9hUp+ldTZqKRSg7Vw5V6MqjATc+3E4gf3CFNcuZNWD8XdyI6zQ8GqH7f8169MM6P7hMBRDVGnn7oHB9kXQ==", - "license": "MIT", - "dependencies": { - "@types/estree-jsx": "^1.0.0", - "@types/hast": "^3.0.0", - "@types/mdast": "^4.0.0", - "devlop": "^1.0.0", - "mdast-util-from-markdown": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/mdast-util-mdx-jsx": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-3.1.3.tgz", - "integrity": "sha512-bfOjvNt+1AcbPLTFMFWY149nJz0OjmewJs3LQQ5pIyVGxP4CdOqNVJL6kTaM5c68p8q82Xv3nCyFfUnuEcH3UQ==", - "license": "MIT", - "dependencies": { - "@types/estree-jsx": "^1.0.0", - "@types/hast": "^3.0.0", - "@types/mdast": "^4.0.0", - "@types/unist": "^3.0.0", - "ccount": "^2.0.0", - "devlop": "^1.1.0", - "mdast-util-from-markdown": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0", - "parse-entities": "^4.0.0", - "stringify-entities": "^4.0.0", - "unist-util-stringify-position": "^4.0.0", - "vfile-message": "^4.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/mdast-util-mdxjs-esm": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/mdast-util-mdxjs-esm/-/mdast-util-mdxjs-esm-2.0.1.tgz", - "integrity": "sha512-EcmOpxsZ96CvlP03NghtH1EsLtr0n9Tm4lPUJUBccV9RwUOneqSycg19n5HGzCf+10LozMRSObtVr3ee1WoHtg==", - "license": "MIT", - "dependencies": { - "@types/estree-jsx": "^1.0.0", - "@types/hast": "^3.0.0", - "@types/mdast": "^4.0.0", - "devlop": 
"^1.0.0", - "mdast-util-from-markdown": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/mdast-util-phrasing": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/mdast-util-phrasing/-/mdast-util-phrasing-4.1.0.tgz", - "integrity": "sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w==", - "license": "MIT", - "dependencies": { - "@types/mdast": "^4.0.0", - "unist-util-is": "^6.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/mdast-util-to-hast": { - "version": "13.2.0", - "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.2.0.tgz", - "integrity": "sha512-QGYKEuUsYT9ykKBCMOEDLsU5JRObWQusAolFMeko/tYPufNkRffBAQjIE+99jbA87xv6FgmjLtwjh9wBWajwAA==", - "license": "MIT", - "dependencies": { - "@types/hast": "^3.0.0", - "@types/mdast": "^4.0.0", - "@ungap/structured-clone": "^1.0.0", - "devlop": "^1.0.0", - "micromark-util-sanitize-uri": "^2.0.0", - "trim-lines": "^3.0.0", - "unist-util-position": "^5.0.0", - "unist-util-visit": "^5.0.0", - "vfile": "^6.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/mdast-util-to-markdown": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/mdast-util-to-markdown/-/mdast-util-to-markdown-2.1.2.tgz", - "integrity": "sha512-xj68wMTvGXVOKonmog6LwyJKrYXZPvlwabaryTjLh9LuvovB/KAH+kvi8Gjj+7rJjsFi23nkUxRQv1KqSroMqA==", - "license": "MIT", - "dependencies": { - "@types/mdast": "^4.0.0", - "@types/unist": "^3.0.0", - "longest-streak": "^3.0.0", - "mdast-util-phrasing": "^4.0.0", - "mdast-util-to-string": "^4.0.0", - "micromark-util-classify-character": "^2.0.0", - "micromark-util-decode-string": "^2.0.0", - "unist-util-visit": "^5.0.0", - "zwitch": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/mdast-util-to-string": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-4.0.0.tgz", - "integrity": "sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==", - "license": "MIT", - "dependencies": { - "@types/mdast": "^4.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/mdn-data": { - "version": "2.0.30", - "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.30.tgz", - "integrity": "sha512-GaqWWShW4kv/G9IEucWScBx9G1/vsFZZJUO+tD26M8J8z3Kw5RDQjaoZe03YAClgeS/SWPOcb4nkFBTEi5DUEA==", - "license": "CC0-1.0" - }, - "node_modules/media-typer": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", - "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/memfs": { - "version": "3.5.3", - "resolved": "https://registry.npmjs.org/memfs/-/memfs-3.5.3.tgz", - "integrity": "sha512-UERzLsxzllchadvbPs5aolHh65ISpKpM+ccLbOJ8/vvpBKmAWf+la7dXFy7Mr0ySHbdHrFv5kGFCUHHe6GFEmw==", - "license": "Unlicense", - "dependencies": { - "fs-monkey": "^1.0.4" - }, - "engines": { - "node": ">= 4.0.0" - } - }, - "node_modules/merge-descriptors": { - "version": "1.0.3", - "resolved": 
"https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.3.tgz", - "integrity": "sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==", - "license": "MIT", - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/merge-stream": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", - "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", - "license": "MIT" - }, - "node_modules/merge2": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", - "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", - "license": "MIT", - "engines": { - "node": ">= 8" - } - }, - "node_modules/mermaid": { - "version": "11.6.0", - "resolved": "https://registry.npmjs.org/mermaid/-/mermaid-11.6.0.tgz", - "integrity": "sha512-PE8hGUy1LDlWIHWBP05SFdqUHGmRcCcK4IzpOKPE35eOw+G9zZgcnMpyunJVUEOgb//KBORPjysKndw8bFLuRg==", - "license": "MIT", - "dependencies": { - "@braintree/sanitize-url": "^7.0.4", - "@iconify/utils": "^2.1.33", - "@mermaid-js/parser": "^0.4.0", - "@types/d3": "^7.4.3", - "cytoscape": "^3.29.3", - "cytoscape-cose-bilkent": "^4.1.0", - "cytoscape-fcose": "^2.2.0", - "d3": "^7.9.0", - "d3-sankey": "^0.12.3", - "dagre-d3-es": "7.0.11", - "dayjs": "^1.11.13", - "dompurify": "^3.2.4", - "katex": "^0.16.9", - "khroma": "^2.1.0", - "lodash-es": "^4.17.21", - "marked": "^15.0.7", - "roughjs": "^4.6.6", - "stylis": "^4.3.6", - "ts-dedent": "^2.2.0", - "uuid": "^11.1.0" - } - }, - "node_modules/methods": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", - "integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/micromark": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/micromark/-/micromark-4.0.1.tgz", - "integrity": "sha512-eBPdkcoCNvYcxQOAKAlceo5SNdzZWfF+FcSupREAzdAh9rRmE239CEQAiTwIgblwnoM8zzj35sZ5ZwvSEOF6Kw==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "@types/debug": "^4.0.0", - "debug": "^4.0.0", - "decode-named-character-reference": "^1.0.0", - "devlop": "^1.0.0", - "micromark-core-commonmark": "^2.0.0", - "micromark-factory-space": "^2.0.0", - "micromark-util-character": "^2.0.0", - "micromark-util-chunked": "^2.0.0", - "micromark-util-combine-extensions": "^2.0.0", - "micromark-util-decode-numeric-character-reference": "^2.0.0", - "micromark-util-encode": "^2.0.0", - "micromark-util-normalize-identifier": "^2.0.0", - "micromark-util-resolve-all": "^2.0.0", - "micromark-util-sanitize-uri": "^2.0.0", - "micromark-util-subtokenize": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-core-commonmark": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/micromark-core-commonmark/-/micromark-core-commonmark-2.0.2.tgz", - "integrity": "sha512-FKjQKbxd1cibWMM1P9N+H8TwlgGgSkWZMmfuVucLCHaYqeSvJ0hFeHsIa65pA2nYbes0f8LDHPMrd9X7Ujxg9w==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - 
"url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "decode-named-character-reference": "^1.0.0", - "devlop": "^1.0.0", - "micromark-factory-destination": "^2.0.0", - "micromark-factory-label": "^2.0.0", - "micromark-factory-space": "^2.0.0", - "micromark-factory-title": "^2.0.0", - "micromark-factory-whitespace": "^2.0.0", - "micromark-util-character": "^2.0.0", - "micromark-util-chunked": "^2.0.0", - "micromark-util-classify-character": "^2.0.0", - "micromark-util-html-tag-name": "^2.0.0", - "micromark-util-normalize-identifier": "^2.0.0", - "micromark-util-resolve-all": "^2.0.0", - "micromark-util-subtokenize": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-core-commonmark/node_modules/micromark-factory-space": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", - "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-util-character": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-core-commonmark/node_modules/micromark-util-character": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", - "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-core-commonmark/node_modules/micromark-util-symbol": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", - "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT" - }, - "node_modules/micromark-extension-directive": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/micromark-extension-directive/-/micromark-extension-directive-3.0.2.tgz", - "integrity": "sha512-wjcXHgk+PPdmvR58Le9d7zQYWy+vKEU9Se44p2CrCDPiLr2FMyiT4Fyb5UFKFC66wGB3kPlgD7q3TnoqPS7SZA==", - "license": "MIT", - "dependencies": { - "devlop": "^1.0.0", - "micromark-factory-space": "^2.0.0", - "micromark-factory-whitespace": "^2.0.0", - "micromark-util-character": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0", - "parse-entities": "^4.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/micromark-extension-directive/node_modules/micromark-factory-space": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", - "integrity": 
"sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-util-character": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-extension-directive/node_modules/micromark-util-character": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", - "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-extension-directive/node_modules/micromark-util-symbol": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", - "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT" - }, - "node_modules/micromark-extension-frontmatter": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-extension-frontmatter/-/micromark-extension-frontmatter-2.0.0.tgz", - "integrity": "sha512-C4AkuM3dA58cgZha7zVnuVxBhDsbttIMiytjgsM2XbHAB2faRVaHRle40558FBN+DJcrLNCoqG5mlrpdU4cRtg==", - "license": "MIT", - "dependencies": { - "fault": "^2.0.0", - "micromark-util-character": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/micromark-extension-frontmatter/node_modules/micromark-util-character": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", - "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-extension-frontmatter/node_modules/micromark-util-symbol": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", - "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT" - }, - "node_modules/micromark-extension-gfm": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/micromark-extension-gfm/-/micromark-extension-gfm-3.0.0.tgz", - "integrity": 
"sha512-vsKArQsicm7t0z2GugkCKtZehqUm31oeGBV/KVSorWSy8ZlNAv7ytjFhvaryUiCUJYqs+NoE6AFhpQvBTM6Q4w==", - "license": "MIT", - "dependencies": { - "micromark-extension-gfm-autolink-literal": "^2.0.0", - "micromark-extension-gfm-footnote": "^2.0.0", - "micromark-extension-gfm-strikethrough": "^2.0.0", - "micromark-extension-gfm-table": "^2.0.0", - "micromark-extension-gfm-tagfilter": "^2.0.0", - "micromark-extension-gfm-task-list-item": "^2.0.0", - "micromark-util-combine-extensions": "^2.0.0", - "micromark-util-types": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/micromark-extension-gfm-autolink-literal": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/micromark-extension-gfm-autolink-literal/-/micromark-extension-gfm-autolink-literal-2.1.0.tgz", - "integrity": "sha512-oOg7knzhicgQ3t4QCjCWgTmfNhvQbDDnJeVu9v81r7NltNCVmhPy1fJRX27pISafdjL+SVc4d3l48Gb6pbRypw==", - "license": "MIT", - "dependencies": { - "micromark-util-character": "^2.0.0", - "micromark-util-sanitize-uri": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/micromark-extension-gfm-autolink-literal/node_modules/micromark-util-character": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", - "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-extension-gfm-autolink-literal/node_modules/micromark-util-symbol": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", - "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT" - }, - "node_modules/micromark-extension-gfm-footnote": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/micromark-extension-gfm-footnote/-/micromark-extension-gfm-footnote-2.1.0.tgz", - "integrity": "sha512-/yPhxI1ntnDNsiHtzLKYnE3vf9JZ6cAisqVDauhp4CEHxlb4uoOTxOCJ+9s51bIB8U1N1FJ1RXOKTIlD5B/gqw==", - "license": "MIT", - "dependencies": { - "devlop": "^1.0.0", - "micromark-core-commonmark": "^2.0.0", - "micromark-factory-space": "^2.0.0", - "micromark-util-character": "^2.0.0", - "micromark-util-normalize-identifier": "^2.0.0", - "micromark-util-sanitize-uri": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/micromark-extension-gfm-footnote/node_modules/micromark-factory-space": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", - "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", - 
"funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-util-character": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-extension-gfm-footnote/node_modules/micromark-util-character": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", - "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-extension-gfm-footnote/node_modules/micromark-util-symbol": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", - "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT" - }, - "node_modules/micromark-extension-gfm-strikethrough": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/micromark-extension-gfm-strikethrough/-/micromark-extension-gfm-strikethrough-2.1.0.tgz", - "integrity": "sha512-ADVjpOOkjz1hhkZLlBiYA9cR2Anf8F4HqZUO6e5eDcPQd0Txw5fxLzzxnEkSkfnD0wziSGiv7sYhk/ktvbf1uw==", - "license": "MIT", - "dependencies": { - "devlop": "^1.0.0", - "micromark-util-chunked": "^2.0.0", - "micromark-util-classify-character": "^2.0.0", - "micromark-util-resolve-all": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/micromark-extension-gfm-strikethrough/node_modules/micromark-util-symbol": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", - "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT" - }, - "node_modules/micromark-extension-gfm-table": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/micromark-extension-gfm-table/-/micromark-extension-gfm-table-2.1.0.tgz", - "integrity": "sha512-Ub2ncQv+fwD70/l4ou27b4YzfNaCJOvyX4HxXU15m7mpYY+rjuWzsLIPZHJL253Z643RpbcP1oeIJlQ/SKW67g==", - "license": "MIT", - "dependencies": { - "devlop": "^1.0.0", - "micromark-factory-space": "^2.0.0", - "micromark-util-character": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/micromark-extension-gfm-table/node_modules/micromark-factory-space": { - "version": "2.0.1", - "resolved": 
"https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", - "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-util-character": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-extension-gfm-table/node_modules/micromark-util-character": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", - "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-extension-gfm-table/node_modules/micromark-util-symbol": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", - "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT" - }, - "node_modules/micromark-extension-gfm-tagfilter": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-extension-gfm-tagfilter/-/micromark-extension-gfm-tagfilter-2.0.0.tgz", - "integrity": "sha512-xHlTOmuCSotIA8TW1mDIM6X2O1SiX5P9IuDtqGonFhEK0qgRI4yeC6vMxEV2dgyr2TiD+2PQ10o+cOhdVAcwfg==", - "license": "MIT", - "dependencies": { - "micromark-util-types": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/micromark-extension-gfm-task-list-item": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/micromark-extension-gfm-task-list-item/-/micromark-extension-gfm-task-list-item-2.1.0.tgz", - "integrity": "sha512-qIBZhqxqI6fjLDYFTBIa4eivDMnP+OZqsNwmQ3xNLE4Cxwc+zfQEfbs6tzAo2Hjq+bh6q5F+Z8/cksrLFYWQQw==", - "license": "MIT", - "dependencies": { - "devlop": "^1.0.0", - "micromark-factory-space": "^2.0.0", - "micromark-util-character": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/micromark-extension-gfm-task-list-item/node_modules/micromark-factory-space": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", - "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-util-character": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - 
"node_modules/micromark-extension-gfm-task-list-item/node_modules/micromark-util-character": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", - "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-extension-gfm-task-list-item/node_modules/micromark-util-symbol": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", - "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT" - }, - "node_modules/micromark-extension-mdx-expression": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/micromark-extension-mdx-expression/-/micromark-extension-mdx-expression-3.0.0.tgz", - "integrity": "sha512-sI0nwhUDz97xyzqJAbHQhp5TfaxEvZZZ2JDqUo+7NvyIYG6BZ5CPPqj2ogUoPJlmXHBnyZUzISg9+oUmU6tUjQ==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "@types/estree": "^1.0.0", - "devlop": "^1.0.0", - "micromark-factory-mdx-expression": "^2.0.0", - "micromark-factory-space": "^2.0.0", - "micromark-util-character": "^2.0.0", - "micromark-util-events-to-acorn": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-extension-mdx-expression/node_modules/micromark-factory-space": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", - "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-util-character": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-extension-mdx-expression/node_modules/micromark-util-character": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", - "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-extension-mdx-expression/node_modules/micromark-util-symbol": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", - 
"integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT" - }, - "node_modules/micromark-extension-mdx-jsx": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/micromark-extension-mdx-jsx/-/micromark-extension-mdx-jsx-3.0.1.tgz", - "integrity": "sha512-vNuFb9czP8QCtAQcEJn0UJQJZA8Dk6DXKBqx+bg/w0WGuSxDxNr7hErW89tHUY31dUW4NqEOWwmEUNhjTFmHkg==", - "license": "MIT", - "dependencies": { - "@types/acorn": "^4.0.0", - "@types/estree": "^1.0.0", - "devlop": "^1.0.0", - "estree-util-is-identifier-name": "^3.0.0", - "micromark-factory-mdx-expression": "^2.0.0", - "micromark-factory-space": "^2.0.0", - "micromark-util-character": "^2.0.0", - "micromark-util-events-to-acorn": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0", - "vfile-message": "^4.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/micromark-extension-mdx-jsx/node_modules/micromark-factory-space": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", - "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-util-character": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-extension-mdx-jsx/node_modules/micromark-util-character": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", - "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-extension-mdx-jsx/node_modules/micromark-util-symbol": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", - "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT" - }, - "node_modules/micromark-extension-mdx-md": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-extension-mdx-md/-/micromark-extension-mdx-md-2.0.0.tgz", - "integrity": "sha512-EpAiszsB3blw4Rpba7xTOUptcFeBFi+6PY8VnJ2hhimH+vCQDirWgsMpz7w1XcZE7LVrSAUGb9VJpG9ghlYvYQ==", - "license": "MIT", - "dependencies": { - "micromark-util-types": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/micromark-extension-mdxjs": { - "version": "3.0.0", - "resolved": 
"https://registry.npmjs.org/micromark-extension-mdxjs/-/micromark-extension-mdxjs-3.0.0.tgz", - "integrity": "sha512-A873fJfhnJ2siZyUrJ31l34Uqwy4xIFmvPY1oj+Ean5PHcPBYzEsvqvWGaWcfEIr11O5Dlw3p2y0tZWpKHDejQ==", - "license": "MIT", - "dependencies": { - "acorn": "^8.0.0", - "acorn-jsx": "^5.0.0", - "micromark-extension-mdx-expression": "^3.0.0", - "micromark-extension-mdx-jsx": "^3.0.0", - "micromark-extension-mdx-md": "^2.0.0", - "micromark-extension-mdxjs-esm": "^3.0.0", - "micromark-util-combine-extensions": "^2.0.0", - "micromark-util-types": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/micromark-extension-mdxjs-esm": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/micromark-extension-mdxjs-esm/-/micromark-extension-mdxjs-esm-3.0.0.tgz", - "integrity": "sha512-DJFl4ZqkErRpq/dAPyeWp15tGrcrrJho1hKK5uBS70BCtfrIFg81sqcTVu3Ta+KD1Tk5vAtBNElWxtAa+m8K9A==", - "license": "MIT", - "dependencies": { - "@types/estree": "^1.0.0", - "devlop": "^1.0.0", - "micromark-core-commonmark": "^2.0.0", - "micromark-util-character": "^2.0.0", - "micromark-util-events-to-acorn": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0", - "unist-util-position-from-estree": "^2.0.0", - "vfile-message": "^4.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/micromark-extension-mdxjs-esm/node_modules/micromark-util-character": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", - "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-extension-mdxjs-esm/node_modules/micromark-util-symbol": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", - "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT" - }, - "node_modules/micromark-factory-destination": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-factory-destination/-/micromark-factory-destination-2.0.1.tgz", - "integrity": "sha512-Xe6rDdJlkmbFRExpTOmRj9N3MaWmbAgdpSrBQvCFqhezUn4AHqJHbaEnfbVYYiexVSs//tqOdY/DxhjdCiJnIA==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-util-character": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-factory-destination/node_modules/micromark-util-character": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", - "integrity": 
"sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-factory-destination/node_modules/micromark-util-symbol": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", - "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT" - }, - "node_modules/micromark-factory-label": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-factory-label/-/micromark-factory-label-2.0.1.tgz", - "integrity": "sha512-VFMekyQExqIW7xIChcXn4ok29YE3rnuyveW3wZQWWqF4Nv9Wk5rgJ99KzPvHjkmPXF93FXIbBp6YdW3t71/7Vg==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "devlop": "^1.0.0", - "micromark-util-character": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-factory-label/node_modules/micromark-util-character": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", - "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-factory-label/node_modules/micromark-util-symbol": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", - "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT" - }, - "node_modules/micromark-factory-mdx-expression": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/micromark-factory-mdx-expression/-/micromark-factory-mdx-expression-2.0.2.tgz", - "integrity": "sha512-5E5I2pFzJyg2CtemqAbcyCktpHXuJbABnsb32wX2U8IQKhhVFBqkcZR5LRm1WVoFqa4kTueZK4abep7wdo9nrw==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "@types/estree": "^1.0.0", - "devlop": "^1.0.0", - "micromark-factory-space": "^2.0.0", - "micromark-util-character": "^2.0.0", - "micromark-util-events-to-acorn": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0", - 
"unist-util-position-from-estree": "^2.0.0", - "vfile-message": "^4.0.0" - } - }, - "node_modules/micromark-factory-mdx-expression/node_modules/micromark-factory-space": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", - "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-util-character": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-factory-mdx-expression/node_modules/micromark-util-character": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", - "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-factory-mdx-expression/node_modules/micromark-util-symbol": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", - "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT" - }, - "node_modules/micromark-factory-space": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-1.1.0.tgz", - "integrity": "sha512-cRzEj7c0OL4Mw2v6nwzttyOZe8XY/Z8G0rzmWQZTBi/jjwyw/U4uqKtUORXQrR5bAZZnbTI/feRV/R7hc4jQYQ==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-util-character": "^1.0.0", - "micromark-util-types": "^1.0.0" - } - }, - "node_modules/micromark-factory-space/node_modules/micromark-util-types": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-1.1.0.tgz", - "integrity": "sha512-ukRBgie8TIAcacscVHSiddHjO4k/q3pnedmzMQ4iwDcK0FtFCohKOlFbaOL/mPgfnPsL3C1ZyxJa4sbWrBl3jg==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT" - }, - "node_modules/micromark-factory-title": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-factory-title/-/micromark-factory-title-2.0.1.tgz", - "integrity": "sha512-5bZ+3CjhAd9eChYTHsjy6TGxpOFSKgKKJPJxr293jTbfry2KDoWkhBb6TcPVB4NmzaPhMs1Frm9AZH7OD4Cjzw==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - 
"micromark-factory-space": "^2.0.0", - "micromark-util-character": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-factory-title/node_modules/micromark-factory-space": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", - "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-util-character": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-factory-title/node_modules/micromark-util-character": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", - "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-factory-title/node_modules/micromark-util-symbol": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", - "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT" - }, - "node_modules/micromark-factory-whitespace": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-factory-whitespace/-/micromark-factory-whitespace-2.0.1.tgz", - "integrity": "sha512-Ob0nuZ3PKt/n0hORHyvoD9uZhr+Za8sFoP+OnMcnWK5lngSzALgQYKMr9RJVOWLqQYuyn6ulqGWSXdwf6F80lQ==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-factory-space": "^2.0.0", - "micromark-util-character": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-factory-whitespace/node_modules/micromark-factory-space": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", - "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-util-character": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-factory-whitespace/node_modules/micromark-util-character": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", - "integrity": 
"sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-factory-whitespace/node_modules/micromark-util-symbol": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", - "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT" - }, - "node_modules/micromark-util-character": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-1.2.0.tgz", - "integrity": "sha512-lXraTwcX3yH/vMDaFWCQJP1uIszLVebzUa3ZHdrgxr7KEU/9mL4mVgCpGbyhvNLNlauROiNUq7WN5u7ndbY6xg==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-util-symbol": "^1.0.0", - "micromark-util-types": "^1.0.0" - } - }, - "node_modules/micromark-util-character/node_modules/micromark-util-types": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-1.1.0.tgz", - "integrity": "sha512-ukRBgie8TIAcacscVHSiddHjO4k/q3pnedmzMQ4iwDcK0FtFCohKOlFbaOL/mPgfnPsL3C1ZyxJa4sbWrBl3jg==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT" - }, - "node_modules/micromark-util-chunked": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-util-chunked/-/micromark-util-chunked-2.0.1.tgz", - "integrity": "sha512-QUNFEOPELfmvv+4xiNg2sRYeS/P84pTW0TCgP5zc9FpXetHY0ab7SxKyAQCNCc1eK0459uoLI1y5oO5Vc1dbhA==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-util-symbol": "^2.0.0" - } - }, - "node_modules/micromark-util-chunked/node_modules/micromark-util-symbol": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", - "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT" - }, - "node_modules/micromark-util-classify-character": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-util-classify-character/-/micromark-util-classify-character-2.0.1.tgz", - "integrity": "sha512-K0kHzM6afW/MbeWYWLjoHQv1sgg2Q9EccHEDzSkxiP/EaagNzCm7T/WMKZ3rjMbvIpvBiZgwR3dKMygtA4mG1Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": 
"https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-util-character": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-util-classify-character/node_modules/micromark-util-character": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", - "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-util-classify-character/node_modules/micromark-util-symbol": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", - "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT" - }, - "node_modules/micromark-util-combine-extensions": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-util-combine-extensions/-/micromark-util-combine-extensions-2.0.1.tgz", - "integrity": "sha512-OnAnH8Ujmy59JcyZw8JSbK9cGpdVY44NKgSM7E9Eh7DiLS2E9RNQf0dONaGDzEG9yjEl5hcqeIsj4hfRkLH/Bg==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-util-chunked": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-util-decode-numeric-character-reference": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-2.0.2.tgz", - "integrity": "sha512-ccUbYk6CwVdkmCQMyr64dXz42EfHGkPQlBj5p7YVGzq8I7CtjXZJrubAYezf7Rp+bjPseiROqe7G6foFd+lEuw==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-util-symbol": "^2.0.0" - } - }, - "node_modules/micromark-util-decode-numeric-character-reference/node_modules/micromark-util-symbol": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", - "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT" - }, - "node_modules/micromark-util-decode-string": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-util-decode-string/-/micromark-util-decode-string-2.0.1.tgz", - "integrity": 
"sha512-nDV/77Fj6eH1ynwscYTOsbK7rR//Uj0bZXBwJZRfaLEJ1iGBR6kIfNmlNqaqJf649EP0F3NWNdeJi03elllNUQ==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "decode-named-character-reference": "^1.0.0", - "micromark-util-character": "^2.0.0", - "micromark-util-decode-numeric-character-reference": "^2.0.0", - "micromark-util-symbol": "^2.0.0" - } - }, - "node_modules/micromark-util-decode-string/node_modules/micromark-util-character": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", - "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-util-decode-string/node_modules/micromark-util-symbol": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", - "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT" - }, - "node_modules/micromark-util-encode": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-util-encode/-/micromark-util-encode-2.0.1.tgz", - "integrity": "sha512-c3cVx2y4KqUnwopcO9b/SCdo2O67LwJJ/UyqGfbigahfegL9myoEFoDYZgkT7f36T0bLrM9hZTAaAyH+PCAXjw==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT" - }, - "node_modules/micromark-util-events-to-acorn": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/micromark-util-events-to-acorn/-/micromark-util-events-to-acorn-2.0.2.tgz", - "integrity": "sha512-Fk+xmBrOv9QZnEDguL9OI9/NQQp6Hz4FuQ4YmCb/5V7+9eAh1s6AYSvL20kHkD67YIg7EpE54TiSlcsf3vyZgA==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "@types/acorn": "^4.0.0", - "@types/estree": "^1.0.0", - "@types/unist": "^3.0.0", - "devlop": "^1.0.0", - "estree-util-visit": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0", - "vfile-message": "^4.0.0" - } - }, - "node_modules/micromark-util-events-to-acorn/node_modules/micromark-util-symbol": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", - "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT" - }, - "node_modules/micromark-util-html-tag-name": 
{ - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-util-html-tag-name/-/micromark-util-html-tag-name-2.0.1.tgz", - "integrity": "sha512-2cNEiYDhCWKI+Gs9T0Tiysk136SnR13hhO8yW6BGNyhOC4qYFnwF1nKfD3HFAIXA5c45RrIG1ub11GiXeYd1xA==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT" - }, - "node_modules/micromark-util-normalize-identifier": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-2.0.1.tgz", - "integrity": "sha512-sxPqmo70LyARJs0w2UclACPUUEqltCkJ6PhKdMIDuJ3gSf/Q+/GIe3WKl0Ijb/GyH9lOpUkRAO2wp0GVkLvS9Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-util-symbol": "^2.0.0" - } - }, - "node_modules/micromark-util-normalize-identifier/node_modules/micromark-util-symbol": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", - "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT" - }, - "node_modules/micromark-util-resolve-all": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-util-resolve-all/-/micromark-util-resolve-all-2.0.1.tgz", - "integrity": "sha512-VdQyxFWFT2/FGJgwQnJYbe1jjQoNTS4RjglmSjTUlpUMa95Htx9NHeYW4rGDJzbjvCsl9eLjMQwGeElsqmzcHg==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-util-sanitize-uri": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-2.0.1.tgz", - "integrity": "sha512-9N9IomZ/YuGGZZmQec1MbgxtlgougxTodVwDzzEouPKo3qFWvymFHWcnDi2vzV1ff6kas9ucW+o3yzJK9YB1AQ==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-util-character": "^2.0.0", - "micromark-util-encode": "^2.0.0", - "micromark-util-symbol": "^2.0.0" - } - }, - "node_modules/micromark-util-sanitize-uri/node_modules/micromark-util-character": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", - "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-util-sanitize-uri/node_modules/micromark-util-symbol": { - "version": "2.0.1", 
- "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", - "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT" - }, - "node_modules/micromark-util-subtokenize": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/micromark-util-subtokenize/-/micromark-util-subtokenize-2.0.3.tgz", - "integrity": "sha512-VXJJuNxYWSoYL6AJ6OQECCFGhIU2GGHMw8tahogePBrjkG8aCCas3ibkp7RnVOSTClg2is05/R7maAhF1XyQMg==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "devlop": "^1.0.0", - "micromark-util-chunked": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark-util-subtokenize/node_modules/micromark-util-symbol": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", - "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT" - }, - "node_modules/micromark-util-symbol": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-1.1.0.tgz", - "integrity": "sha512-uEjpEYY6KMs1g7QfJ2eX1SQEV+ZT4rUD3UcF6l57acZvLNK7PBZL+ty82Z1qhK1/yXIY4bdx04FKMgR0g4IAag==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT" - }, - "node_modules/micromark-util-types": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-2.0.1.tgz", - "integrity": "sha512-534m2WhVTddrcKVepwmVEVnUAmtrx9bfIjNoQHRqfnvdaHQiFytEhJoTgpWJvDEXCO5gLTQh3wYC1PgOJA4NSQ==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT" - }, - "node_modules/micromark/node_modules/micromark-factory-space": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", - "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-util-character": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark/node_modules/micromark-util-character": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", - "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", - "funding": [ - { - 
"type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT", - "dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" - } - }, - "node_modules/micromark/node_modules/micromark-util-symbol": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", - "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ], - "license": "MIT" - }, - "node_modules/micromatch": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", - "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", - "license": "MIT", - "dependencies": { - "braces": "^3.0.3", - "picomatch": "^2.3.1" - }, - "engines": { - "node": ">=8.6" - } - }, - "node_modules/mime": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", - "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", - "license": "MIT", - "bin": { - "mime": "cli.js" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/mime-db": { - "version": "1.33.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.33.0.tgz", - "integrity": "sha512-BHJ/EKruNIqJf/QahvxwQZXKygOQ256myeN/Ew+THcAa5q+PjyTTMMeNQC4DZw5AwfvelsUrA6B67NKMqXDbzQ==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/mime-types": { - "version": "2.1.18", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.18.tgz", - "integrity": "sha512-lc/aahn+t4/SWV/qcmumYjymLsWfN3ELhpmVuUFjgsORruuZPVSwAQryq+HHGvO/SI2KVX26bx+En+zhM8g8hQ==", - "license": "MIT", - "dependencies": { - "mime-db": "~1.33.0" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/mimic-fn": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", - "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/mimic-response": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-4.0.0.tgz", - "integrity": "sha512-e5ISH9xMYU0DzrT+jl8q2ze9D6eWBto+I8CNpe+VI+K2J/F/k3PdkdTdz4wvGVH4NTpo+NRYTVIuMQEMMcsLqg==", - "license": "MIT", - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/mini-css-extract-plugin": { - "version": "2.9.2", - "resolved": "https://registry.npmjs.org/mini-css-extract-plugin/-/mini-css-extract-plugin-2.9.2.tgz", - "integrity": "sha512-GJuACcS//jtq4kCtd5ii/M0SZf7OZRH+BxdqXZHaJfb8TJiVl+NgQRPwiYt2EuqeSkNydn/7vP+bcE27C5mb9w==", - "license": "MIT", - "dependencies": { - "schema-utils": "^4.0.0", - "tapable": "^2.2.1" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "webpack": "^5.0.0" - } - }, - "node_modules/minimalistic-assert": { - "version": "1.0.1", - "resolved": 
"https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz", - "integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==", - "license": "ISC" - }, - "node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "license": "ISC", - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, - "node_modules/minimist": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", - "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", - "license": "MIT", - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/mlly": { - "version": "1.7.3", - "resolved": "https://registry.npmjs.org/mlly/-/mlly-1.7.3.tgz", - "integrity": "sha512-xUsx5n/mN0uQf4V548PKQ+YShA4/IW0KI1dZhrNrPCLG+xizETbHTkOa1f8/xut9JRPp8kQuMnz0oqwkTiLo/A==", - "license": "MIT", - "dependencies": { - "acorn": "^8.14.0", - "pathe": "^1.1.2", - "pkg-types": "^1.2.1", - "ufo": "^1.5.4" - } - }, - "node_modules/mrmime": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/mrmime/-/mrmime-2.0.0.tgz", - "integrity": "sha512-eu38+hdgojoyq63s+yTpN4XMBdt5l8HhMhc4VKLO9KM5caLIBvUm4thi7fFaxyTmCKeNnXZ5pAlBwCUnhA09uw==", - "license": "MIT", - "engines": { - "node": ">=10" - } - }, - "node_modules/ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", - "license": "MIT" - }, - "node_modules/multicast-dns": { - "version": "7.2.5", - "resolved": "https://registry.npmjs.org/multicast-dns/-/multicast-dns-7.2.5.tgz", - "integrity": "sha512-2eznPJP8z2BFLX50tf0LuODrpINqP1RVIm/CObbTcBRITQgmC/TjcREF1NeTBzIcR5XO/ukWo+YHOjBbFwIupg==", - "license": "MIT", - "dependencies": { - "dns-packet": "^5.2.2", - "thunky": "^1.0.2" - }, - "bin": { - "multicast-dns": "cli.js" - } - }, - "node_modules/nanoid": { - "version": "3.3.7", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.7.tgz", - "integrity": "sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "license": "MIT", - "bin": { - "nanoid": "bin/nanoid.cjs" - }, - "engines": { - "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" - } - }, - "node_modules/negotiator": { - "version": "0.6.4", - "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.4.tgz", - "integrity": "sha512-myRT3DiWPHqho5PrJaIRyaMv2kgYf0mUVgBNOYMuCH5Ki1yEiQaf/ZJuQ62nvpc44wL5WDbTX7yGJi1Neevw8w==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/neo-async": { - "version": "2.6.2", - "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", - "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==", - "license": "MIT" - }, - "node_modules/no-case": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/no-case/-/no-case-3.0.4.tgz", - "integrity": "sha512-fgAN3jGAh+RoxUGZHTSOLJIqUc2wmoBwGR4tbpNAKmmovFoWq0OdRkb0VkldReO2a2iBT/OEulG9XSUc10r3zg==", - "license": "MIT", - "dependencies": { - "lower-case": "^2.0.2", - "tslib": 
"^2.0.3" - } - }, - "node_modules/node-emoji": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/node-emoji/-/node-emoji-2.1.3.tgz", - "integrity": "sha512-E2WEOVsgs7O16zsURJ/eH8BqhF029wGpEOnv7Urwdo2wmQanOACwJQh0devF9D9RhoZru0+9JXIS0dBXIAz+lA==", - "license": "MIT", - "dependencies": { - "@sindresorhus/is": "^4.6.0", - "char-regex": "^1.0.2", - "emojilib": "^2.4.0", - "skin-tone": "^2.0.0" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/node-forge": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/node-forge/-/node-forge-1.3.1.tgz", - "integrity": "sha512-dPEtOeMvF9VMcYV/1Wb8CPoVAXtp6MKMlcbAt4ddqmGqUJ6fQZFXkNZNkNlfevtNkGtaSoXf/vNNNSvgrdXwtA==", - "license": "(BSD-3-Clause OR GPL-2.0)", - "engines": { - "node": ">= 6.13.0" - } - }, - "node_modules/node-releases": { - "version": "2.0.18", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.18.tgz", - "integrity": "sha512-d9VeXT4SJ7ZeOqGX6R5EM022wpL+eWPooLI+5UpWn2jCT1aosUQEhQP214x33Wkwx3JQMvIm+tIoVOdodFS40g==", - "license": "MIT" - }, - "node_modules/normalize-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", - "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/normalize-range": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz", - "integrity": "sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/normalize-url": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-8.0.1.tgz", - "integrity": "sha512-IO9QvjUMWxPQQhs60oOu10CRkWCiZzSUkzbXGGV9pviYl1fXYcvkzQ5jV9z8Y6un8ARoVRl4EtC6v6jNqbaJ/w==", - "license": "MIT", - "engines": { - "node": ">=14.16" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/npm-run-path": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", - "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", - "license": "MIT", - "dependencies": { - "path-key": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/nprogress": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/nprogress/-/nprogress-0.2.0.tgz", - "integrity": "sha512-I19aIingLgR1fmhftnbWWO3dXc0hSxqHQHQb3H8m+K3TnEn/iSeTZZOyvKXWqQESMwuUVnatlCnZdLBZZt2VSA==", - "license": "MIT" - }, - "node_modules/nth-check": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.1.1.tgz", - "integrity": "sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==", - "license": "BSD-2-Clause", - "dependencies": { - "boolbase": "^1.0.0" - }, - "funding": { - "url": "https://github.com/fb55/nth-check?sponsor=1" - } - }, - "node_modules/null-loader": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/null-loader/-/null-loader-4.0.1.tgz", - "integrity": "sha512-pxqVbi4U6N26lq+LmgIbB5XATP0VdZKOG25DhHi8btMmJJefGArFyDg1yc4U3hWCJbMqSrw0qyrz1UQX+qYXqg==", - "license": "MIT", - "dependencies": { - "loader-utils": "^2.0.0", - "schema-utils": "^3.0.0" - }, - "engines": { - "node": ">= 10.13.0" - }, - "funding": { - "type": 
"opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "webpack": "^4.0.0 || ^5.0.0" - } - }, - "node_modules/null-loader/node_modules/ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", - "license": "MIT", - "dependencies": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/null-loader/node_modules/ajv-keywords": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", - "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", - "license": "MIT", - "peerDependencies": { - "ajv": "^6.9.1" - } - }, - "node_modules/null-loader/node_modules/json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", - "license": "MIT" - }, - "node_modules/null-loader/node_modules/schema-utils": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", - "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", - "license": "MIT", - "dependencies": { - "@types/json-schema": "^7.0.8", - "ajv": "^6.12.5", - "ajv-keywords": "^3.5.2" - }, - "engines": { - "node": ">= 10.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, - "node_modules/object-assign": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", - "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/object-inspect": { - "version": "1.13.3", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.3.tgz", - "integrity": "sha512-kDCGIbxkDSXE3euJZZXzc6to7fCrKHNI/hSRQnRuQ+BWjFNzZwiFF8fj/6o2t2G9/jTj8PSIYTfCLelLZEeRpA==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/object-keys": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", - "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/object.assign": { - "version": "4.1.5", - "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.5.tgz", - "integrity": "sha512-byy+U7gp+FVwmyzKPYhW2h5l3crpmGsxl7X2s8y43IgxvG4g3QZ6CffDtsNQy1WsmZpQbO+ybo0AlW7TY6DcBQ==", - "license": "MIT", - "dependencies": { - "call-bind": "^1.0.5", - "define-properties": "^1.2.1", - "has-symbols": "^1.0.3", - "object-keys": "^1.1.1" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/obuf": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/obuf/-/obuf-1.1.2.tgz", - "integrity": 
"sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg==", - "license": "MIT" - }, - "node_modules/on-finished": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", - "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", - "license": "MIT", - "dependencies": { - "ee-first": "1.1.1" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/on-headers": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.2.tgz", - "integrity": "sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA==", - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/once": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", - "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", - "license": "ISC", - "dependencies": { - "wrappy": "1" - } - }, - "node_modules/onetime": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", - "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", - "license": "MIT", - "dependencies": { - "mimic-fn": "^2.1.0" - }, - "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/open": { - "version": "8.4.2", - "resolved": "https://registry.npmjs.org/open/-/open-8.4.2.tgz", - "integrity": "sha512-7x81NCL719oNbsq/3mh+hVrAWmFuEYUqrq/Iw3kUzH8ReypT9QQ0BLoJS7/G9k6N81XjW4qHWtjWwe/9eLy1EQ==", - "license": "MIT", - "dependencies": { - "define-lazy-prop": "^2.0.0", - "is-docker": "^2.1.1", - "is-wsl": "^2.2.0" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/opener": { - "version": "1.5.2", - "resolved": "https://registry.npmjs.org/opener/-/opener-1.5.2.tgz", - "integrity": "sha512-ur5UIdyw5Y7yEj9wLzhqXiy6GZ3Mwx0yGI+5sMn2r0N0v3cKJvUmFH5yPP+WXh9e0xfyzyJX95D8l088DNFj7A==", - "license": "(WTFPL OR MIT)", - "bin": { - "opener": "bin/opener-bin.js" - } - }, - "node_modules/p-cancelable": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-3.0.0.tgz", - "integrity": "sha512-mlVgR3PGuzlo0MmTdk4cXqXWlwQDLnONTAg6sm62XkMJEiRxN3GL3SffkYvqwonbkJBcrI7Uvv5Zh9yjvn2iUw==", - "license": "MIT", - "engines": { - "node": ">=12.20" - } - }, - "node_modules/p-finally": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz", - "integrity": "sha512-LICb2p9CB7FS+0eR1oqWnHhp0FljGLZCWBE9aix0Uye9W8LTQPwMTYVGWQWIw9RdQiDg4+epXQODwIYJtSJaow==", - "license": "MIT", - "engines": { - "node": ">=4" - } - }, - "node_modules/p-limit": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-4.0.0.tgz", - "integrity": "sha512-5b0R4txpzjPWVw/cXXUResoD4hb6U/x9BH08L7nw+GN1sezDzPdxeRvpc9c433fZhBan/wusjbCsqwqm4EIBIQ==", - "license": "MIT", - "dependencies": { - "yocto-queue": "^1.0.0" - }, - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/p-locate": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-6.0.0.tgz", - "integrity": 
"sha512-wPrq66Llhl7/4AGC6I+cqxT07LhXvWL08LNXz1fENOw0Ap4sRZZ/gZpTTJ5jpurzzzfS2W/Ge9BY3LgLjCShcw==", - "license": "MIT", - "dependencies": { - "p-limit": "^4.0.0" - }, - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/p-map": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz", - "integrity": "sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==", - "license": "MIT", - "dependencies": { - "aggregate-error": "^3.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/p-queue": { - "version": "6.6.2", - "resolved": "https://registry.npmjs.org/p-queue/-/p-queue-6.6.2.tgz", - "integrity": "sha512-RwFpb72c/BhQLEXIZ5K2e+AhgNVmIejGlTgiB9MzZ0e93GRvqZ7uSi0dvRF7/XIXDeNkra2fNHBxTyPDGySpjQ==", - "license": "MIT", - "dependencies": { - "eventemitter3": "^4.0.4", - "p-timeout": "^3.2.0" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/p-retry": { - "version": "4.6.2", - "resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.6.2.tgz", - "integrity": "sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==", - "license": "MIT", - "dependencies": { - "@types/retry": "0.12.0", - "retry": "^0.13.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/p-timeout": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/p-timeout/-/p-timeout-3.2.0.tgz", - "integrity": "sha512-rhIwUycgwwKcP9yTOOFK/AKsAopjjCakVqLHePO3CC6Mir1Z99xT+R63jZxAT5lFZLa2inS5h+ZS2GvR99/FBg==", - "license": "MIT", - "dependencies": { - "p-finally": "^1.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/package-json": { - "version": "8.1.1", - "resolved": "https://registry.npmjs.org/package-json/-/package-json-8.1.1.tgz", - "integrity": "sha512-cbH9IAIJHNj9uXi196JVsRlt7cHKak6u/e6AkL/bkRelZ7rlL3X1YKxsZwa36xipOEKAsdtmaG6aAJoM1fx2zA==", - "license": "MIT", - "dependencies": { - "got": "^12.1.0", - "registry-auth-token": "^5.0.1", - "registry-url": "^6.0.0", - "semver": "^7.3.7" - }, - "engines": { - "node": ">=14.16" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/package-manager-detector": { - "version": "0.2.4", - "resolved": "https://registry.npmjs.org/package-manager-detector/-/package-manager-detector-0.2.4.tgz", - "integrity": "sha512-H/OUu9/zUfP89z1APcBf2X8Us0tt8dUK4lUmKqz12QNXif3DxAs1/YqjGtcutZi1zQqeNQRWr9C+EbQnnvSSFA==", - "license": "MIT" - }, - "node_modules/param-case": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/param-case/-/param-case-3.0.4.tgz", - "integrity": "sha512-RXlj7zCYokReqWpOPH9oYivUzLYZ5vAPIfEmCTNViosC78F8F0H9y7T7gG2M39ymgutxF5gcFEsyZQSph9Bp3A==", - "license": "MIT", - "dependencies": { - "dot-case": "^3.0.4", - "tslib": "^2.0.3" - } - }, - "node_modules/parent-module": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", - "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", - "license": "MIT", - "dependencies": { - "callsites": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/parse-entities": { - "version": "4.0.1", - "resolved": 
"https://registry.npmjs.org/parse-entities/-/parse-entities-4.0.1.tgz", - "integrity": "sha512-SWzvYcSJh4d/SGLIOQfZ/CoNv6BTlI6YEQ7Nj82oDVnRpwe/Z/F1EMx42x3JAOwGBlCjeCH0BRJQbQ/opHL17w==", - "license": "MIT", - "dependencies": { - "@types/unist": "^2.0.0", - "character-entities": "^2.0.0", - "character-entities-legacy": "^3.0.0", - "character-reference-invalid": "^2.0.0", - "decode-named-character-reference": "^1.0.0", - "is-alphanumerical": "^2.0.0", - "is-decimal": "^2.0.0", - "is-hexadecimal": "^2.0.0" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/parse-entities/node_modules/@types/unist": { - "version": "2.0.11", - "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.11.tgz", - "integrity": "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==", - "license": "MIT" - }, - "node_modules/parse-json": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", - "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", - "license": "MIT", - "dependencies": { - "@babel/code-frame": "^7.0.0", - "error-ex": "^1.3.1", - "json-parse-even-better-errors": "^2.3.0", - "lines-and-columns": "^1.1.6" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/parse-numeric-range": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/parse-numeric-range/-/parse-numeric-range-1.3.0.tgz", - "integrity": "sha512-twN+njEipszzlMJd4ONUYgSfZPDxgHhT9Ahed5uTigpQn90FggW4SA/AIPq/6a149fTbE9qBEcSwE3FAEp6wQQ==", - "license": "ISC" - }, - "node_modules/parse5": { - "version": "7.2.1", - "resolved": "https://registry.npmjs.org/parse5/-/parse5-7.2.1.tgz", - "integrity": "sha512-BuBYQYlv1ckiPdQi/ohiivi9Sagc9JG+Ozs0r7b/0iK3sKmrb0b9FdWdBbOdx6hBCM/F9Ir82ofnBhtZOjCRPQ==", - "license": "MIT", - "dependencies": { - "entities": "^4.5.0" - }, - "funding": { - "url": "https://github.com/inikulin/parse5?sponsor=1" - } - }, - "node_modules/parse5-htmlparser2-tree-adapter": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-7.1.0.tgz", - "integrity": "sha512-ruw5xyKs6lrpo9x9rCZqZZnIUntICjQAd0Wsmp396Ul9lN/h+ifgVV1x1gZHi8euej6wTfpqX8j+BFQxF0NS/g==", - "license": "MIT", - "dependencies": { - "domhandler": "^5.0.3", - "parse5": "^7.0.0" - }, - "funding": { - "url": "https://github.com/inikulin/parse5?sponsor=1" - } - }, - "node_modules/parseurl": { - "version": "1.3.3", - "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", - "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/pascal-case": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/pascal-case/-/pascal-case-3.1.2.tgz", - "integrity": "sha512-uWlGT3YSnK9x3BQJaOdcZwrnV6hPpd8jFH1/ucpiLRPh/2zCVJKS19E4GvYHvaCcACn3foXZ0cLB9Wrx1KGe5g==", - "license": "MIT", - "dependencies": { - "no-case": "^3.0.4", - "tslib": "^2.0.3" - } - }, - "node_modules/path-data-parser": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/path-data-parser/-/path-data-parser-0.1.0.tgz", - "integrity": "sha512-NOnmBpt5Y2RWbuv0LMzsayp3lVylAHLPUTut412ZA3l+C4uw4ZVkQbjShYCQ8TCpUMdPapr4YjUqLYD6v68j+w==", - "license": "MIT" - }, - "node_modules/path-exists": { 
- "version": "5.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-5.0.0.tgz", - "integrity": "sha512-RjhtfwJOxzcFmNOi6ltcbcu4Iu+FL3zEj83dk4kAS+fVpTxXLO1b38RvJgT/0QwvV/L3aY9TAnyv0EOqW4GoMQ==", - "license": "MIT", - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - } - }, - "node_modules/path-is-absolute": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", - "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/path-is-inside": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/path-is-inside/-/path-is-inside-1.0.2.tgz", - "integrity": "sha512-DUWJr3+ULp4zXmol/SZkFf3JGsS9/SIv+Y3Rt93/UjPpDpklB5f1er4O3POIbUuUJ3FXgqte2Q7SrU6zAqwk8w==", - "license": "(WTFPL OR MIT)" - }, - "node_modules/path-key": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", - "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/path-parse": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", - "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", - "license": "MIT" - }, - "node_modules/path-to-regexp": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.9.0.tgz", - "integrity": "sha512-xIp7/apCFJuUHdDLWe8O1HIkb0kQrOMb/0u6FXQjemHn/ii5LrIzU6bdECnsiTF/GjZkMEKg1xdiZwNqDYlZ6g==", - "license": "MIT", - "dependencies": { - "isarray": "0.0.1" - } - }, - "node_modules/path-type": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", - "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/pathe": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/pathe/-/pathe-1.1.2.tgz", - "integrity": "sha512-whLdWMYL2TwI08hn8/ZqAbrVemu0LNaNNJZX73O6qaIdCTfXutsLhMkjdENX0qhsQ9uIimo4/aQOmXkoon2nDQ==", - "license": "MIT" - }, - "node_modules/picocolors": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", - "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", - "license": "ISC" - }, - "node_modules/picomatch": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", - "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", - "license": "MIT", - "engines": { - "node": ">=8.6" - }, - "funding": { - "url": "https://github.com/sponsors/jonschlinkert" - } - }, - "node_modules/pkg-dir": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-7.0.0.tgz", - "integrity": "sha512-Ie9z/WINcxxLp27BKOCHGde4ITq9UklYKDzVo1nhk5sqGEXU3FpkwP5GM2voTGJkGd9B3Otl+Q4uwSOeSUtOBA==", - "license": "MIT", - "dependencies": { - "find-up": "^6.3.0" - }, - "engines": { - "node": ">=14.16" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/pkg-types": { - "version": "1.2.1", - "resolved": 
"https://registry.npmjs.org/pkg-types/-/pkg-types-1.2.1.tgz", - "integrity": "sha512-sQoqa8alT3nHjGuTjuKgOnvjo4cljkufdtLMnO2LBP/wRwuDlo1tkaEdMxCRhyGRPacv/ztlZgDPm2b7FAmEvw==", - "license": "MIT", - "dependencies": { - "confbox": "^0.1.8", - "mlly": "^1.7.2", - "pathe": "^1.1.2" - } - }, - "node_modules/points-on-curve": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/points-on-curve/-/points-on-curve-0.2.0.tgz", - "integrity": "sha512-0mYKnYYe9ZcqMCWhUjItv/oHjvgEsfKvnUTg8sAtnHr3GVy7rGkXCb6d5cSyqrWqL4k81b9CPg3urd+T7aop3A==", - "license": "MIT" - }, - "node_modules/points-on-path": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/points-on-path/-/points-on-path-0.2.1.tgz", - "integrity": "sha512-25ClnWWuw7JbWZcgqY/gJ4FQWadKxGWk+3kR/7kD0tCaDtPPMj7oHu2ToLaVhfpnHrZzYby2w6tUA0eOIuUg8g==", - "license": "MIT", - "dependencies": { - "path-data-parser": "0.1.0", - "points-on-curve": "0.2.0" - } - }, - "node_modules/postcss": { - "version": "8.4.49", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.49.tgz", - "integrity": "sha512-OCVPnIObs4N29kxTjzLfUryOkvZEq+pf8jTF0lg8E7uETuWHA+v7j3c/xJmiqpX450191LlmZfUKkXxkTry7nA==", - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/postcss" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "license": "MIT", - "dependencies": { - "nanoid": "^3.3.7", - "picocolors": "^1.1.1", - "source-map-js": "^1.2.1" - }, - "engines": { - "node": "^10 || ^12 || >=14" - } - }, - "node_modules/postcss-attribute-case-insensitive": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/postcss-attribute-case-insensitive/-/postcss-attribute-case-insensitive-7.0.1.tgz", - "integrity": "sha512-Uai+SupNSqzlschRyNx3kbCTWgY/2hcwtHEI/ej2LJWc9JJ77qKgGptd8DHwY1mXtZ7Aoh4z4yxfwMBue9eNgw==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT", - "dependencies": { - "postcss-selector-parser": "^7.0.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-attribute-case-insensitive/node_modules/postcss-selector-parser": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.0.0.tgz", - "integrity": "sha512-9RbEr1Y7FFfptd/1eEdntyjMwLeghW1bHX9GWjXo19vx4ytPQhANltvVxDggzJl7mnWM+dX28kb6cyS/4iQjlQ==", - "license": "MIT", - "dependencies": { - "cssesc": "^3.0.0", - "util-deprecate": "^1.0.2" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/postcss-calc": { - "version": "9.0.1", - "resolved": "https://registry.npmjs.org/postcss-calc/-/postcss-calc-9.0.1.tgz", - "integrity": "sha512-TipgjGyzP5QzEhsOZUaIkeO5mKeMFpebWzRogWG/ysonUlnHcq5aJe0jOjpfzUU8PeSaBQnrE8ehR0QA5vs8PQ==", - "license": "MIT", - "dependencies": { - "postcss-selector-parser": "^6.0.11", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.2.2" - } - }, - "node_modules/postcss-clamp": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/postcss-clamp/-/postcss-clamp-4.1.0.tgz", - "integrity": "sha512-ry4b1Llo/9zz+PKC+030KUnPITTJAHeOwjfAyyB60eT0AorGLdzp52s31OsPRHRf8NchkgFoG2y6fCfn1IV1Ow==", - "license": "MIT", - "dependencies": { - "postcss-value-parser": 
"^4.2.0" - }, - "engines": { - "node": ">=7.6.0" - }, - "peerDependencies": { - "postcss": "^8.4.6" - } - }, - "node_modules/postcss-color-functional-notation": { - "version": "7.0.6", - "resolved": "https://registry.npmjs.org/postcss-color-functional-notation/-/postcss-color-functional-notation-7.0.6.tgz", - "integrity": "sha512-wLXvm8RmLs14Z2nVpB4CWlnvaWPRcOZFltJSlcbYwSJ1EDZKsKDhPKIMecCnuU054KSmlmubkqczmm6qBPCBhA==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "dependencies": { - "@csstools/css-color-parser": "^3.0.6", - "@csstools/css-parser-algorithms": "^3.0.4", - "@csstools/css-tokenizer": "^3.0.3", - "@csstools/postcss-progressive-custom-properties": "^4.0.0", - "@csstools/utilities": "^2.0.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-color-hex-alpha": { - "version": "10.0.0", - "resolved": "https://registry.npmjs.org/postcss-color-hex-alpha/-/postcss-color-hex-alpha-10.0.0.tgz", - "integrity": "sha512-1kervM2cnlgPs2a8Vt/Qbe5cQ++N7rkYo/2rz2BkqJZIHQwaVuJgQH38REHrAi4uM0b1fqxMkWYmese94iMp3w==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT", - "dependencies": { - "@csstools/utilities": "^2.0.0", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-color-rebeccapurple": { - "version": "10.0.0", - "resolved": "https://registry.npmjs.org/postcss-color-rebeccapurple/-/postcss-color-rebeccapurple-10.0.0.tgz", - "integrity": "sha512-JFta737jSP+hdAIEhk1Vs0q0YF5P8fFcj+09pweS8ktuGuZ8pPlykHsk6mPxZ8awDl4TrcxUqJo9l1IhVr/OjQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "dependencies": { - "@csstools/utilities": "^2.0.0", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-colormin": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/postcss-colormin/-/postcss-colormin-6.1.0.tgz", - "integrity": "sha512-x9yX7DOxeMAR+BgGVnNSAxmAj98NX/YxEMNFP+SDCEeNLb2r3i6Hh1ksMsnW8Ub5SLCpbescQqn9YEbE9554Sw==", - "license": "MIT", - "dependencies": { - "browserslist": "^4.23.0", - "caniuse-api": "^3.0.0", - "colord": "^2.9.3", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-convert-values": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/postcss-convert-values/-/postcss-convert-values-6.1.0.tgz", - "integrity": "sha512-zx8IwP/ts9WvUM6NkVSkiU902QZL1bwPhaVaLynPtCsOTqp+ZKbNi+s6XJg3rfqpKGA/oc7Oxk5t8pOQJcwl/w==", - "license": "MIT", - "dependencies": { - "browserslist": "^4.23.0", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-custom-media": { - "version": "11.0.5", - "resolved": "https://registry.npmjs.org/postcss-custom-media/-/postcss-custom-media-11.0.5.tgz", - "integrity": 
"sha512-SQHhayVNgDvSAdX9NQ/ygcDQGEY+aSF4b/96z7QUX6mqL5yl/JgG/DywcF6fW9XbnCRE+aVYk+9/nqGuzOPWeQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT", - "dependencies": { - "@csstools/cascade-layer-name-parser": "^2.0.4", - "@csstools/css-parser-algorithms": "^3.0.4", - "@csstools/css-tokenizer": "^3.0.3", - "@csstools/media-query-list-parser": "^4.0.2" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-custom-properties": { - "version": "14.0.4", - "resolved": "https://registry.npmjs.org/postcss-custom-properties/-/postcss-custom-properties-14.0.4.tgz", - "integrity": "sha512-QnW8FCCK6q+4ierwjnmXF9Y9KF8q0JkbgVfvQEMa93x1GT8FvOiUevWCN2YLaOWyByeDX8S6VFbZEeWoAoXs2A==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT", - "dependencies": { - "@csstools/cascade-layer-name-parser": "^2.0.4", - "@csstools/css-parser-algorithms": "^3.0.4", - "@csstools/css-tokenizer": "^3.0.3", - "@csstools/utilities": "^2.0.0", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-custom-selectors": { - "version": "8.0.4", - "resolved": "https://registry.npmjs.org/postcss-custom-selectors/-/postcss-custom-selectors-8.0.4.tgz", - "integrity": "sha512-ASOXqNvDCE0dAJ/5qixxPeL1aOVGHGW2JwSy7HyjWNbnWTQCl+fDc968HY1jCmZI0+BaYT5CxsOiUhavpG/7eg==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT", - "dependencies": { - "@csstools/cascade-layer-name-parser": "^2.0.4", - "@csstools/css-parser-algorithms": "^3.0.4", - "@csstools/css-tokenizer": "^3.0.3", - "postcss-selector-parser": "^7.0.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-custom-selectors/node_modules/postcss-selector-parser": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.0.0.tgz", - "integrity": "sha512-9RbEr1Y7FFfptd/1eEdntyjMwLeghW1bHX9GWjXo19vx4ytPQhANltvVxDggzJl7mnWM+dX28kb6cyS/4iQjlQ==", - "license": "MIT", - "dependencies": { - "cssesc": "^3.0.0", - "util-deprecate": "^1.0.2" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/postcss-dir-pseudo-class": { - "version": "9.0.1", - "resolved": "https://registry.npmjs.org/postcss-dir-pseudo-class/-/postcss-dir-pseudo-class-9.0.1.tgz", - "integrity": "sha512-tRBEK0MHYvcMUrAuYMEOa0zg9APqirBcgzi6P21OhxtJyJADo/SWBwY1CAwEohQ/6HDaa9jCjLRG7K3PVQYHEA==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "dependencies": { - "postcss-selector-parser": "^7.0.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-dir-pseudo-class/node_modules/postcss-selector-parser": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.0.0.tgz", - "integrity": 
"sha512-9RbEr1Y7FFfptd/1eEdntyjMwLeghW1bHX9GWjXo19vx4ytPQhANltvVxDggzJl7mnWM+dX28kb6cyS/4iQjlQ==", - "license": "MIT", - "dependencies": { - "cssesc": "^3.0.0", - "util-deprecate": "^1.0.2" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/postcss-discard-comments": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/postcss-discard-comments/-/postcss-discard-comments-6.0.2.tgz", - "integrity": "sha512-65w/uIqhSBBfQmYnG92FO1mWZjJ4GL5b8atm5Yw2UgrwD7HiNiSSNwJor1eCFGzUgYnN/iIknhNRVqjrrpuglw==", - "license": "MIT", - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-discard-duplicates": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/postcss-discard-duplicates/-/postcss-discard-duplicates-6.0.3.tgz", - "integrity": "sha512-+JA0DCvc5XvFAxwx6f/e68gQu/7Z9ud584VLmcgto28eB8FqSFZwtrLwB5Kcp70eIoWP/HXqz4wpo8rD8gpsTw==", - "license": "MIT", - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-discard-empty": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/postcss-discard-empty/-/postcss-discard-empty-6.0.3.tgz", - "integrity": "sha512-znyno9cHKQsK6PtxL5D19Fj9uwSzC2mB74cpT66fhgOadEUPyXFkbgwm5tvc3bt3NAy8ltE5MrghxovZRVnOjQ==", - "license": "MIT", - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-discard-overridden": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/postcss-discard-overridden/-/postcss-discard-overridden-6.0.2.tgz", - "integrity": "sha512-j87xzI4LUggC5zND7KdjsI25APtyMuynXZSujByMaav2roV6OZX+8AaCUcZSWqckZpjAjRyFDdpqybgjFO0HJQ==", - "license": "MIT", - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-discard-unused": { - "version": "6.0.5", - "resolved": "https://registry.npmjs.org/postcss-discard-unused/-/postcss-discard-unused-6.0.5.tgz", - "integrity": "sha512-wHalBlRHkaNnNwfC8z+ppX57VhvS+HWgjW508esjdaEYr3Mx7Gnn2xA4R/CKf5+Z9S5qsqC+Uzh4ueENWwCVUA==", - "license": "MIT", - "dependencies": { - "postcss-selector-parser": "^6.0.16" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-double-position-gradients": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/postcss-double-position-gradients/-/postcss-double-position-gradients-6.0.0.tgz", - "integrity": "sha512-JkIGah3RVbdSEIrcobqj4Gzq0h53GG4uqDPsho88SgY84WnpkTpI0k50MFK/sX7XqVisZ6OqUfFnoUO6m1WWdg==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "dependencies": { - "@csstools/postcss-progressive-custom-properties": "^4.0.0", - "@csstools/utilities": "^2.0.0", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-focus-visible": { - "version": "10.0.1", - "resolved": "https://registry.npmjs.org/postcss-focus-visible/-/postcss-focus-visible-10.0.1.tgz", - "integrity": "sha512-U58wyjS/I1GZgjRok33aE8juW9qQgQUNwTSdxQGuShHzwuYdcklnvK/+qOWX1Q9kr7ysbraQ6ht6r+udansalA==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": 
"https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "dependencies": { - "postcss-selector-parser": "^7.0.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-focus-visible/node_modules/postcss-selector-parser": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.0.0.tgz", - "integrity": "sha512-9RbEr1Y7FFfptd/1eEdntyjMwLeghW1bHX9GWjXo19vx4ytPQhANltvVxDggzJl7mnWM+dX28kb6cyS/4iQjlQ==", - "license": "MIT", - "dependencies": { - "cssesc": "^3.0.0", - "util-deprecate": "^1.0.2" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/postcss-focus-within": { - "version": "9.0.1", - "resolved": "https://registry.npmjs.org/postcss-focus-within/-/postcss-focus-within-9.0.1.tgz", - "integrity": "sha512-fzNUyS1yOYa7mOjpci/bR+u+ESvdar6hk8XNK/TRR0fiGTp2QT5N+ducP0n3rfH/m9I7H/EQU6lsa2BrgxkEjw==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "dependencies": { - "postcss-selector-parser": "^7.0.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-focus-within/node_modules/postcss-selector-parser": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.0.0.tgz", - "integrity": "sha512-9RbEr1Y7FFfptd/1eEdntyjMwLeghW1bHX9GWjXo19vx4ytPQhANltvVxDggzJl7mnWM+dX28kb6cyS/4iQjlQ==", - "license": "MIT", - "dependencies": { - "cssesc": "^3.0.0", - "util-deprecate": "^1.0.2" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/postcss-font-variant": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/postcss-font-variant/-/postcss-font-variant-5.0.0.tgz", - "integrity": "sha512-1fmkBaCALD72CK2a9i468mA/+tr9/1cBxRRMXOUaZqO43oWPR5imcyPjXwuv7PXbCid4ndlP5zWhidQVVa3hmA==", - "license": "MIT", - "peerDependencies": { - "postcss": "^8.1.0" - } - }, - "node_modules/postcss-gap-properties": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/postcss-gap-properties/-/postcss-gap-properties-6.0.0.tgz", - "integrity": "sha512-Om0WPjEwiM9Ru+VhfEDPZJAKWUd0mV1HmNXqp2C29z80aQ2uP9UVhLc7e3aYMIor/S5cVhoPgYQ7RtfeZpYTRw==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-image-set-function": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/postcss-image-set-function/-/postcss-image-set-function-7.0.0.tgz", - "integrity": "sha512-QL7W7QNlZuzOwBTeXEmbVckNt1FSmhQtbMRvGGqqU4Nf4xk6KUEQhAoWuMzwbSv5jxiRiSZ5Tv7eiDB9U87znA==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "dependencies": { - "@csstools/utilities": "^2.0.0", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-lab-function": { - "version": "7.0.6", - "resolved": "https://registry.npmjs.org/postcss-lab-function/-/postcss-lab-function-7.0.6.tgz", - "integrity": 
"sha512-HPwvsoK7C949vBZ+eMyvH2cQeMr3UREoHvbtra76/UhDuiViZH6pir+z71UaJQohd7VDSVUdR6TkWYKExEc9aQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "dependencies": { - "@csstools/css-color-parser": "^3.0.6", - "@csstools/css-parser-algorithms": "^3.0.4", - "@csstools/css-tokenizer": "^3.0.3", - "@csstools/postcss-progressive-custom-properties": "^4.0.0", - "@csstools/utilities": "^2.0.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-loader": { - "version": "7.3.4", - "resolved": "https://registry.npmjs.org/postcss-loader/-/postcss-loader-7.3.4.tgz", - "integrity": "sha512-iW5WTTBSC5BfsBJ9daFMPVrLT36MrNiC6fqOZTTaHjBNX6Pfd5p+hSBqe/fEeNd7pc13QiAyGt7VdGMw4eRC4A==", - "license": "MIT", - "dependencies": { - "cosmiconfig": "^8.3.5", - "jiti": "^1.20.0", - "semver": "^7.5.4" - }, - "engines": { - "node": ">= 14.15.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "postcss": "^7.0.0 || ^8.0.1", - "webpack": "^5.0.0" - } - }, - "node_modules/postcss-logical": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/postcss-logical/-/postcss-logical-8.0.0.tgz", - "integrity": "sha512-HpIdsdieClTjXLOyYdUPAX/XQASNIwdKt5hoZW08ZOAiI+tbV0ta1oclkpVkW5ANU+xJvk3KkA0FejkjGLXUkg==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-merge-idents": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/postcss-merge-idents/-/postcss-merge-idents-6.0.3.tgz", - "integrity": "sha512-1oIoAsODUs6IHQZkLQGO15uGEbK3EAl5wi9SS8hs45VgsxQfMnxvt+L+zIr7ifZFIH14cfAeVe2uCTa+SPRa3g==", - "license": "MIT", - "dependencies": { - "cssnano-utils": "^4.0.2", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-merge-longhand": { - "version": "6.0.5", - "resolved": "https://registry.npmjs.org/postcss-merge-longhand/-/postcss-merge-longhand-6.0.5.tgz", - "integrity": "sha512-5LOiordeTfi64QhICp07nzzuTDjNSO8g5Ksdibt44d+uvIIAE1oZdRn8y/W5ZtYgRH/lnLDlvi9F8btZcVzu3w==", - "license": "MIT", - "dependencies": { - "postcss-value-parser": "^4.2.0", - "stylehacks": "^6.1.1" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-merge-rules": { - "version": "6.1.1", - "resolved": "https://registry.npmjs.org/postcss-merge-rules/-/postcss-merge-rules-6.1.1.tgz", - "integrity": "sha512-KOdWF0gju31AQPZiD+2Ar9Qjowz1LTChSjFFbS+e2sFgc4uHOp3ZvVX4sNeTlk0w2O31ecFGgrFzhO0RSWbWwQ==", - "license": "MIT", - "dependencies": { - "browserslist": "^4.23.0", - "caniuse-api": "^3.0.0", - "cssnano-utils": "^4.0.2", - "postcss-selector-parser": "^6.0.16" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-minify-font-values": { - "version": "6.1.0", - "resolved": 
"https://registry.npmjs.org/postcss-minify-font-values/-/postcss-minify-font-values-6.1.0.tgz", - "integrity": "sha512-gklfI/n+9rTh8nYaSJXlCo3nOKqMNkxuGpTn/Qm0gstL3ywTr9/WRKznE+oy6fvfolH6dF+QM4nCo8yPLdvGJg==", - "license": "MIT", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-minify-gradients": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/postcss-minify-gradients/-/postcss-minify-gradients-6.0.3.tgz", - "integrity": "sha512-4KXAHrYlzF0Rr7uc4VrfwDJ2ajrtNEpNEuLxFgwkhFZ56/7gaE4Nr49nLsQDZyUe+ds+kEhf+YAUolJiYXF8+Q==", - "license": "MIT", - "dependencies": { - "colord": "^2.9.3", - "cssnano-utils": "^4.0.2", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-minify-params": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/postcss-minify-params/-/postcss-minify-params-6.1.0.tgz", - "integrity": "sha512-bmSKnDtyyE8ujHQK0RQJDIKhQ20Jq1LYiez54WiaOoBtcSuflfK3Nm596LvbtlFcpipMjgClQGyGr7GAs+H1uA==", - "license": "MIT", - "dependencies": { - "browserslist": "^4.23.0", - "cssnano-utils": "^4.0.2", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-minify-selectors": { - "version": "6.0.4", - "resolved": "https://registry.npmjs.org/postcss-minify-selectors/-/postcss-minify-selectors-6.0.4.tgz", - "integrity": "sha512-L8dZSwNLgK7pjTto9PzWRoMbnLq5vsZSTu8+j1P/2GB8qdtGQfn+K1uSvFgYvgh83cbyxT5m43ZZhUMTJDSClQ==", - "license": "MIT", - "dependencies": { - "postcss-selector-parser": "^6.0.16" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-modules-extract-imports": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/postcss-modules-extract-imports/-/postcss-modules-extract-imports-3.1.0.tgz", - "integrity": "sha512-k3kNe0aNFQDAZGbin48pL2VNidTF0w4/eASDsxlyspobzU3wZQLOGj7L9gfRe0Jo9/4uud09DsjFNH7winGv8Q==", - "license": "ISC", - "engines": { - "node": "^10 || ^12 || >= 14" - }, - "peerDependencies": { - "postcss": "^8.1.0" - } - }, - "node_modules/postcss-modules-local-by-default": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/postcss-modules-local-by-default/-/postcss-modules-local-by-default-4.1.0.tgz", - "integrity": "sha512-rm0bdSv4jC3BDma3s9H19ZddW0aHX6EoqwDYU2IfZhRN+53QrufTRo2IdkAbRqLx4R2IYbZnbjKKxg4VN5oU9Q==", - "license": "MIT", - "dependencies": { - "icss-utils": "^5.0.0", - "postcss-selector-parser": "^7.0.0", - "postcss-value-parser": "^4.1.0" - }, - "engines": { - "node": "^10 || ^12 || >= 14" - }, - "peerDependencies": { - "postcss": "^8.1.0" - } - }, - "node_modules/postcss-modules-local-by-default/node_modules/postcss-selector-parser": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.0.0.tgz", - "integrity": "sha512-9RbEr1Y7FFfptd/1eEdntyjMwLeghW1bHX9GWjXo19vx4ytPQhANltvVxDggzJl7mnWM+dX28kb6cyS/4iQjlQ==", - "license": "MIT", - "dependencies": { - "cssesc": "^3.0.0", - "util-deprecate": "^1.0.2" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/postcss-modules-scope": { - "version": "3.2.1", - "resolved": 
"https://registry.npmjs.org/postcss-modules-scope/-/postcss-modules-scope-3.2.1.tgz", - "integrity": "sha512-m9jZstCVaqGjTAuny8MdgE88scJnCiQSlSrOWcTQgM2t32UBe+MUmFSO5t7VMSfAf/FJKImAxBav8ooCHJXCJA==", - "license": "ISC", - "dependencies": { - "postcss-selector-parser": "^7.0.0" - }, - "engines": { - "node": "^10 || ^12 || >= 14" - }, - "peerDependencies": { - "postcss": "^8.1.0" - } - }, - "node_modules/postcss-modules-scope/node_modules/postcss-selector-parser": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.0.0.tgz", - "integrity": "sha512-9RbEr1Y7FFfptd/1eEdntyjMwLeghW1bHX9GWjXo19vx4ytPQhANltvVxDggzJl7mnWM+dX28kb6cyS/4iQjlQ==", - "license": "MIT", - "dependencies": { - "cssesc": "^3.0.0", - "util-deprecate": "^1.0.2" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/postcss-modules-values": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/postcss-modules-values/-/postcss-modules-values-4.0.0.tgz", - "integrity": "sha512-RDxHkAiEGI78gS2ofyvCsu7iycRv7oqw5xMWn9iMoR0N/7mf9D50ecQqUo5BZ9Zh2vH4bCUR/ktCqbB9m8vJjQ==", - "license": "ISC", - "dependencies": { - "icss-utils": "^5.0.0" - }, - "engines": { - "node": "^10 || ^12 || >= 14" - }, - "peerDependencies": { - "postcss": "^8.1.0" - } - }, - "node_modules/postcss-nesting": { - "version": "13.0.1", - "resolved": "https://registry.npmjs.org/postcss-nesting/-/postcss-nesting-13.0.1.tgz", - "integrity": "sha512-VbqqHkOBOt4Uu3G8Dm8n6lU5+9cJFxiuty9+4rcoyRPO9zZS1JIs6td49VIoix3qYqELHlJIn46Oih9SAKo+yQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "dependencies": { - "@csstools/selector-resolve-nested": "^3.0.0", - "@csstools/selector-specificity": "^5.0.0", - "postcss-selector-parser": "^7.0.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-nesting/node_modules/@csstools/selector-resolve-nested": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@csstools/selector-resolve-nested/-/selector-resolve-nested-3.0.0.tgz", - "integrity": "sha512-ZoK24Yku6VJU1gS79a5PFmC8yn3wIapiKmPgun0hZgEI5AOqgH2kiPRsPz1qkGv4HL+wuDLH83yQyk6inMYrJQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss-selector-parser": "^7.0.0" - } - }, - "node_modules/postcss-nesting/node_modules/@csstools/selector-specificity": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/@csstools/selector-specificity/-/selector-specificity-5.0.0.tgz", - "integrity": "sha512-PCqQV3c4CoVm3kdPhyeZ07VmBRdH2EpMFA/pd9OASpOEC3aXNGoqPDAZ80D0cLpMBxnmk0+yNhGsEx31hq7Gtw==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss-selector-parser": "^7.0.0" - } - }, - "node_modules/postcss-nesting/node_modules/postcss-selector-parser": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.0.0.tgz", - "integrity": 
"sha512-9RbEr1Y7FFfptd/1eEdntyjMwLeghW1bHX9GWjXo19vx4ytPQhANltvVxDggzJl7mnWM+dX28kb6cyS/4iQjlQ==", - "license": "MIT", - "dependencies": { - "cssesc": "^3.0.0", - "util-deprecate": "^1.0.2" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/postcss-normalize-charset": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-charset/-/postcss-normalize-charset-6.0.2.tgz", - "integrity": "sha512-a8N9czmdnrjPHa3DeFlwqst5eaL5W8jYu3EBbTTkI5FHkfMhFZh1EGbku6jhHhIzTA6tquI2P42NtZ59M/H/kQ==", - "license": "MIT", - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-normalize-display-values": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-display-values/-/postcss-normalize-display-values-6.0.2.tgz", - "integrity": "sha512-8H04Mxsb82ON/aAkPeq8kcBbAtI5Q2a64X/mnRRfPXBq7XeogoQvReqxEfc0B4WPq1KimjezNC8flUtC3Qz6jg==", - "license": "MIT", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-normalize-positions": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-positions/-/postcss-normalize-positions-6.0.2.tgz", - "integrity": "sha512-/JFzI441OAB9O7VnLA+RtSNZvQ0NCFZDOtp6QPFo1iIyawyXg0YI3CYM9HBy1WvwCRHnPep/BvI1+dGPKoXx/Q==", - "license": "MIT", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-normalize-repeat-style": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-repeat-style/-/postcss-normalize-repeat-style-6.0.2.tgz", - "integrity": "sha512-YdCgsfHkJ2jEXwR4RR3Tm/iOxSfdRt7jplS6XRh9Js9PyCR/aka/FCb6TuHT2U8gQubbm/mPmF6L7FY9d79VwQ==", - "license": "MIT", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-normalize-string": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-string/-/postcss-normalize-string-6.0.2.tgz", - "integrity": "sha512-vQZIivlxlfqqMp4L9PZsFE4YUkWniziKjQWUtsxUiVsSSPelQydwS8Wwcuw0+83ZjPWNTl02oxlIvXsmmG+CiQ==", - "license": "MIT", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-normalize-timing-functions": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-timing-functions/-/postcss-normalize-timing-functions-6.0.2.tgz", - "integrity": "sha512-a+YrtMox4TBtId/AEwbA03VcJgtyW4dGBizPl7e88cTFULYsprgHWTbfyjSLyHeBcK/Q9JhXkt2ZXiwaVHoMzA==", - "license": "MIT", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-normalize-unicode": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/postcss-normalize-unicode/-/postcss-normalize-unicode-6.1.0.tgz", - "integrity": "sha512-QVC5TQHsVj33otj8/JD869Ndr5Xcc/+fwRh4HAsFsAeygQQXm+0PySrKbr/8tkDKzW+EVT3QkqZMfFrGiossDg==", - "license": "MIT", - "dependencies": { - "browserslist": "^4.23.0", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^14 || ^16 || 
>=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-normalize-url": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-url/-/postcss-normalize-url-6.0.2.tgz", - "integrity": "sha512-kVNcWhCeKAzZ8B4pv/DnrU1wNh458zBNp8dh4y5hhxih5RZQ12QWMuQrDgPRw3LRl8mN9vOVfHl7uhvHYMoXsQ==", - "license": "MIT", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-normalize-whitespace": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-whitespace/-/postcss-normalize-whitespace-6.0.2.tgz", - "integrity": "sha512-sXZ2Nj1icbJOKmdjXVT9pnyHQKiSAyuNQHSgRCUgThn2388Y9cGVDR+E9J9iAYbSbLHI+UUwLVl1Wzco/zgv0Q==", - "license": "MIT", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-opacity-percentage": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/postcss-opacity-percentage/-/postcss-opacity-percentage-3.0.0.tgz", - "integrity": "sha512-K6HGVzyxUxd/VgZdX04DCtdwWJ4NGLG212US4/LA1TLAbHgmAsTWVR86o+gGIbFtnTkfOpb9sCRBx8K7HO66qQ==", - "funding": [ - { - "type": "kofi", - "url": "https://ko-fi.com/mrcgrtz" - }, - { - "type": "liberapay", - "url": "https://liberapay.com/mrcgrtz" - } - ], - "license": "MIT", - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-ordered-values": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/postcss-ordered-values/-/postcss-ordered-values-6.0.2.tgz", - "integrity": "sha512-VRZSOB+JU32RsEAQrO94QPkClGPKJEL/Z9PCBImXMhIeK5KAYo6slP/hBYlLgrCjFxyqvn5VC81tycFEDBLG1Q==", - "license": "MIT", - "dependencies": { - "cssnano-utils": "^4.0.2", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-overflow-shorthand": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/postcss-overflow-shorthand/-/postcss-overflow-shorthand-6.0.0.tgz", - "integrity": "sha512-BdDl/AbVkDjoTofzDQnwDdm/Ym6oS9KgmO7Gr+LHYjNWJ6ExORe4+3pcLQsLA9gIROMkiGVjjwZNoL/mpXHd5Q==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-page-break": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/postcss-page-break/-/postcss-page-break-3.0.4.tgz", - "integrity": "sha512-1JGu8oCjVXLa9q9rFTo4MbeeA5FMe00/9C7lN4va606Rdb+HkxXtXsmEDrIraQ11fGz/WvKWa8gMuCKkrXpTsQ==", - "license": "MIT", - "peerDependencies": { - "postcss": "^8" - } - }, - "node_modules/postcss-place": { - "version": "10.0.0", - "resolved": "https://registry.npmjs.org/postcss-place/-/postcss-place-10.0.0.tgz", - "integrity": "sha512-5EBrMzat2pPAxQNWYavwAfoKfYcTADJ8AXGVPcUZ2UkNloUTWzJQExgrzrDkh3EKzmAx1evfTAzF9I8NGcc+qw==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "dependencies": { - 
"postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-preset-env": { - "version": "10.1.1", - "resolved": "https://registry.npmjs.org/postcss-preset-env/-/postcss-preset-env-10.1.1.tgz", - "integrity": "sha512-wqqsnBFD6VIwcHHRbhjTOcOi4qRVlB26RwSr0ordPj7OubRRxdWebv/aLjKLRR8zkZrbxZyuus03nOIgC5elMQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "dependencies": { - "@csstools/postcss-cascade-layers": "^5.0.1", - "@csstools/postcss-color-function": "^4.0.6", - "@csstools/postcss-color-mix-function": "^3.0.6", - "@csstools/postcss-content-alt-text": "^2.0.4", - "@csstools/postcss-exponential-functions": "^2.0.5", - "@csstools/postcss-font-format-keywords": "^4.0.0", - "@csstools/postcss-gamut-mapping": "^2.0.6", - "@csstools/postcss-gradients-interpolation-method": "^5.0.6", - "@csstools/postcss-hwb-function": "^4.0.6", - "@csstools/postcss-ic-unit": "^4.0.0", - "@csstools/postcss-initial": "^2.0.0", - "@csstools/postcss-is-pseudo-class": "^5.0.1", - "@csstools/postcss-light-dark-function": "^2.0.7", - "@csstools/postcss-logical-float-and-clear": "^3.0.0", - "@csstools/postcss-logical-overflow": "^2.0.0", - "@csstools/postcss-logical-overscroll-behavior": "^2.0.0", - "@csstools/postcss-logical-resize": "^3.0.0", - "@csstools/postcss-logical-viewport-units": "^3.0.3", - "@csstools/postcss-media-minmax": "^2.0.5", - "@csstools/postcss-media-queries-aspect-ratio-number-values": "^3.0.4", - "@csstools/postcss-nested-calc": "^4.0.0", - "@csstools/postcss-normalize-display-values": "^4.0.0", - "@csstools/postcss-oklab-function": "^4.0.6", - "@csstools/postcss-progressive-custom-properties": "^4.0.0", - "@csstools/postcss-random-function": "^1.0.1", - "@csstools/postcss-relative-color-syntax": "^3.0.6", - "@csstools/postcss-scope-pseudo-class": "^4.0.1", - "@csstools/postcss-sign-functions": "^1.1.0", - "@csstools/postcss-stepped-value-functions": "^4.0.5", - "@csstools/postcss-text-decoration-shorthand": "^4.0.1", - "@csstools/postcss-trigonometric-functions": "^4.0.5", - "@csstools/postcss-unset-value": "^4.0.0", - "autoprefixer": "^10.4.19", - "browserslist": "^4.23.1", - "css-blank-pseudo": "^7.0.1", - "css-has-pseudo": "^7.0.1", - "css-prefers-color-scheme": "^10.0.0", - "cssdb": "^8.2.1", - "postcss-attribute-case-insensitive": "^7.0.1", - "postcss-clamp": "^4.1.0", - "postcss-color-functional-notation": "^7.0.6", - "postcss-color-hex-alpha": "^10.0.0", - "postcss-color-rebeccapurple": "^10.0.0", - "postcss-custom-media": "^11.0.5", - "postcss-custom-properties": "^14.0.4", - "postcss-custom-selectors": "^8.0.4", - "postcss-dir-pseudo-class": "^9.0.1", - "postcss-double-position-gradients": "^6.0.0", - "postcss-focus-visible": "^10.0.1", - "postcss-focus-within": "^9.0.1", - "postcss-font-variant": "^5.0.0", - "postcss-gap-properties": "^6.0.0", - "postcss-image-set-function": "^7.0.0", - "postcss-lab-function": "^7.0.6", - "postcss-logical": "^8.0.0", - "postcss-nesting": "^13.0.1", - "postcss-opacity-percentage": "^3.0.0", - "postcss-overflow-shorthand": "^6.0.0", - "postcss-page-break": "^3.0.4", - "postcss-place": "^10.0.0", - "postcss-pseudo-class-any-link": "^10.0.1", - "postcss-replace-overflow-wrap": "^4.0.0", - "postcss-selector-not": "^8.0.1" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - 
}, - "node_modules/postcss-pseudo-class-any-link": { - "version": "10.0.1", - "resolved": "https://registry.npmjs.org/postcss-pseudo-class-any-link/-/postcss-pseudo-class-any-link-10.0.1.tgz", - "integrity": "sha512-3el9rXlBOqTFaMFkWDOkHUTQekFIYnaQY55Rsp8As8QQkpiSgIYEcF/6Ond93oHiDsGb4kad8zjt+NPlOC1H0Q==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT-0", - "dependencies": { - "postcss-selector-parser": "^7.0.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-pseudo-class-any-link/node_modules/postcss-selector-parser": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.0.0.tgz", - "integrity": "sha512-9RbEr1Y7FFfptd/1eEdntyjMwLeghW1bHX9GWjXo19vx4ytPQhANltvVxDggzJl7mnWM+dX28kb6cyS/4iQjlQ==", - "license": "MIT", - "dependencies": { - "cssesc": "^3.0.0", - "util-deprecate": "^1.0.2" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/postcss-reduce-idents": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/postcss-reduce-idents/-/postcss-reduce-idents-6.0.3.tgz", - "integrity": "sha512-G3yCqZDpsNPoQgbDUy3T0E6hqOQ5xigUtBQyrmq3tn2GxlyiL0yyl7H+T8ulQR6kOcHJ9t7/9H4/R2tv8tJbMA==", - "license": "MIT", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-reduce-initial": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/postcss-reduce-initial/-/postcss-reduce-initial-6.1.0.tgz", - "integrity": "sha512-RarLgBK/CrL1qZags04oKbVbrrVK2wcxhvta3GCxrZO4zveibqbRPmm2VI8sSgCXwoUHEliRSbOfpR0b/VIoiw==", - "license": "MIT", - "dependencies": { - "browserslist": "^4.23.0", - "caniuse-api": "^3.0.0" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-reduce-transforms": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/postcss-reduce-transforms/-/postcss-reduce-transforms-6.0.2.tgz", - "integrity": "sha512-sB+Ya++3Xj1WaT9+5LOOdirAxP7dJZms3GRcYheSPi1PiTMigsxHAdkrbItHxwYHr4kt1zL7mmcHstgMYT+aiA==", - "license": "MIT", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-replace-overflow-wrap": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/postcss-replace-overflow-wrap/-/postcss-replace-overflow-wrap-4.0.0.tgz", - "integrity": "sha512-KmF7SBPphT4gPPcKZc7aDkweHiKEEO8cla/GjcBK+ckKxiZslIu3C4GCRW3DNfL0o7yW7kMQu9xlZ1kXRXLXtw==", - "license": "MIT", - "peerDependencies": { - "postcss": "^8.0.3" - } - }, - "node_modules/postcss-selector-not": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/postcss-selector-not/-/postcss-selector-not-8.0.1.tgz", - "integrity": "sha512-kmVy/5PYVb2UOhy0+LqUYAhKj7DUGDpSWa5LZqlkWJaaAV+dxxsOG3+St0yNLu6vsKD7Dmqx+nWQt0iil89+WA==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/csstools" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - ], - "license": "MIT", - "dependencies": { - "postcss-selector-parser": "^7.0.0" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "postcss": 
"^8.4" - } - }, - "node_modules/postcss-selector-not/node_modules/postcss-selector-parser": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.0.0.tgz", - "integrity": "sha512-9RbEr1Y7FFfptd/1eEdntyjMwLeghW1bHX9GWjXo19vx4ytPQhANltvVxDggzJl7mnWM+dX28kb6cyS/4iQjlQ==", - "license": "MIT", - "dependencies": { - "cssesc": "^3.0.0", - "util-deprecate": "^1.0.2" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/postcss-selector-parser": { - "version": "6.1.2", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.1.2.tgz", - "integrity": "sha512-Q8qQfPiZ+THO/3ZrOrO0cJJKfpYCagtMUkXbnEfmgUjwXg6z/WBeOyS9APBBPCTSiDV+s4SwQGu8yFsiMRIudg==", - "license": "MIT", - "dependencies": { - "cssesc": "^3.0.0", - "util-deprecate": "^1.0.2" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/postcss-sort-media-queries": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/postcss-sort-media-queries/-/postcss-sort-media-queries-5.2.0.tgz", - "integrity": "sha512-AZ5fDMLD8SldlAYlvi8NIqo0+Z8xnXU2ia0jxmuhxAU+Lqt9K+AlmLNJ/zWEnE9x+Zx3qL3+1K20ATgNOr3fAA==", - "license": "MIT", - "dependencies": { - "sort-css-media-queries": "2.2.0" - }, - "engines": { - "node": ">=14.0.0" - }, - "peerDependencies": { - "postcss": "^8.4.23" - } - }, - "node_modules/postcss-svgo": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/postcss-svgo/-/postcss-svgo-6.0.3.tgz", - "integrity": "sha512-dlrahRmxP22bX6iKEjOM+c8/1p+81asjKT+V5lrgOH944ryx/OHpclnIbGsKVd3uWOXFLYJwCVf0eEkJGvO96g==", - "license": "MIT", - "dependencies": { - "postcss-value-parser": "^4.2.0", - "svgo": "^3.2.0" - }, - "engines": { - "node": "^14 || ^16 || >= 18" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-unique-selectors": { - "version": "6.0.4", - "resolved": "https://registry.npmjs.org/postcss-unique-selectors/-/postcss-unique-selectors-6.0.4.tgz", - "integrity": "sha512-K38OCaIrO8+PzpArzkLKB42dSARtC2tmG6PvD4b1o1Q2E9Os8jzfWFfSy/rixsHwohtsDdFtAWGjFVFUdwYaMg==", - "license": "MIT", - "dependencies": { - "postcss-selector-parser": "^6.0.16" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/postcss-value-parser": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz", - "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==", - "license": "MIT" - }, - "node_modules/postcss-zindex": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/postcss-zindex/-/postcss-zindex-6.0.2.tgz", - "integrity": "sha512-5BxW9l1evPB/4ZIc+2GobEBoKC+h8gPGCMi+jxsYvd2x0mjq7wazk6DrP71pStqxE9Foxh5TVnonbWpFZzXaYg==", - "license": "MIT", - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/preact": { - "version": "10.24.3", - "resolved": "https://registry.npmjs.org/preact/-/preact-10.24.3.tgz", - "integrity": "sha512-Z2dPnBnMUfyQfSQ+GBdsGa16hz35YmLmtTLhM169uW944hYL6xzTYkJjC07j+Wosz733pMWx0fgON3JNw1jJQA==", - "license": "MIT", - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/preact" - } - }, - "node_modules/pretty-error": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/pretty-error/-/pretty-error-4.0.0.tgz", - "integrity": 
"sha512-AoJ5YMAcXKYxKhuJGdcvse+Voc6v1RgnsR3nWcYU7q4t6z0Q6T86sv5Zq8VIRbOWWFpvdGE83LtdSMNd+6Y0xw==", - "license": "MIT", - "dependencies": { - "lodash": "^4.17.20", - "renderkid": "^3.0.0" - } - }, - "node_modules/pretty-time": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/pretty-time/-/pretty-time-1.1.0.tgz", - "integrity": "sha512-28iF6xPQrP8Oa6uxE6a1biz+lWeTOAPKggvjB8HAs6nVMKZwf5bG++632Dx614hIWgUPkgivRfG+a8uAXGTIbA==", - "license": "MIT", - "engines": { - "node": ">=4" - } - }, - "node_modules/prism-react-renderer": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/prism-react-renderer/-/prism-react-renderer-2.4.1.tgz", - "integrity": "sha512-ey8Ls/+Di31eqzUxC46h8MksNuGx/n0AAC8uKpwFau4RPDYLuE3EXTp8N8G2vX2N7UC/+IXeNUnlWBGGcAG+Ig==", - "license": "MIT", - "dependencies": { - "@types/prismjs": "^1.26.0", - "clsx": "^2.0.0" - }, - "peerDependencies": { - "react": ">=16.0.0" - } - }, - "node_modules/prismjs": { - "version": "1.29.0", - "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.29.0.tgz", - "integrity": "sha512-Kx/1w86q/epKcmte75LNrEoT+lX8pBpavuAbvJWRXar7Hz8jrtF+e3vY751p0R8H9HdArwaCTNDDzHg/ScJK1Q==", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/process-nextick-args": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", - "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==", - "license": "MIT" - }, - "node_modules/prompts": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", - "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==", - "license": "MIT", - "dependencies": { - "kleur": "^3.0.3", - "sisteransi": "^1.0.5" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/prop-types": { - "version": "15.8.1", - "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz", - "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==", - "license": "MIT", - "dependencies": { - "loose-envify": "^1.4.0", - "object-assign": "^4.1.1", - "react-is": "^16.13.1" - } - }, - "node_modules/property-information": { - "version": "6.5.0", - "resolved": "https://registry.npmjs.org/property-information/-/property-information-6.5.0.tgz", - "integrity": "sha512-PgTgs/BlvHxOu8QuEN7wi5A0OmXaBcHpmCSTehcs6Uuu9IkDIEo13Hy7n898RHfrQ49vKCoGeWZSaAK01nwVig==", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/proto-list": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/proto-list/-/proto-list-1.2.4.tgz", - "integrity": "sha512-vtK/94akxsTMhe0/cbfpR+syPuszcuwhqVjJq26CuNDgFGj682oRBXOP5MJpv2r7JtE8MsiepGIqvvOTBwn2vA==", - "license": "ISC" - }, - "node_modules/proxy-addr": { - "version": "2.0.7", - "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", - "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==", - "license": "MIT", - "dependencies": { - "forwarded": "0.2.0", - "ipaddr.js": "1.9.1" - }, - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/proxy-addr/node_modules/ipaddr.js": { - "version": "1.9.1", - "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", - "integrity": 
"sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", - "license": "MIT", - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/punycode": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", - "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/pupa": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/pupa/-/pupa-3.1.0.tgz", - "integrity": "sha512-FLpr4flz5xZTSJxSeaheeMKN/EDzMdK7b8PTOC6a5PYFKTucWbdqjgqaEyH0shFiSJrVB1+Qqi4Tk19ccU6Aug==", - "license": "MIT", - "dependencies": { - "escape-goat": "^4.0.0" - }, - "engines": { - "node": ">=12.20" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/qs": { - "version": "6.13.0", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.13.0.tgz", - "integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==", - "license": "BSD-3-Clause", - "dependencies": { - "side-channel": "^1.0.6" - }, - "engines": { - "node": ">=0.6" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/queue-microtask": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", - "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT" - }, - "node_modules/quick-lru": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-5.1.1.tgz", - "integrity": "sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA==", - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/randombytes": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", - "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", - "license": "MIT", - "dependencies": { - "safe-buffer": "^5.1.0" - } - }, - "node_modules/range-parser": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.0.tgz", - "integrity": "sha512-kA5WQoNVo4t9lNx2kQNFCxKeBl5IbbSNBl1M/tLkw9WCn+hxNBAW5Qh8gdhs63CJnhjJ2zQWFoqPJP2sK1AV5A==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/raw-body": { - "version": "2.5.2", - "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz", - "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==", - "license": "MIT", - "dependencies": { - "bytes": "3.1.2", - "http-errors": "2.0.0", - "iconv-lite": "0.4.24", - "unpipe": "1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/raw-body/node_modules/bytes": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", - "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, 
- "node_modules/raw-body/node_modules/iconv-lite": { - "version": "0.4.24", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", - "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", - "license": "MIT", - "dependencies": { - "safer-buffer": ">= 2.1.2 < 3" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/rc": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", - "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", - "license": "(BSD-2-Clause OR MIT OR Apache-2.0)", - "dependencies": { - "deep-extend": "^0.6.0", - "ini": "~1.3.0", - "minimist": "^1.2.0", - "strip-json-comments": "~2.0.1" - }, - "bin": { - "rc": "cli.js" - } - }, - "node_modules/rc/node_modules/strip-json-comments": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", - "integrity": "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/react": { - "version": "18.3.1", - "resolved": "https://registry.npmjs.org/react/-/react-18.3.1.tgz", - "integrity": "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==", - "license": "MIT", - "dependencies": { - "loose-envify": "^1.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/react-dom": { - "version": "18.3.1", - "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.3.1.tgz", - "integrity": "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw==", - "license": "MIT", - "dependencies": { - "loose-envify": "^1.1.0", - "scheduler": "^0.23.2" - }, - "peerDependencies": { - "react": "^18.3.1" - } - }, - "node_modules/react-fast-compare": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/react-fast-compare/-/react-fast-compare-3.2.2.tgz", - "integrity": "sha512-nsO+KSNgo1SbJqJEYRE9ERzo7YtYbou/OqjSQKxV7jcKox7+usiUVZOAC+XnDOABXggQTno0Y1CpVnuWEc1boQ==", - "license": "MIT" - }, - "node_modules/react-helmet-async": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/react-helmet-async/-/react-helmet-async-1.3.0.tgz", - "integrity": "sha512-9jZ57/dAn9t3q6hneQS0wukqC2ENOBgMNVEhb/ZG9ZSxUetzVIw4iAmEU38IaVg3QGYauQPhSeUTuIUtFglWpg==", - "license": "Apache-2.0", - "dependencies": { - "@babel/runtime": "^7.12.5", - "invariant": "^2.2.4", - "prop-types": "^15.7.2", - "react-fast-compare": "^3.2.0", - "shallowequal": "^1.1.0" - }, - "peerDependencies": { - "react": "^16.6.0 || ^17.0.0 || ^18.0.0", - "react-dom": "^16.6.0 || ^17.0.0 || ^18.0.0" - } - }, - "node_modules/react-is": { - "version": "16.13.1", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", - "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", - "license": "MIT" - }, - "node_modules/react-json-view-lite": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/react-json-view-lite/-/react-json-view-lite-2.4.1.tgz", - "integrity": "sha512-fwFYknRIBxjbFm0kBDrzgBy1xa5tDg2LyXXBepC5f1b+MY3BUClMCsvanMPn089JbV1Eg3nZcrp0VCuH43aXnA==", - "license": "MIT", - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "react": "^18.0.0 || ^19.0.0" - } - }, - "node_modules/react-loadable": { - "name": "@docusaurus/react-loadable", - 
"version": "6.0.0", - "resolved": "https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-6.0.0.tgz", - "integrity": "sha512-YMMxTUQV/QFSnbgrP3tjDzLHRg7vsbMn8e9HAa8o/1iXoiomo48b7sk/kkmWEuWNDPJVlKSJRB6Y2fHqdJk+SQ==", - "license": "MIT", - "dependencies": { - "@types/react": "*" - }, - "peerDependencies": { - "react": "*" - } - }, - "node_modules/react-loadable-ssr-addon-v5-slorber": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/react-loadable-ssr-addon-v5-slorber/-/react-loadable-ssr-addon-v5-slorber-1.0.1.tgz", - "integrity": "sha512-lq3Lyw1lGku8zUEJPDxsNm1AfYHBrO9Y1+olAYwpUJ2IGFBskM0DMKok97A6LWUpHm+o7IvQBOWu9MLenp9Z+A==", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.10.3" - }, - "engines": { - "node": ">=10.13.0" - }, - "peerDependencies": { - "react-loadable": "*", - "webpack": ">=4.41.1 || 5.x" - } - }, - "node_modules/react-router": { - "version": "5.3.4", - "resolved": "https://registry.npmjs.org/react-router/-/react-router-5.3.4.tgz", - "integrity": "sha512-Ys9K+ppnJah3QuaRiLxk+jDWOR1MekYQrlytiXxC1RyfbdsZkS5pvKAzCCr031xHixZwpnsYNT5xysdFHQaYsA==", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.12.13", - "history": "^4.9.0", - "hoist-non-react-statics": "^3.1.0", - "loose-envify": "^1.3.1", - "path-to-regexp": "^1.7.0", - "prop-types": "^15.6.2", - "react-is": "^16.6.0", - "tiny-invariant": "^1.0.2", - "tiny-warning": "^1.0.0" - }, - "peerDependencies": { - "react": ">=15" - } - }, - "node_modules/react-router-config": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/react-router-config/-/react-router-config-5.1.1.tgz", - "integrity": "sha512-DuanZjaD8mQp1ppHjgnnUnyOlqYXZVjnov/JzFhjLEwd3Z4dYjMSnqrEzzGThH47vpCOqPPwJM2FtthLeJ8Pbg==", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.1.2" - }, - "peerDependencies": { - "react": ">=15", - "react-router": ">=5" - } - }, - "node_modules/react-router-dom": { - "version": "5.3.4", - "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-5.3.4.tgz", - "integrity": "sha512-m4EqFMHv/Ih4kpcBCONHbkT68KoAeHN4p3lAGoNryfHi0dMy0kCzEZakiKRsvg5wHZ/JLrLW8o8KomWiz/qbYQ==", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.12.13", - "history": "^4.9.0", - "loose-envify": "^1.3.1", - "prop-types": "^15.6.2", - "react-router": "5.3.4", - "tiny-invariant": "^1.0.2", - "tiny-warning": "^1.0.0" - }, - "peerDependencies": { - "react": ">=15" - } - }, - "node_modules/readable-stream": { - "version": "3.6.2", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", - "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", - "license": "MIT", - "dependencies": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/readdirp": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", - "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", - "license": "MIT", - "dependencies": { - "picomatch": "^2.2.1" - }, - "engines": { - "node": ">=8.10.0" - } - }, - "node_modules/recma-build-jsx": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/recma-build-jsx/-/recma-build-jsx-1.0.0.tgz", - "integrity": "sha512-8GtdyqaBcDfva+GUKDr3nev3VpKAhup1+RvkMvUxURHpW7QyIvk9F5wz7Vzo06CEMSilw6uArgRqhpiUcWp8ew==", - "license": "MIT", - "dependencies": { - 
"@types/estree": "^1.0.0", - "estree-util-build-jsx": "^3.0.0", - "vfile": "^6.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/recma-jsx": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/recma-jsx/-/recma-jsx-1.0.0.tgz", - "integrity": "sha512-5vwkv65qWwYxg+Atz95acp8DMu1JDSqdGkA2Of1j6rCreyFUE/gp15fC8MnGEuG1W68UKjM6x6+YTWIh7hZM/Q==", - "license": "MIT", - "dependencies": { - "acorn-jsx": "^5.0.0", - "estree-util-to-js": "^2.0.0", - "recma-parse": "^1.0.0", - "recma-stringify": "^1.0.0", - "unified": "^11.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/recma-parse": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/recma-parse/-/recma-parse-1.0.0.tgz", - "integrity": "sha512-OYLsIGBB5Y5wjnSnQW6t3Xg7q3fQ7FWbw/vcXtORTnyaSFscOtABg+7Pnz6YZ6c27fG1/aN8CjfwoUEUIdwqWQ==", - "license": "MIT", - "dependencies": { - "@types/estree": "^1.0.0", - "esast-util-from-js": "^2.0.0", - "unified": "^11.0.0", - "vfile": "^6.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/recma-stringify": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/recma-stringify/-/recma-stringify-1.0.0.tgz", - "integrity": "sha512-cjwII1MdIIVloKvC9ErQ+OgAtwHBmcZ0Bg4ciz78FtbT8In39aAYbaA7zvxQ61xVMSPE8WxhLwLbhif4Js2C+g==", - "license": "MIT", - "dependencies": { - "@types/estree": "^1.0.0", - "estree-util-to-js": "^2.0.0", - "unified": "^11.0.0", - "vfile": "^6.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/regenerate": { - "version": "1.4.2", - "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.2.tgz", - "integrity": "sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A==", - "license": "MIT" - }, - "node_modules/regenerate-unicode-properties": { - "version": "10.2.0", - "resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-10.2.0.tgz", - "integrity": "sha512-DqHn3DwbmmPVzeKj9woBadqmXxLvQoQIwu7nopMc72ztvxVmVk2SBhSnx67zuye5TP+lJsb/TBQsjLKhnDf3MA==", - "license": "MIT", - "dependencies": { - "regenerate": "^1.4.2" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/regenerator-runtime": { - "version": "0.14.1", - "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz", - "integrity": "sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==", - "license": "MIT" - }, - "node_modules/regenerator-transform": { - "version": "0.15.2", - "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.15.2.tgz", - "integrity": "sha512-hfMp2BoF0qOk3uc5V20ALGDS2ddjQaLrdl7xrGXvAIow7qeWRM2VA2HuCHkUKk9slq3VwEwLNK3DFBqDfPGYtg==", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.8.4" - } - }, - "node_modules/regexpu-core": { - "version": "6.1.1", - "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-6.1.1.tgz", - "integrity": "sha512-k67Nb9jvwJcJmVpw0jPttR1/zVfnKf8Km0IPatrU/zJ5XeG3+Slx0xLXs9HByJSzXzrlz5EDvN6yLNMDc2qdnw==", - "license": "MIT", - "dependencies": { - "regenerate": "^1.4.2", - "regenerate-unicode-properties": "^10.2.0", - "regjsgen": "^0.8.0", - "regjsparser": "^0.11.0", - "unicode-match-property-ecmascript": "^2.0.0", - 
"unicode-match-property-value-ecmascript": "^2.1.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/registry-auth-token": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-5.0.2.tgz", - "integrity": "sha512-o/3ikDxtXaA59BmZuZrJZDJv8NMDGSj+6j6XaeBmHw8eY1i1qd9+6H+LjVvQXx3HN6aRCGa1cUdJ9RaJZUugnQ==", - "license": "MIT", - "dependencies": { - "@pnpm/npm-conf": "^2.1.0" - }, - "engines": { - "node": ">=14" - } - }, - "node_modules/registry-url": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/registry-url/-/registry-url-6.0.1.tgz", - "integrity": "sha512-+crtS5QjFRqFCoQmvGduwYWEBng99ZvmFvF+cUJkGYF1L1BfU8C6Zp9T7f5vPAwyLkUExpvK+ANVZmGU49qi4Q==", - "license": "MIT", - "dependencies": { - "rc": "1.2.8" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/regjsgen": { - "version": "0.8.0", - "resolved": "https://registry.npmjs.org/regjsgen/-/regjsgen-0.8.0.tgz", - "integrity": "sha512-RvwtGe3d7LvWiDQXeQw8p5asZUmfU1G/l6WbUXeHta7Y2PEIvBTwH6E2EfmYUK8pxcxEdEmaomqyp0vZZ7C+3Q==", - "license": "MIT" - }, - "node_modules/regjsparser": { - "version": "0.11.2", - "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.11.2.tgz", - "integrity": "sha512-3OGZZ4HoLJkkAZx/48mTXJNlmqTGOzc0o9OWQPuWpkOlXXPbyN6OafCcoXUnBqE2D3f/T5L+pWc1kdEmnfnRsA==", - "license": "BSD-2-Clause", - "dependencies": { - "jsesc": "~3.0.2" - }, - "bin": { - "regjsparser": "bin/parser" - } - }, - "node_modules/rehype-raw": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/rehype-raw/-/rehype-raw-7.0.0.tgz", - "integrity": "sha512-/aE8hCfKlQeA8LmyeyQvQF3eBiLRGNlfBJEvWH7ivp9sBqs7TNqBL5X3v157rM4IFETqDnIOO+z5M/biZbo9Ww==", - "license": "MIT", - "dependencies": { - "@types/hast": "^3.0.0", - "hast-util-raw": "^9.0.0", - "vfile": "^6.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/rehype-recma": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/rehype-recma/-/rehype-recma-1.0.0.tgz", - "integrity": "sha512-lqA4rGUf1JmacCNWWZx0Wv1dHqMwxzsDWYMTowuplHF3xH0N/MmrZ/G3BDZnzAkRmxDadujCjaKM2hqYdCBOGw==", - "license": "MIT", - "dependencies": { - "@types/estree": "^1.0.0", - "@types/hast": "^3.0.0", - "hast-util-to-estree": "^3.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/relateurl": { - "version": "0.2.7", - "resolved": "https://registry.npmjs.org/relateurl/-/relateurl-0.2.7.tgz", - "integrity": "sha512-G08Dxvm4iDN3MLM0EsP62EDV9IuhXPR6blNz6Utcp7zyV3tr4HVNINt6MpaRWbxoOHT3Q7YN2P+jaHX8vUbgog==", - "license": "MIT", - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/remark-directive": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/remark-directive/-/remark-directive-3.0.0.tgz", - "integrity": "sha512-l1UyWJ6Eg1VPU7Hm/9tt0zKtReJQNOA4+iDMAxTyZNWnJnFlbS/7zhiel/rogTLQ2vMYwDzSJa4BiVNqGlqIMA==", - "license": "MIT", - "dependencies": { - "@types/mdast": "^4.0.0", - "mdast-util-directive": "^3.0.0", - "micromark-extension-directive": "^3.0.0", - "unified": "^11.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/remark-emoji": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/remark-emoji/-/remark-emoji-4.0.1.tgz", - "integrity": 
"sha512-fHdvsTR1dHkWKev9eNyhTo4EFwbUvJ8ka9SgeWkMPYFX4WoI7ViVBms3PjlQYgw5TLvNQso3GUB/b/8t3yo+dg==", - "license": "MIT", - "dependencies": { - "@types/mdast": "^4.0.2", - "emoticon": "^4.0.1", - "mdast-util-find-and-replace": "^3.0.1", - "node-emoji": "^2.1.0", - "unified": "^11.0.4" - }, - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - } - }, - "node_modules/remark-frontmatter": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/remark-frontmatter/-/remark-frontmatter-5.0.0.tgz", - "integrity": "sha512-XTFYvNASMe5iPN0719nPrdItC9aU0ssC4v14mH1BCi1u0n1gAocqcujWUrByftZTbLhRtiKRyjYTSIOcr69UVQ==", - "license": "MIT", - "dependencies": { - "@types/mdast": "^4.0.0", - "mdast-util-frontmatter": "^2.0.0", - "micromark-extension-frontmatter": "^2.0.0", - "unified": "^11.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/remark-gfm": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/remark-gfm/-/remark-gfm-4.0.0.tgz", - "integrity": "sha512-U92vJgBPkbw4Zfu/IiW2oTZLSL3Zpv+uI7My2eq8JxKgqraFdU8YUGicEJCEgSbeaG+QDFqIcwwfMTOEelPxuA==", - "license": "MIT", - "dependencies": { - "@types/mdast": "^4.0.0", - "mdast-util-gfm": "^3.0.0", - "micromark-extension-gfm": "^3.0.0", - "remark-parse": "^11.0.0", - "remark-stringify": "^11.0.0", - "unified": "^11.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/remark-mdx": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/remark-mdx/-/remark-mdx-3.1.0.tgz", - "integrity": "sha512-Ngl/H3YXyBV9RcRNdlYsZujAmhsxwzxpDzpDEhFBVAGthS4GDgnctpDjgFl/ULx5UEDzqtW1cyBSNKqYYrqLBA==", - "license": "MIT", - "dependencies": { - "mdast-util-mdx": "^3.0.0", - "micromark-extension-mdxjs": "^3.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/remark-parse": { - "version": "11.0.0", - "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-11.0.0.tgz", - "integrity": "sha512-FCxlKLNGknS5ba/1lmpYijMUzX2esxW5xQqjWxw2eHFfS2MSdaHVINFmhjo+qN1WhZhNimq0dZATN9pH0IDrpA==", - "license": "MIT", - "dependencies": { - "@types/mdast": "^4.0.0", - "mdast-util-from-markdown": "^2.0.0", - "micromark-util-types": "^2.0.0", - "unified": "^11.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/remark-rehype": { - "version": "11.1.1", - "resolved": "https://registry.npmjs.org/remark-rehype/-/remark-rehype-11.1.1.tgz", - "integrity": "sha512-g/osARvjkBXb6Wo0XvAeXQohVta8i84ACbenPpoSsxTOQH/Ae0/RGP4WZgnMH5pMLpsj4FG7OHmcIcXxpza8eQ==", - "license": "MIT", - "dependencies": { - "@types/hast": "^3.0.0", - "@types/mdast": "^4.0.0", - "mdast-util-to-hast": "^13.0.0", - "unified": "^11.0.0", - "vfile": "^6.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/remark-stringify": { - "version": "11.0.0", - "resolved": "https://registry.npmjs.org/remark-stringify/-/remark-stringify-11.0.0.tgz", - "integrity": "sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw==", - "license": "MIT", - "dependencies": { - "@types/mdast": "^4.0.0", - "mdast-util-to-markdown": "^2.0.0", - "unified": "^11.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/renderkid": { - "version": "3.0.0", - "resolved": 
"https://registry.npmjs.org/renderkid/-/renderkid-3.0.0.tgz", - "integrity": "sha512-q/7VIQA8lmM1hF+jn+sFSPWGlMkSAeNYcPLmDQx2zzuiDfaLrOmumR8iaUKlenFgh0XRPIUeSPlH3A+AW3Z5pg==", - "license": "MIT", - "dependencies": { - "css-select": "^4.1.3", - "dom-converter": "^0.2.0", - "htmlparser2": "^6.1.0", - "lodash": "^4.17.21", - "strip-ansi": "^6.0.1" - } - }, - "node_modules/renderkid/node_modules/css-select": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/css-select/-/css-select-4.3.0.tgz", - "integrity": "sha512-wPpOYtnsVontu2mODhA19JrqWxNsfdatRKd64kmpRbQgh1KtItko5sTnEpPdpSaJszTOhEMlF/RPz28qj4HqhQ==", - "license": "BSD-2-Clause", - "dependencies": { - "boolbase": "^1.0.0", - "css-what": "^6.0.1", - "domhandler": "^4.3.1", - "domutils": "^2.8.0", - "nth-check": "^2.0.1" - }, - "funding": { - "url": "https://github.com/sponsors/fb55" - } - }, - "node_modules/renderkid/node_modules/dom-serializer": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-1.4.1.tgz", - "integrity": "sha512-VHwB3KfrcOOkelEG2ZOfxqLZdfkil8PtJi4P8N2MMXucZq2yLp75ClViUlOVwyoHEDjYU433Aq+5zWP61+RGag==", - "license": "MIT", - "dependencies": { - "domelementtype": "^2.0.1", - "domhandler": "^4.2.0", - "entities": "^2.0.0" - }, - "funding": { - "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" - } - }, - "node_modules/renderkid/node_modules/domhandler": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-4.3.1.tgz", - "integrity": "sha512-GrwoxYN+uWlzO8uhUXRl0P+kHE4GtVPfYzVLcUxPL7KNdHKj66vvlhiweIHqYYXWlw+T8iLMp42Lm67ghw4WMQ==", - "license": "BSD-2-Clause", - "dependencies": { - "domelementtype": "^2.2.0" - }, - "engines": { - "node": ">= 4" - }, - "funding": { - "url": "https://github.com/fb55/domhandler?sponsor=1" - } - }, - "node_modules/renderkid/node_modules/domutils": { - "version": "2.8.0", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-2.8.0.tgz", - "integrity": "sha512-w96Cjofp72M5IIhpjgobBimYEfoPjx1Vx0BSX9P30WBdZW2WIKU0T1Bd0kz2eNZ9ikjKgHbEyKx8BB6H1L3h3A==", - "license": "BSD-2-Clause", - "dependencies": { - "dom-serializer": "^1.0.1", - "domelementtype": "^2.2.0", - "domhandler": "^4.2.0" - }, - "funding": { - "url": "https://github.com/fb55/domutils?sponsor=1" - } - }, - "node_modules/renderkid/node_modules/entities": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz", - "integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==", - "license": "BSD-2-Clause", - "funding": { - "url": "https://github.com/fb55/entities?sponsor=1" - } - }, - "node_modules/renderkid/node_modules/htmlparser2": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-6.1.0.tgz", - "integrity": "sha512-gyyPk6rgonLFEDGoeRgQNaEUvdJ4ktTmmUh/h2t7s+M8oPpIPxgNACWa+6ESR57kXstwqPiCut0V8NRpcwgU7A==", - "funding": [ - "https://github.com/fb55/htmlparser2?sponsor=1", - { - "type": "github", - "url": "https://github.com/sponsors/fb55" - } - ], - "license": "MIT", - "dependencies": { - "domelementtype": "^2.0.1", - "domhandler": "^4.0.0", - "domutils": "^2.5.2", - "entities": "^2.0.0" - } - }, - "node_modules/repeat-string": { - "version": "1.6.1", - "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz", - "integrity": "sha512-PV0dzCYDNfRi1jCDbJzpW7jNNDRuCOG/jI5ctQcGKt/clZD+YcPS3yIlWuTJMmESC8aevCFmWJy5wjAFgNqN6w==", - "license": "MIT", - "engines": { - "node": ">=0.10" - } 
- }, - "node_modules/require-from-string": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", - "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/require-like": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/require-like/-/require-like-0.1.2.tgz", - "integrity": "sha512-oyrU88skkMtDdauHDuKVrgR+zuItqr6/c//FXzvmxRGMexSDc6hNvJInGW3LL46n+8b50RykrvwSUIIQH2LQ5A==", - "engines": { - "node": "*" - } - }, - "node_modules/requires-port": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", - "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==", - "license": "MIT" - }, - "node_modules/resolve": { - "version": "1.22.8", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.8.tgz", - "integrity": "sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==", - "license": "MIT", - "dependencies": { - "is-core-module": "^2.13.0", - "path-parse": "^1.0.7", - "supports-preserve-symlinks-flag": "^1.0.0" - }, - "bin": { - "resolve": "bin/resolve" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/resolve-alpn": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/resolve-alpn/-/resolve-alpn-1.2.1.tgz", - "integrity": "sha512-0a1F4l73/ZFZOakJnQ3FvkJ2+gSTQWz/r2KE5OdDY0TxPm5h4GkqkWWfM47T7HsbnOtcJVEF4epCVy6u7Q3K+g==", - "license": "MIT" - }, - "node_modules/resolve-from": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", - "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", - "license": "MIT", - "engines": { - "node": ">=4" - } - }, - "node_modules/resolve-pathname": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/resolve-pathname/-/resolve-pathname-3.0.0.tgz", - "integrity": "sha512-C7rARubxI8bXFNB/hqcp/4iUeIXJhJZvFPFPiSPRnhU5UPxzMFIl+2E6yY6c4k9giDJAhtV+enfA+G89N6Csng==", - "license": "MIT" - }, - "node_modules/responselike": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/responselike/-/responselike-3.0.0.tgz", - "integrity": "sha512-40yHxbNcl2+rzXvZuVkrYohathsSJlMTXKryG5y8uciHv1+xDLHQpgjG64JUO9nrEq2jGLH6IZ8BcZyw3wrweg==", - "license": "MIT", - "dependencies": { - "lowercase-keys": "^3.0.0" - }, - "engines": { - "node": ">=14.16" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/retry": { - "version": "0.13.1", - "resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz", - "integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==", - "license": "MIT", - "engines": { - "node": ">= 4" - } - }, - "node_modules/reusify": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", - "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", - "license": "MIT", - "engines": { - "iojs": ">=1.0.0", - "node": ">=0.10.0" - } - }, - "node_modules/rimraf": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", - "integrity": 
"sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", - "deprecated": "Rimraf versions prior to v4 are no longer supported", - "license": "ISC", - "dependencies": { - "glob": "^7.1.3" - }, - "bin": { - "rimraf": "bin.js" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/robust-predicates": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/robust-predicates/-/robust-predicates-3.0.2.tgz", - "integrity": "sha512-IXgzBWvWQwE6PrDI05OvmXUIruQTcoMDzRsOd5CDvHCVLcLHMTSYvOK5Cm46kWqlV3yAbuSpBZdJ5oP5OUoStg==", - "license": "Unlicense" - }, - "node_modules/roughjs": { - "version": "4.6.6", - "resolved": "https://registry.npmjs.org/roughjs/-/roughjs-4.6.6.tgz", - "integrity": "sha512-ZUz/69+SYpFN/g/lUlo2FXcIjRkSu3nDarreVdGGndHEBJ6cXPdKguS8JGxwj5HA5xIbVKSmLgr5b3AWxtRfvQ==", - "license": "MIT", - "dependencies": { - "hachure-fill": "^0.5.2", - "path-data-parser": "^0.1.0", - "points-on-curve": "^0.2.0", - "points-on-path": "^0.2.1" - } - }, - "node_modules/rtlcss": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/rtlcss/-/rtlcss-4.3.0.tgz", - "integrity": "sha512-FI+pHEn7Wc4NqKXMXFM+VAYKEj/mRIcW4h24YVwVtyjI+EqGrLc2Hx/Ny0lrZ21cBWU2goLy36eqMcNj3AQJig==", - "license": "MIT", - "dependencies": { - "escalade": "^3.1.1", - "picocolors": "^1.0.0", - "postcss": "^8.4.21", - "strip-json-comments": "^3.1.1" - }, - "bin": { - "rtlcss": "bin/rtlcss.js" - }, - "engines": { - "node": ">=12.0.0" - } - }, - "node_modules/run-parallel": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", - "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT", - "dependencies": { - "queue-microtask": "^1.2.2" - } - }, - "node_modules/rw": { - "version": "1.3.3", - "resolved": "https://registry.npmjs.org/rw/-/rw-1.3.3.tgz", - "integrity": "sha512-PdhdWy89SiZogBLaw42zdeqtRJ//zFd2PgQavcICDUgJT5oW10QCRKbJ6bg4r0/UY2M6BWd5tkxuGFRvCkgfHQ==", - "license": "BSD-3-Clause" - }, - "node_modules/safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT" - }, - "node_modules/safer-buffer": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", - "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", - "license": "MIT" - }, - "node_modules/sax": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/sax/-/sax-1.4.1.tgz", - "integrity": "sha512-+aWOz7yVScEGoKNd4PA10LZ8sk0A/z5+nXQG5giUO5rprX9jgYsTdov9qCchZiPIZezbZH+jRut8nPodFAX4Jg==", - "license": "ISC" - }, - "node_modules/scheduler": { - "version": "0.23.2", - "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.2.tgz", - "integrity": 
"sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ==", - "license": "MIT", - "dependencies": { - "loose-envify": "^1.1.0" - } - }, - "node_modules/schema-dts": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/schema-dts/-/schema-dts-1.1.5.tgz", - "integrity": "sha512-RJr9EaCmsLzBX2NDiO5Z3ux2BVosNZN5jo0gWgsyKvxKIUL5R3swNvoorulAeL9kLB0iTSX7V6aokhla2m7xbg==", - "license": "Apache-2.0" - }, - "node_modules/schema-utils": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.2.0.tgz", - "integrity": "sha512-L0jRsrPpjdckP3oPug3/VxNKt2trR8TcabrM6FOAAlvC/9Phcmm+cuAgTlxBqdBR1WJx7Naj9WHw+aOmheSVbw==", - "license": "MIT", - "dependencies": { - "@types/json-schema": "^7.0.9", - "ajv": "^8.9.0", - "ajv-formats": "^2.1.1", - "ajv-keywords": "^5.1.0" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, - "node_modules/search-insights": { - "version": "2.17.3", - "resolved": "https://registry.npmjs.org/search-insights/-/search-insights-2.17.3.tgz", - "integrity": "sha512-RQPdCYTa8A68uM2jwxoY842xDhvx3E5LFL1LxvxCNMev4o5mLuokczhzjAgGwUZBAmOKZknArSxLKmXtIi2AxQ==", - "license": "MIT", - "peer": true - }, - "node_modules/section-matter": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/section-matter/-/section-matter-1.0.0.tgz", - "integrity": "sha512-vfD3pmTzGpufjScBh50YHKzEu2lxBWhVEHsNGoEXmCmn2hKGfeNLYMzCJpe8cD7gqX7TJluOVpBkAequ6dgMmA==", - "license": "MIT", - "dependencies": { - "extend-shallow": "^2.0.1", - "kind-of": "^6.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/select-hose": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/select-hose/-/select-hose-2.0.0.tgz", - "integrity": "sha512-mEugaLK+YfkijB4fx0e6kImuJdCIt2LxCRcbEYPqRGCs4F2ogyfZU5IAZRdjCP8JPq2AtdNoC/Dux63d9Kiryg==", - "license": "MIT" - }, - "node_modules/selfsigned": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/selfsigned/-/selfsigned-2.4.1.tgz", - "integrity": "sha512-th5B4L2U+eGLq1TVh7zNRGBapioSORUeymIydxgFpwww9d2qyKvtuPU2jJuHvYAwwqi2Y596QBL3eEqcPEYL8Q==", - "license": "MIT", - "dependencies": { - "@types/node-forge": "^1.3.0", - "node-forge": "^1" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/semver": { - "version": "7.6.3", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.3.tgz", - "integrity": "sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==", - "license": "ISC", - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/semver-diff": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/semver-diff/-/semver-diff-4.0.0.tgz", - "integrity": "sha512-0Ju4+6A8iOnpL/Thra7dZsSlOHYAHIeMxfhWQRI1/VLcT3WDBZKKtQt/QkBOsiIN9ZpuvHE6cGZ0x4glCMmfiA==", - "license": "MIT", - "dependencies": { - "semver": "^7.3.5" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/send": { - "version": "0.19.0", - "resolved": "https://registry.npmjs.org/send/-/send-0.19.0.tgz", - "integrity": "sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==", - "license": "MIT", - "dependencies": { - "debug": "2.6.9", - "depd": "2.0.0", - "destroy": "1.2.0", - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "etag": "~1.8.1", - "fresh": "0.5.2", - "http-errors": 
"2.0.0", - "mime": "1.6.0", - "ms": "2.1.3", - "on-finished": "2.4.1", - "range-parser": "~1.2.1", - "statuses": "2.0.1" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/send/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "license": "MIT", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/send/node_modules/debug/node_modules/ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", - "license": "MIT" - }, - "node_modules/send/node_modules/encodeurl": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", - "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/send/node_modules/range-parser": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", - "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/serialize-javascript": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.2.tgz", - "integrity": "sha512-Saa1xPByTTq2gdeFZYLLo+RFE35NHZkAbqZeWNd3BpzppeVisAqpDjcp8dyf6uIvEqJRd46jemmyA4iFIeVk8g==", - "license": "BSD-3-Clause", - "dependencies": { - "randombytes": "^2.1.0" - } - }, - "node_modules/seroval": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/seroval/-/seroval-1.1.1.tgz", - "integrity": "sha512-rqEO6FZk8mv7Hyv4UCj3FD3b6Waqft605TLfsCe/BiaylRpyyMC0b+uA5TJKawX3KzMrdi3wsLbCaLplrQmBvQ==", - "license": "MIT", - "engines": { - "node": ">=10" - } - }, - "node_modules/seroval-plugins": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/seroval-plugins/-/seroval-plugins-1.1.1.tgz", - "integrity": "sha512-qNSy1+nUj7hsCOon7AO4wdAIo9P0jrzAMp18XhiOzA6/uO5TKtP7ScozVJ8T293oRIvi5wyCHSM4TrJo/c/GJA==", - "license": "MIT", - "engines": { - "node": ">=10" - }, - "peerDependencies": { - "seroval": "^1.0" - } - }, - "node_modules/serve-handler": { - "version": "6.1.6", - "resolved": "https://registry.npmjs.org/serve-handler/-/serve-handler-6.1.6.tgz", - "integrity": "sha512-x5RL9Y2p5+Sh3D38Fh9i/iQ5ZK+e4xuXRd/pGbM4D13tgo/MGwbttUk8emytcr1YYzBYs+apnUngBDFYfpjPuQ==", - "license": "MIT", - "dependencies": { - "bytes": "3.0.0", - "content-disposition": "0.5.2", - "mime-types": "2.1.18", - "minimatch": "3.1.2", - "path-is-inside": "1.0.2", - "path-to-regexp": "3.3.0", - "range-parser": "1.2.0" - } - }, - "node_modules/serve-handler/node_modules/path-to-regexp": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-3.3.0.tgz", - "integrity": "sha512-qyCH421YQPS2WFDxDjftfc1ZR5WKQzVzqsp4n9M2kQhVOo/ByahFoUNJfl58kOcEGfQ//7weFTDhm+ss8Ecxgw==", - "license": "MIT" - }, - "node_modules/serve-index": { - "version": "1.9.1", - "resolved": "https://registry.npmjs.org/serve-index/-/serve-index-1.9.1.tgz", - "integrity": "sha512-pXHfKNP4qujrtteMrSBb0rc8HJ9Ms/GrXwcUtUtD5s4ewDJI8bT3Cz2zTVRMKtri49pLx2e0Ya8ziP5Ya2pZZw==", - "license": "MIT", - "dependencies": { - "accepts": 
"~1.3.4", - "batch": "0.6.1", - "debug": "2.6.9", - "escape-html": "~1.0.3", - "http-errors": "~1.6.2", - "mime-types": "~2.1.17", - "parseurl": "~1.3.2" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/serve-index/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "license": "MIT", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/serve-index/node_modules/depd": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz", - "integrity": "sha512-7emPTl6Dpo6JRXOXjLRxck+FlLRX5847cLKEn00PLAgc3g2hTZZgr+e4c2v6QpSmLeFP3n5yUo7ft6avBK/5jQ==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/serve-index/node_modules/http-errors": { - "version": "1.6.3", - "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz", - "integrity": "sha512-lks+lVC8dgGyh97jxvxeYTWQFvh4uw4yC12gVl63Cg30sjPX4wuGcdkICVXDAESr6OJGjqGA8Iz5mkeN6zlD7A==", - "license": "MIT", - "dependencies": { - "depd": "~1.1.2", - "inherits": "2.0.3", - "setprototypeof": "1.1.0", - "statuses": ">= 1.4.0 < 2" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/serve-index/node_modules/inherits": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", - "integrity": "sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw==", - "license": "ISC" - }, - "node_modules/serve-index/node_modules/ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", - "license": "MIT" - }, - "node_modules/serve-index/node_modules/setprototypeof": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz", - "integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ==", - "license": "ISC" - }, - "node_modules/serve-index/node_modules/statuses": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz", - "integrity": "sha512-OpZ3zP+jT1PI7I8nemJX4AKmAX070ZkYPVWV/AaKTJl+tXCTGyVdC1a4SL8RUQYEwk/f34ZX8UTykN68FwrqAA==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/serve-static": { - "version": "1.16.2", - "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.16.2.tgz", - "integrity": "sha512-VqpjJZKadQB/PEbEwvFdO43Ax5dFBZ2UECszz8bQ7pi7wt//PWe1P6MN7eCnjsatYtBT6EuiClbjSWP2WrIoTw==", - "license": "MIT", - "dependencies": { - "encodeurl": "~2.0.0", - "escape-html": "~1.0.3", - "parseurl": "~1.3.3", - "send": "0.19.0" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/set-function-length": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", - "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", - "license": "MIT", - "dependencies": { - "define-data-property": "^1.1.4", - "es-errors": "^1.3.0", - "function-bind": "^1.1.2", - "get-intrinsic": "^1.2.4", - "gopd": "^1.0.1", - "has-property-descriptors": "^1.0.2" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/setprototypeof": { - "version": "1.2.0", - "resolved": 
"https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", - "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==", - "license": "ISC" - }, - "node_modules/shallow-clone": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/shallow-clone/-/shallow-clone-3.0.1.tgz", - "integrity": "sha512-/6KqX+GVUdqPuPPd2LxDDxzX6CAbjJehAAOKlNpqqUpAqPM6HeL8f+o3a+JsyGjn2lv0WY8UsTgUJjU9Ok55NA==", - "license": "MIT", - "dependencies": { - "kind-of": "^6.0.2" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/shallowequal": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/shallowequal/-/shallowequal-1.1.0.tgz", - "integrity": "sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ==", - "license": "MIT" - }, - "node_modules/shebang-command": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", - "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", - "license": "MIT", - "dependencies": { - "shebang-regex": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/shebang-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", - "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/shell-quote": { - "version": "1.8.1", - "resolved": "https://registry.npmjs.org/shell-quote/-/shell-quote-1.8.1.tgz", - "integrity": "sha512-6j1W9l1iAs/4xYBI1SYOVZyFcCis9b4KCLQ8fgAGG07QvzaRLVVRQvAy85yNmmZSjYjg4MWh4gNvlPujU/5LpA==", - "license": "MIT", - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/side-channel": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz", - "integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==", - "license": "MIT", - "dependencies": { - "call-bind": "^1.0.7", - "es-errors": "^1.3.0", - "get-intrinsic": "^1.2.4", - "object-inspect": "^1.13.1" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/signal-exit": { - "version": "3.0.7", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", - "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", - "license": "ISC" - }, - "node_modules/sirv": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/sirv/-/sirv-2.0.4.tgz", - "integrity": "sha512-94Bdh3cC2PKrbgSOUqTiGPWVZeSiXfKOVZNJniWoqrWrRkB1CJzBU3NEbiTsPcYy1lDsANA/THzS+9WBiy5nfQ==", - "license": "MIT", - "dependencies": { - "@polka/url": "^1.0.0-next.24", - "mrmime": "^2.0.0", - "totalist": "^3.0.0" - }, - "engines": { - "node": ">= 10" - } - }, - "node_modules/sisteransi": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", - "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==", - "license": "MIT" - }, - "node_modules/sitemap": { - "version": "7.1.2", - "resolved": "https://registry.npmjs.org/sitemap/-/sitemap-7.1.2.tgz", - "integrity": "sha512-ARCqzHJ0p4gWt+j7NlU5eDlIO9+Rkr/JhPFZKKQ1l5GCus7rJH4UdrlVAh0xC/gDS/Qir2UMxqYNHtsKr2rpCw==", 
- "license": "MIT", - "dependencies": { - "@types/node": "^17.0.5", - "@types/sax": "^1.2.1", - "arg": "^5.0.0", - "sax": "^1.2.4" - }, - "bin": { - "sitemap": "dist/cli.js" - }, - "engines": { - "node": ">=12.0.0", - "npm": ">=5.6.0" - } - }, - "node_modules/sitemap/node_modules/@types/node": { - "version": "17.0.45", - "resolved": "https://registry.npmjs.org/@types/node/-/node-17.0.45.tgz", - "integrity": "sha512-w+tIMs3rq2afQdsPJlODhoUEKzFP1ayaoyl1CcnwtIlsVe7K7bA1NGm4s3PraqTLlXnbIN84zuBlxBWo1u9BLw==", - "license": "MIT" - }, - "node_modules/skin-tone": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/skin-tone/-/skin-tone-2.0.0.tgz", - "integrity": "sha512-kUMbT1oBJCpgrnKoSr0o6wPtvRWT9W9UKvGLwfJYO2WuahZRHOpEyL1ckyMGgMWh0UdpmaoFqKKD29WTomNEGA==", - "license": "MIT", - "dependencies": { - "unicode-emoji-modifier-base": "^1.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/slash": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", - "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/snake-case": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/snake-case/-/snake-case-3.0.4.tgz", - "integrity": "sha512-LAOh4z89bGQvl9pFfNF8V146i7o7/CqFPbqzYgP+yYzDIDeS9HaNFtXABamRW+AQzEVODcvE79ljJ+8a9YSdMg==", - "license": "MIT", - "dependencies": { - "dot-case": "^3.0.4", - "tslib": "^2.0.3" - } - }, - "node_modules/sockjs": { - "version": "0.3.24", - "resolved": "https://registry.npmjs.org/sockjs/-/sockjs-0.3.24.tgz", - "integrity": "sha512-GJgLTZ7vYb/JtPSSZ10hsOYIvEYsjbNU+zPdIHcUaWVNUEPivzxku31865sSSud0Da0W4lEeOPlmw93zLQchuQ==", - "license": "MIT", - "dependencies": { - "faye-websocket": "^0.11.3", - "uuid": "^8.3.2", - "websocket-driver": "^0.7.4" - } - }, - "node_modules/sockjs/node_modules/uuid": { - "version": "8.3.2", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", - "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", - "license": "MIT", - "bin": { - "uuid": "dist/bin/uuid" - } - }, - "node_modules/solid-js": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/solid-js/-/solid-js-1.9.3.tgz", - "integrity": "sha512-5ba3taPoZGt9GY3YlsCB24kCg0Lv/rie/HTD4kG6h4daZZz7+yK02xn8Vx8dLYBc9i6Ps5JwAbEiqjmKaLB3Ag==", - "license": "MIT", - "dependencies": { - "csstype": "^3.1.0", - "seroval": "^1.1.0", - "seroval-plugins": "^1.1.0" - } - }, - "node_modules/sort-css-media-queries": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/sort-css-media-queries/-/sort-css-media-queries-2.2.0.tgz", - "integrity": "sha512-0xtkGhWCC9MGt/EzgnvbbbKhqWjl1+/rncmhTh5qCpbYguXh6S/qwePfv/JQ8jePXXmqingylxoC49pCkSPIbA==", - "license": "MIT", - "engines": { - "node": ">= 6.3.0" - } - }, - "node_modules/source-map": { - "version": "0.7.4", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.7.4.tgz", - "integrity": "sha512-l3BikUxvPOcn5E74dZiq5BGsTb5yEwhaTSzccU6t4sDOH8NWJCstKO5QT2CvtFoK6F0saL7p9xHAqHOlCPJygA==", - "license": "BSD-3-Clause", - "engines": { - "node": ">= 8" - } - }, - "node_modules/source-map-js": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", - "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", - "license": "BSD-3-Clause", - "engines": { - "node": ">=0.10.0" 
- } - }, - "node_modules/source-map-support": { - "version": "0.5.21", - "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", - "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==", - "license": "MIT", - "dependencies": { - "buffer-from": "^1.0.0", - "source-map": "^0.6.0" - } - }, - "node_modules/source-map-support/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "license": "BSD-3-Clause", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/space-separated-tokens": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz", - "integrity": "sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/spdy": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/spdy/-/spdy-4.0.2.tgz", - "integrity": "sha512-r46gZQZQV+Kl9oItvl1JZZqJKGr+oEkB08A6BzkiR7593/7IbtuncXHd2YoYeTsG4157ZssMu9KYvUHLcjcDoA==", - "license": "MIT", - "dependencies": { - "debug": "^4.1.0", - "handle-thing": "^2.0.0", - "http-deceiver": "^1.2.7", - "select-hose": "^2.0.0", - "spdy-transport": "^3.0.0" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/spdy-transport": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/spdy-transport/-/spdy-transport-3.0.0.tgz", - "integrity": "sha512-hsLVFE5SjA6TCisWeJXFKniGGOpBgMLmerfO2aCyCU5s7nJ/rpAepqmFifv/GCbSbueEeAJJnmSQ2rKC/g8Fcw==", - "license": "MIT", - "dependencies": { - "debug": "^4.1.0", - "detect-node": "^2.0.4", - "hpack.js": "^2.1.6", - "obuf": "^1.1.2", - "readable-stream": "^3.0.6", - "wbuf": "^1.7.3" - } - }, - "node_modules/sprintf-js": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", - "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==", - "license": "BSD-3-Clause" - }, - "node_modules/srcset": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/srcset/-/srcset-4.0.0.tgz", - "integrity": "sha512-wvLeHgcVHKO8Sc/H/5lkGreJQVeYMm9rlmt8PuR1xE31rIuXhuzznUUqAt8MqLhB3MqJdFzlNAfpcWnxiFUcPw==", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/statuses": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", - "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==", - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/std-env": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.8.0.tgz", - "integrity": "sha512-Bc3YwwCB+OzldMxOXJIIvC6cPRWr/LxOp48CdQTOkPyk/t4JWWJbrilwBd7RJzKV8QW7tJkcgAmeuLLJugl5/w==", - "license": "MIT" - }, - "node_modules/string_decoder": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", - "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", - "license": "MIT", - "dependencies": { - "safe-buffer": "~5.2.0" - } 
- }, - "node_modules/string-width": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", - "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", - "license": "MIT", - "dependencies": { - "eastasianwidth": "^0.2.0", - "emoji-regex": "^9.2.2", - "strip-ansi": "^7.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/string-width/node_modules/ansi-regex": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.1.0.tgz", - "integrity": "sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA==", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-regex?sponsor=1" - } - }, - "node_modules/string-width/node_modules/strip-ansi": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", - "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", - "license": "MIT", - "dependencies": { - "ansi-regex": "^6.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/strip-ansi?sponsor=1" - } - }, - "node_modules/stringify-entities": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/stringify-entities/-/stringify-entities-4.0.4.tgz", - "integrity": "sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg==", - "license": "MIT", - "dependencies": { - "character-entities-html4": "^2.0.0", - "character-entities-legacy": "^3.0.0" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/stringify-object": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/stringify-object/-/stringify-object-3.3.0.tgz", - "integrity": "sha512-rHqiFh1elqCQ9WPLIC8I0Q/g/wj5J1eMkyoiD6eoQApWHP0FtlK7rqnhmabL5VUY9JQCcqwwvlOaSuutekgyrw==", - "license": "BSD-2-Clause", - "dependencies": { - "get-own-enumerable-property-symbols": "^3.0.0", - "is-obj": "^1.0.1", - "is-regexp": "^1.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/strip-bom-string": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/strip-bom-string/-/strip-bom-string-1.0.0.tgz", - "integrity": "sha512-uCC2VHvQRYu+lMh4My/sFNmF2klFymLX1wHJeXnbEJERpV/ZsVuonzerjfrGpIGF7LBVa1O7i9kjiWvJiFck8g==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/strip-final-newline": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", - "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/strip-json-comments": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", - "integrity": 
"sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", - "license": "MIT", - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/style-to-object": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-1.0.8.tgz", - "integrity": "sha512-xT47I/Eo0rwJmaXC4oilDGDWLohVhR6o/xAQcPQN8q6QBuZVL8qMYL85kLmST5cPjAorwvqIA4qXTRQoYHaL6g==", - "license": "MIT", - "dependencies": { - "inline-style-parser": "0.2.4" - } - }, - "node_modules/stylehacks": { - "version": "6.1.1", - "resolved": "https://registry.npmjs.org/stylehacks/-/stylehacks-6.1.1.tgz", - "integrity": "sha512-gSTTEQ670cJNoaeIp9KX6lZmm8LJ3jPB5yJmX8Zq/wQxOsAFXV3qjWzHas3YYk1qesuVIyYWWUpZ0vSE/dTSGg==", - "license": "MIT", - "dependencies": { - "browserslist": "^4.23.0", - "postcss-selector-parser": "^6.0.16" - }, - "engines": { - "node": "^14 || ^16 || >=18.0" - }, - "peerDependencies": { - "postcss": "^8.4.31" - } - }, - "node_modules/stylis": { - "version": "4.3.6", - "resolved": "https://registry.npmjs.org/stylis/-/stylis-4.3.6.tgz", - "integrity": "sha512-yQ3rwFWRfwNUY7H5vpU0wfdkNSnvnJinhF9830Swlaxl03zsOjCfmX0ugac+3LtK0lYSgwL/KXc8oYL3mG4YFQ==", - "license": "MIT" - }, - "node_modules/supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "license": "MIT", - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/supports-preserve-symlinks-flag": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", - "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/svg-parser": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/svg-parser/-/svg-parser-2.0.4.tgz", - "integrity": "sha512-e4hG1hRwoOdRb37cIMSgzNsxyzKfayW6VOflrwvR+/bzrkyxY/31WkbgnQpgtrNp1SdpJvpUAGTa/ZoiPNDuRQ==", - "license": "MIT" - }, - "node_modules/svgo": { - "version": "3.3.2", - "resolved": "https://registry.npmjs.org/svgo/-/svgo-3.3.2.tgz", - "integrity": "sha512-OoohrmuUlBs8B8o6MB2Aevn+pRIH9zDALSR+6hhqVfa6fRwG/Qw9VUMSMW9VNg2CFc/MTIfabtdOVl9ODIJjpw==", - "license": "MIT", - "dependencies": { - "@trysound/sax": "0.2.0", - "commander": "^7.2.0", - "css-select": "^5.1.0", - "css-tree": "^2.3.1", - "css-what": "^6.1.0", - "csso": "^5.0.5", - "picocolors": "^1.0.0" - }, - "bin": { - "svgo": "bin/svgo" - }, - "engines": { - "node": ">=14.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/svgo" - } - }, - "node_modules/svgo/node_modules/commander": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz", - "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==", - "license": "MIT", - "engines": { - "node": ">= 10" - } - }, - "node_modules/tapable": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.2.1.tgz", - "integrity": "sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==", - 
"license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/terser": { - "version": "5.36.0", - "resolved": "https://registry.npmjs.org/terser/-/terser-5.36.0.tgz", - "integrity": "sha512-IYV9eNMuFAV4THUspIRXkLakHnV6XO7FEdtKjf/mDyrnqUg9LnlOn6/RwRvM9SZjR4GUq8Nk8zj67FzVARr74w==", - "license": "BSD-2-Clause", - "dependencies": { - "@jridgewell/source-map": "^0.3.3", - "acorn": "^8.8.2", - "commander": "^2.20.0", - "source-map-support": "~0.5.20" - }, - "bin": { - "terser": "bin/terser" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/terser-webpack-plugin": { - "version": "5.3.10", - "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-5.3.10.tgz", - "integrity": "sha512-BKFPWlPDndPs+NGGCr1U59t0XScL5317Y0UReNrHaw9/FwhPENlq6bfgs+4yPfyP51vqC1bQ4rp1EfXW5ZSH9w==", - "license": "MIT", - "dependencies": { - "@jridgewell/trace-mapping": "^0.3.20", - "jest-worker": "^27.4.5", - "schema-utils": "^3.1.1", - "serialize-javascript": "^6.0.1", - "terser": "^5.26.0" - }, - "engines": { - "node": ">= 10.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "webpack": "^5.1.0" - }, - "peerDependenciesMeta": { - "@swc/core": { - "optional": true - }, - "esbuild": { - "optional": true - }, - "uglify-js": { - "optional": true - } - } - }, - "node_modules/terser-webpack-plugin/node_modules/ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", - "license": "MIT", - "dependencies": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/terser-webpack-plugin/node_modules/ajv-keywords": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", - "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", - "license": "MIT", - "peerDependencies": { - "ajv": "^6.9.1" - } - }, - "node_modules/terser-webpack-plugin/node_modules/jest-worker": { - "version": "27.5.1", - "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-27.5.1.tgz", - "integrity": "sha512-7vuh85V5cdDofPyxn58nrPjBktZo0u9x1g8WtjQol+jZDaE+fhN+cIvTj11GndBnMnyfrUOG1sZQxCdjKh+DKg==", - "license": "MIT", - "dependencies": { - "@types/node": "*", - "merge-stream": "^2.0.0", - "supports-color": "^8.0.0" - }, - "engines": { - "node": ">= 10.13.0" - } - }, - "node_modules/terser-webpack-plugin/node_modules/json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", - "license": "MIT" - }, - "node_modules/terser-webpack-plugin/node_modules/schema-utils": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", - "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", - "license": "MIT", - "dependencies": { - "@types/json-schema": "^7.0.8", - "ajv": "^6.12.5", - "ajv-keywords": "^3.5.2" - }, - "engines": { - "node": ">= 10.13.0" - }, - "funding": { - "type": "opencollective", - 
"url": "https://opencollective.com/webpack" - } - }, - "node_modules/terser-webpack-plugin/node_modules/supports-color": { - "version": "8.1.1", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", - "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", - "license": "MIT", - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/supports-color?sponsor=1" - } - }, - "node_modules/terser/node_modules/commander": { - "version": "2.20.3", - "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", - "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==", - "license": "MIT" - }, - "node_modules/thunky": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/thunky/-/thunky-1.1.0.tgz", - "integrity": "sha512-eHY7nBftgThBqOyHGVN+l8gF0BucP09fMo0oO/Lb0w1OF80dJv+lDVpXG60WMQvkcxAkNybKsrEIE3ZtKGmPrA==", - "license": "MIT" - }, - "node_modules/tiny-invariant": { - "version": "1.3.3", - "resolved": "https://registry.npmjs.org/tiny-invariant/-/tiny-invariant-1.3.3.tgz", - "integrity": "sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==", - "license": "MIT" - }, - "node_modules/tiny-warning": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/tiny-warning/-/tiny-warning-1.0.3.tgz", - "integrity": "sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA==", - "license": "MIT" - }, - "node_modules/tinyexec": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-0.3.1.tgz", - "integrity": "sha512-WiCJLEECkO18gwqIp6+hJg0//p23HXp4S+gGtAKu3mI2F2/sXC4FvHvXvB0zJVVaTPhx1/tOwdbRsa1sOBIKqQ==", - "license": "MIT" - }, - "node_modules/tinypool": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/tinypool/-/tinypool-1.0.2.tgz", - "integrity": "sha512-al6n+QEANGFOMf/dmUMsuS5/r9B06uwlyNjZZql/zv8J7ybHCgoihBNORZCY2mzUuAnomQa2JdhyHKzZxPCrFA==", - "license": "MIT", - "engines": { - "node": "^18.0.0 || >=20.0.0" - } - }, - "node_modules/to-regex-range": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", - "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", - "license": "MIT", - "dependencies": { - "is-number": "^7.0.0" - }, - "engines": { - "node": ">=8.0" - } - }, - "node_modules/toidentifier": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", - "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", - "license": "MIT", - "engines": { - "node": ">=0.6" - } - }, - "node_modules/totalist": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/totalist/-/totalist-3.0.1.tgz", - "integrity": "sha512-sf4i37nQ2LBx4m3wB74y+ubopq6W/dIzXg0FDGjsYnZHVa1Da8FH853wlL2gtUhg+xJXjfk3kUZS3BRoQeoQBQ==", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/trim-lines": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/trim-lines/-/trim-lines-3.0.1.tgz", - "integrity": "sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - 
"node_modules/trough": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/trough/-/trough-2.2.0.tgz", - "integrity": "sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw==", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/ts-dedent": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/ts-dedent/-/ts-dedent-2.2.0.tgz", - "integrity": "sha512-q5W7tVM71e2xjHZTlgfTDoPF/SmqKG5hddq9SzR49CH2hayqRKJtQ4mtRlSxKaJlR/+9rEM+mnBHf7I2/BQcpQ==", - "license": "MIT", - "engines": { - "node": ">=6.10" - } - }, - "node_modules/tslib": { - "version": "2.8.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", - "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", - "license": "0BSD" - }, - "node_modules/type-fest": { - "version": "2.19.0", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-2.19.0.tgz", - "integrity": "sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==", - "license": "(MIT OR CC0-1.0)", - "engines": { - "node": ">=12.20" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/type-is": { - "version": "1.6.18", - "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", - "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", - "license": "MIT", - "dependencies": { - "media-typer": "0.3.0", - "mime-types": "~2.1.24" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/type-is/node_modules/mime-db": { - "version": "1.52.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", - "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/type-is/node_modules/mime-types": { - "version": "2.1.35", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", - "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", - "license": "MIT", - "dependencies": { - "mime-db": "1.52.0" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/typedarray-to-buffer": { - "version": "3.1.5", - "resolved": "https://registry.npmjs.org/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz", - "integrity": "sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q==", - "license": "MIT", - "dependencies": { - "is-typedarray": "^1.0.0" - } - }, - "node_modules/typescript": { - "version": "5.6.3", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.6.3.tgz", - "integrity": "sha512-hjcS1mhfuyi4WW8IWtjP7brDrG2cuDZukyrYrSauoXGNgx0S7zceP07adYkJycEr56BOUTNPzbInooiN3fn1qw==", - "license": "Apache-2.0", - "optional": true, - "peer": true, - "bin": { - "tsc": "bin/tsc", - "tsserver": "bin/tsserver" - }, - "engines": { - "node": ">=14.17" - } - }, - "node_modules/ufo": { - "version": "1.5.4", - "resolved": "https://registry.npmjs.org/ufo/-/ufo-1.5.4.tgz", - "integrity": "sha512-UsUk3byDzKd04EyoZ7U4DOlxQaD14JUKQl6/P7wiX4FNvUfm3XL246n9W5AmqwW5RSFJ27NAuM0iLscAOYUiGQ==", - "license": "MIT" - }, - "node_modules/undici-types": { - "version": "6.19.8", - "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.19.8.tgz", - "integrity": 
"sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw==", - "license": "MIT" - }, - "node_modules/unicode-canonical-property-names-ecmascript": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.1.tgz", - "integrity": "sha512-dA8WbNeb2a6oQzAQ55YlT5vQAWGV9WXOsi3SskE3bcCdM0P4SDd+24zS/OCacdRq5BkdsRj9q3Pg6YyQoxIGqg==", - "license": "MIT", - "engines": { - "node": ">=4" - } - }, - "node_modules/unicode-emoji-modifier-base": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/unicode-emoji-modifier-base/-/unicode-emoji-modifier-base-1.0.0.tgz", - "integrity": "sha512-yLSH4py7oFH3oG/9K+XWrz1pSi3dfUrWEnInbxMfArOfc1+33BlGPQtLsOYwvdMy11AwUBetYuaRxSPqgkq+8g==", - "license": "MIT", - "engines": { - "node": ">=4" - } - }, - "node_modules/unicode-match-property-ecmascript": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-2.0.0.tgz", - "integrity": "sha512-5kaZCrbp5mmbz5ulBkDkbY0SsPOjKqVS35VpL9ulMPfSl0J0Xsm+9Evphv9CoIZFwre7aJoa94AY6seMKGVN5Q==", - "license": "MIT", - "dependencies": { - "unicode-canonical-property-names-ecmascript": "^2.0.0", - "unicode-property-aliases-ecmascript": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/unicode-match-property-value-ecmascript": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.2.0.tgz", - "integrity": "sha512-4IehN3V/+kkr5YeSSDDQG8QLqO26XpL2XP3GQtqwlT/QYSECAwFztxVHjlbh0+gjJ3XmNLS0zDsbgs9jWKExLg==", - "license": "MIT", - "engines": { - "node": ">=4" - } - }, - "node_modules/unicode-property-aliases-ecmascript": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-2.1.0.tgz", - "integrity": "sha512-6t3foTQI9qne+OZoVQB/8x8rk2k1eVy1gRXhV3oFQ5T6R1dqQ1xtin3XqSlx3+ATBkliTaR/hHyJBm+LVPNM8w==", - "license": "MIT", - "engines": { - "node": ">=4" - } - }, - "node_modules/unified": { - "version": "11.0.5", - "resolved": "https://registry.npmjs.org/unified/-/unified-11.0.5.tgz", - "integrity": "sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA==", - "license": "MIT", - "dependencies": { - "@types/unist": "^3.0.0", - "bail": "^2.0.0", - "devlop": "^1.0.0", - "extend": "^3.0.0", - "is-plain-obj": "^4.0.0", - "trough": "^2.0.0", - "vfile": "^6.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/unique-string": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/unique-string/-/unique-string-3.0.0.tgz", - "integrity": "sha512-VGXBUVwxKMBUznyffQweQABPRRW1vHZAbadFZud4pLFAqRGvv/96vafgjWFqzourzr8YonlQiPgH0YCJfawoGQ==", - "license": "MIT", - "dependencies": { - "crypto-random-string": "^4.0.0" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/unist-util-is": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-6.0.0.tgz", - "integrity": "sha512-2qCTHimwdxLfz+YzdGfkqNlH0tLi9xjTnHddPmJwtIG9MGsdbutfTc4P+haPD7l7Cjxf/WZj+we5qfVPvvxfYw==", - "license": "MIT", - "dependencies": { - "@types/unist": "^3.0.0" - }, - "funding": { - "type": "opencollective", - "url": 
"https://opencollective.com/unified" - } - }, - "node_modules/unist-util-position": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-5.0.0.tgz", - "integrity": "sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA==", - "license": "MIT", - "dependencies": { - "@types/unist": "^3.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/unist-util-position-from-estree": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/unist-util-position-from-estree/-/unist-util-position-from-estree-2.0.0.tgz", - "integrity": "sha512-KaFVRjoqLyF6YXCbVLNad/eS4+OfPQQn2yOd7zF/h5T/CSL2v8NpN6a5TPvtbXthAGw5nG+PuTtq+DdIZr+cRQ==", - "license": "MIT", - "dependencies": { - "@types/unist": "^3.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/unist-util-stringify-position": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-4.0.0.tgz", - "integrity": "sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==", - "license": "MIT", - "dependencies": { - "@types/unist": "^3.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/unist-util-visit": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-5.0.0.tgz", - "integrity": "sha512-MR04uvD+07cwl/yhVuVWAtw+3GOR/knlL55Nd/wAdblk27GCVt3lqpTivy/tkJcZoNPzTwS1Y+KMojlLDhoTzg==", - "license": "MIT", - "dependencies": { - "@types/unist": "^3.0.0", - "unist-util-is": "^6.0.0", - "unist-util-visit-parents": "^6.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/unist-util-visit-parents": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-6.0.1.tgz", - "integrity": "sha512-L/PqWzfTP9lzzEa6CKs0k2nARxTdZduw3zyh8d2NVBnsyvHjSX4TWse388YrrQKbvI8w20fGjGlhgT96WwKykw==", - "license": "MIT", - "dependencies": { - "@types/unist": "^3.0.0", - "unist-util-is": "^6.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/universalify": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", - "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", - "license": "MIT", - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/unpipe": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", - "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==", - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/update-browserslist-db": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.1.tgz", - "integrity": "sha512-R8UzCaa9Az+38REPiJ1tXlImTJXlVfgHZsglwBD/k6nj76ctsH1E3q4doGrukiLQd3sGQYu56r5+lo5r94l29A==", - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/browserslist" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/browserslist" - }, - { - "type": "github", - "url": 
"https://github.com/sponsors/ai" - } - ], - "license": "MIT", - "dependencies": { - "escalade": "^3.2.0", - "picocolors": "^1.1.0" - }, - "bin": { - "update-browserslist-db": "cli.js" - }, - "peerDependencies": { - "browserslist": ">= 4.21.0" - } - }, - "node_modules/update-notifier": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/update-notifier/-/update-notifier-6.0.2.tgz", - "integrity": "sha512-EDxhTEVPZZRLWYcJ4ZXjGFN0oP7qYvbXWzEgRm/Yql4dHX5wDbvh89YHP6PK1lzZJYrMtXUuZZz8XGK+U6U1og==", - "license": "BSD-2-Clause", - "dependencies": { - "boxen": "^7.0.0", - "chalk": "^5.0.1", - "configstore": "^6.0.0", - "has-yarn": "^3.0.0", - "import-lazy": "^4.0.0", - "is-ci": "^3.0.1", - "is-installed-globally": "^0.4.0", - "is-npm": "^6.0.0", - "is-yarn-global": "^0.4.0", - "latest-version": "^7.0.0", - "pupa": "^3.1.0", - "semver": "^7.3.7", - "semver-diff": "^4.0.0", - "xdg-basedir": "^5.1.0" - }, - "engines": { - "node": ">=14.16" - }, - "funding": { - "url": "https://github.com/yeoman/update-notifier?sponsor=1" - } - }, - "node_modules/update-notifier/node_modules/boxen": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/boxen/-/boxen-7.1.1.tgz", - "integrity": "sha512-2hCgjEmP8YLWQ130n2FerGv7rYpfBmnmp9Uy2Le1vge6X3gZIfSmEzP5QTDElFxcvVcXlEn8Aq6MU/PZygIOog==", - "license": "MIT", - "dependencies": { - "ansi-align": "^3.0.1", - "camelcase": "^7.0.1", - "chalk": "^5.2.0", - "cli-boxes": "^3.0.0", - "string-width": "^5.1.2", - "type-fest": "^2.13.0", - "widest-line": "^4.0.1", - "wrap-ansi": "^8.1.0" - }, - "engines": { - "node": ">=14.16" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/update-notifier/node_modules/camelcase": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-7.0.1.tgz", - "integrity": "sha512-xlx1yCK2Oc1APsPXDL2LdlNP6+uu8OCDdhOBSVT279M/S+y75O30C2VuD8T2ogdePBBl7PfPF4504tnLgX3zfw==", - "license": "MIT", - "engines": { - "node": ">=14.16" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/update-notifier/node_modules/chalk": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.3.0.tgz", - "integrity": "sha512-dLitG79d+GV1Nb/VYcCDFivJeK1hiukt9QjRNVOsUtTy1rR1YJsmpGGTZ3qJos+uw7WmWF4wUwBd9jxjocFC2w==", - "license": "MIT", - "engines": { - "node": "^12.17.0 || ^14.13 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/chalk/chalk?sponsor=1" - } - }, - "node_modules/uri-js": { - "version": "4.4.1", - "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", - "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", - "license": "BSD-2-Clause", - "dependencies": { - "punycode": "^2.1.0" - } - }, - "node_modules/url-loader": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/url-loader/-/url-loader-4.1.1.tgz", - "integrity": "sha512-3BTV812+AVHHOJQO8O5MkWgZ5aosP7GnROJwvzLS9hWDj00lZ6Z0wNak423Lp9PBZN05N+Jk/N5Si8jRAlGyWA==", - "license": "MIT", - "dependencies": { - "loader-utils": "^2.0.0", - "mime-types": "^2.1.27", - "schema-utils": "^3.0.0" - }, - "engines": { - "node": ">= 10.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "file-loader": "*", - "webpack": "^4.0.0 || ^5.0.0" - }, - "peerDependenciesMeta": { - "file-loader": { - "optional": true - } - } - }, - "node_modules/url-loader/node_modules/ajv": { - "version": "6.12.6", - 
"resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", - "license": "MIT", - "dependencies": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/url-loader/node_modules/ajv-keywords": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", - "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", - "license": "MIT", - "peerDependencies": { - "ajv": "^6.9.1" - } - }, - "node_modules/url-loader/node_modules/json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", - "license": "MIT" - }, - "node_modules/url-loader/node_modules/mime-db": { - "version": "1.52.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", - "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/url-loader/node_modules/mime-types": { - "version": "2.1.35", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", - "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", - "license": "MIT", - "dependencies": { - "mime-db": "1.52.0" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/url-loader/node_modules/schema-utils": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", - "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", - "license": "MIT", - "dependencies": { - "@types/json-schema": "^7.0.8", - "ajv": "^6.12.5", - "ajv-keywords": "^3.5.2" - }, - "engines": { - "node": ">= 10.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, - "node_modules/util-deprecate": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", - "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", - "license": "MIT" - }, - "node_modules/utila": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/utila/-/utila-0.4.0.tgz", - "integrity": "sha512-Z0DbgELS9/L/75wZbro8xAnT50pBVFQZ+hUEueGDU5FN51YSCYM+jdxsfCiHjwNP/4LCDD0i/graKpeBnOXKRA==", - "license": "MIT" - }, - "node_modules/utility-types": { - "version": "3.11.0", - "resolved": "https://registry.npmjs.org/utility-types/-/utility-types-3.11.0.tgz", - "integrity": "sha512-6Z7Ma2aVEWisaL6TvBCy7P8rm2LQoPv6dJ7ecIaIixHcwfbJ0x7mWdbcwlIM5IGQxPZSFYeqRCqlOOeKoJYMkw==", - "license": "MIT", - "engines": { - "node": ">= 4" - } - }, - "node_modules/utils-merge": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", - "integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==", - "license": "MIT", - "engines": { - "node": ">= 0.4.0" - } - }, - "node_modules/uuid": { 
- "version": "11.1.0", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-11.1.0.tgz", - "integrity": "sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==", - "funding": [ - "https://github.com/sponsors/broofa", - "https://github.com/sponsors/ctavan" - ], - "license": "MIT", - "bin": { - "uuid": "dist/esm/bin/uuid" - } - }, - "node_modules/value-equal": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/value-equal/-/value-equal-1.0.1.tgz", - "integrity": "sha512-NOJ6JZCAWr0zlxZt+xqCHNTEKOsrks2HQd4MqhP1qy4z1SkbEP467eNx6TgDKXMvUOb+OENfJCZwM+16n7fRfw==", - "license": "MIT" - }, - "node_modules/vary": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", - "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==", - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/vfile": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/vfile/-/vfile-6.0.3.tgz", - "integrity": "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==", - "license": "MIT", - "dependencies": { - "@types/unist": "^3.0.0", - "vfile-message": "^4.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/vfile-location": { - "version": "5.0.3", - "resolved": "https://registry.npmjs.org/vfile-location/-/vfile-location-5.0.3.tgz", - "integrity": "sha512-5yXvWDEgqeiYiBe1lbxYF7UMAIm/IcopxMHrMQDq3nvKcjPKIhZklUKL+AE7J7uApI4kwe2snsK+eI6UTj9EHg==", - "license": "MIT", - "dependencies": { - "@types/unist": "^3.0.0", - "vfile": "^6.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/vfile-message": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-4.0.2.tgz", - "integrity": "sha512-jRDZ1IMLttGj41KcZvlrYAaI3CfqpLpfpf+Mfig13viT6NKvRzWZ+lXz0Y5D60w6uJIBAOGq9mSHf0gktF0duw==", - "license": "MIT", - "dependencies": { - "@types/unist": "^3.0.0", - "unist-util-stringify-position": "^4.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/vscode-jsonrpc": { - "version": "8.2.0", - "resolved": "https://registry.npmjs.org/vscode-jsonrpc/-/vscode-jsonrpc-8.2.0.tgz", - "integrity": "sha512-C+r0eKJUIfiDIfwJhria30+TYWPtuHJXHtI7J0YlOmKAo7ogxP20T0zxB7HZQIFhIyvoBPwWskjxrvAtfjyZfA==", - "license": "MIT", - "engines": { - "node": ">=14.0.0" - } - }, - "node_modules/vscode-languageserver": { - "version": "9.0.1", - "resolved": "https://registry.npmjs.org/vscode-languageserver/-/vscode-languageserver-9.0.1.tgz", - "integrity": "sha512-woByF3PDpkHFUreUa7Hos7+pUWdeWMXRd26+ZX2A8cFx6v/JPTtd4/uN0/jB6XQHYaOlHbio03NTHCqrgG5n7g==", - "license": "MIT", - "dependencies": { - "vscode-languageserver-protocol": "3.17.5" - }, - "bin": { - "installServerIntoExtension": "bin/installServerIntoExtension" - } - }, - "node_modules/vscode-languageserver-protocol": { - "version": "3.17.5", - "resolved": "https://registry.npmjs.org/vscode-languageserver-protocol/-/vscode-languageserver-protocol-3.17.5.tgz", - "integrity": "sha512-mb1bvRJN8SVznADSGWM9u/b07H7Ecg0I3OgXDuLdn307rl/J3A9YD6/eYOssqhecL27hK1IPZAsaqh00i/Jljg==", - "license": "MIT", - "dependencies": { - "vscode-jsonrpc": "8.2.0", - "vscode-languageserver-types": "3.17.5" - } - }, - "node_modules/vscode-languageserver-textdocument": { - "version": 
"1.0.12", - "resolved": "https://registry.npmjs.org/vscode-languageserver-textdocument/-/vscode-languageserver-textdocument-1.0.12.tgz", - "integrity": "sha512-cxWNPesCnQCcMPeenjKKsOCKQZ/L6Tv19DTRIGuLWe32lyzWhihGVJ/rcckZXJxfdKCFvRLS3fpBIsV/ZGX4zA==", - "license": "MIT" - }, - "node_modules/vscode-languageserver-types": { - "version": "3.17.5", - "resolved": "https://registry.npmjs.org/vscode-languageserver-types/-/vscode-languageserver-types-3.17.5.tgz", - "integrity": "sha512-Ld1VelNuX9pdF39h2Hgaeb5hEZM2Z3jUrrMgWQAu82jMtZp7p3vJT3BzToKtZI7NgQssZje5o0zryOrhQvzQAg==", - "license": "MIT" - }, - "node_modules/vscode-uri": { - "version": "3.0.8", - "resolved": "https://registry.npmjs.org/vscode-uri/-/vscode-uri-3.0.8.tgz", - "integrity": "sha512-AyFQ0EVmsOZOlAnxoFOGOq1SQDWAB7C6aqMGS23svWAllfOaxbuFvcT8D1i8z3Gyn8fraVeZNNmN6e9bxxXkKw==", - "license": "MIT" - }, - "node_modules/watchpack": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.4.2.tgz", - "integrity": "sha512-TnbFSbcOCcDgjZ4piURLCbJ3nJhznVh9kw6F6iokjiFPl8ONxe9A6nMDVXDiNbrSfLILs6vB07F7wLBrwPYzJw==", - "license": "MIT", - "dependencies": { - "glob-to-regexp": "^0.4.1", - "graceful-fs": "^4.1.2" - }, - "engines": { - "node": ">=10.13.0" - } - }, - "node_modules/wbuf": { - "version": "1.7.3", - "resolved": "https://registry.npmjs.org/wbuf/-/wbuf-1.7.3.tgz", - "integrity": "sha512-O84QOnr0icsbFGLS0O3bI5FswxzRr8/gHwWkDlQFskhSPryQXvrTMxjxGP4+iWYoauLoBvfDpkrOauZ+0iZpDA==", - "license": "MIT", - "dependencies": { - "minimalistic-assert": "^1.0.0" - } - }, - "node_modules/web-namespaces": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/web-namespaces/-/web-namespaces-2.0.1.tgz", - "integrity": "sha512-bKr1DkiNa2krS7qxNtdrtHAmzuYGFQLiQ13TsorsdT6ULTkPLKuu5+GsFpDlg6JFjUTwX2DyhMPG2be8uPrqsQ==", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/webpack": { - "version": "5.96.1", - "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.96.1.tgz", - "integrity": "sha512-l2LlBSvVZGhL4ZrPwyr8+37AunkcYj5qh8o6u2/2rzoPc8gxFJkLj1WxNgooi9pnoc06jh0BjuXnamM4qlujZA==", - "license": "MIT", - "dependencies": { - "@types/eslint-scope": "^3.7.7", - "@types/estree": "^1.0.6", - "@webassemblyjs/ast": "^1.12.1", - "@webassemblyjs/wasm-edit": "^1.12.1", - "@webassemblyjs/wasm-parser": "^1.12.1", - "acorn": "^8.14.0", - "browserslist": "^4.24.0", - "chrome-trace-event": "^1.0.2", - "enhanced-resolve": "^5.17.1", - "es-module-lexer": "^1.2.1", - "eslint-scope": "5.1.1", - "events": "^3.2.0", - "glob-to-regexp": "^0.4.1", - "graceful-fs": "^4.2.11", - "json-parse-even-better-errors": "^2.3.1", - "loader-runner": "^4.2.0", - "mime-types": "^2.1.27", - "neo-async": "^2.6.2", - "schema-utils": "^3.2.0", - "tapable": "^2.1.1", - "terser-webpack-plugin": "^5.3.10", - "watchpack": "^2.4.1", - "webpack-sources": "^3.2.3" - }, - "bin": { - "webpack": "bin/webpack.js" - }, - "engines": { - "node": ">=10.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependenciesMeta": { - "webpack-cli": { - "optional": true - } - } - }, - "node_modules/webpack-bundle-analyzer": { - "version": "4.10.2", - "resolved": "https://registry.npmjs.org/webpack-bundle-analyzer/-/webpack-bundle-analyzer-4.10.2.tgz", - "integrity": "sha512-vJptkMm9pk5si4Bv922ZbKLV8UTT4zib4FPgXMhgzUny0bfDDkLXAVQs3ly3fS4/TN9ROFtb0NFrm04UXFE/Vw==", - "license": "MIT", - "dependencies": { - "@discoveryjs/json-ext": "0.5.7", - 
"acorn": "^8.0.4", - "acorn-walk": "^8.0.0", - "commander": "^7.2.0", - "debounce": "^1.2.1", - "escape-string-regexp": "^4.0.0", - "gzip-size": "^6.0.0", - "html-escaper": "^2.0.2", - "opener": "^1.5.2", - "picocolors": "^1.0.0", - "sirv": "^2.0.3", - "ws": "^7.3.1" - }, - "bin": { - "webpack-bundle-analyzer": "lib/bin/analyzer.js" - }, - "engines": { - "node": ">= 10.13.0" - } - }, - "node_modules/webpack-bundle-analyzer/node_modules/commander": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz", - "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==", - "license": "MIT", - "engines": { - "node": ">= 10" - } - }, - "node_modules/webpack-dev-middleware": { - "version": "5.3.4", - "resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-5.3.4.tgz", - "integrity": "sha512-BVdTqhhs+0IfoeAf7EoH5WE+exCmqGerHfDM0IL096Px60Tq2Mn9MAbnaGUe6HiMa41KMCYF19gyzZmBcq/o4Q==", - "license": "MIT", - "dependencies": { - "colorette": "^2.0.10", - "memfs": "^3.4.3", - "mime-types": "^2.1.31", - "range-parser": "^1.2.1", - "schema-utils": "^4.0.0" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "webpack": "^4.0.0 || ^5.0.0" - } - }, - "node_modules/webpack-dev-middleware/node_modules/mime-db": { - "version": "1.52.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", - "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/webpack-dev-middleware/node_modules/mime-types": { - "version": "2.1.35", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", - "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", - "license": "MIT", - "dependencies": { - "mime-db": "1.52.0" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/webpack-dev-middleware/node_modules/range-parser": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", - "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/webpack-dev-server": { - "version": "4.15.2", - "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-4.15.2.tgz", - "integrity": "sha512-0XavAZbNJ5sDrCbkpWL8mia0o5WPOd2YGtxrEiZkBK9FjLppIUK2TgxK6qGD2P3hUXTJNNPVibrerKcx5WkR1g==", - "license": "MIT", - "dependencies": { - "@types/bonjour": "^3.5.9", - "@types/connect-history-api-fallback": "^1.3.5", - "@types/express": "^4.17.13", - "@types/serve-index": "^1.9.1", - "@types/serve-static": "^1.13.10", - "@types/sockjs": "^0.3.33", - "@types/ws": "^8.5.5", - "ansi-html-community": "^0.0.8", - "bonjour-service": "^1.0.11", - "chokidar": "^3.5.3", - "colorette": "^2.0.10", - "compression": "^1.7.4", - "connect-history-api-fallback": "^2.0.0", - "default-gateway": "^6.0.3", - "express": "^4.17.3", - "graceful-fs": "^4.2.6", - "html-entities": "^2.3.2", - "http-proxy-middleware": "^2.0.3", - "ipaddr.js": "^2.0.1", - "launch-editor": "^2.6.0", - "open": "^8.0.9", - "p-retry": "^4.5.0", - "rimraf": "^3.0.2", - "schema-utils": "^4.0.0", - "selfsigned": "^2.1.1", - "serve-index": "^1.9.1", - 
"sockjs": "^0.3.24", - "spdy": "^4.0.2", - "webpack-dev-middleware": "^5.3.4", - "ws": "^8.13.0" - }, - "bin": { - "webpack-dev-server": "bin/webpack-dev-server.js" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "webpack": "^4.37.0 || ^5.0.0" - }, - "peerDependenciesMeta": { - "webpack": { - "optional": true - }, - "webpack-cli": { - "optional": true - } - } - }, - "node_modules/webpack-dev-server/node_modules/ws": { - "version": "8.18.0", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.0.tgz", - "integrity": "sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw==", - "license": "MIT", - "engines": { - "node": ">=10.0.0" - }, - "peerDependencies": { - "bufferutil": "^4.0.1", - "utf-8-validate": ">=5.0.2" - }, - "peerDependenciesMeta": { - "bufferutil": { - "optional": true - }, - "utf-8-validate": { - "optional": true - } - } - }, - "node_modules/webpack-merge": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/webpack-merge/-/webpack-merge-6.0.1.tgz", - "integrity": "sha512-hXXvrjtx2PLYx4qruKl+kyRSLc52V+cCvMxRjmKwoA+CBbbF5GfIBtR6kCvl0fYGqTUPKB+1ktVmTHqMOzgCBg==", - "license": "MIT", - "dependencies": { - "clone-deep": "^4.0.1", - "flat": "^5.0.2", - "wildcard": "^2.0.1" - }, - "engines": { - "node": ">=18.0.0" - } - }, - "node_modules/webpack-sources": { - "version": "3.2.3", - "resolved": "https://registry.npmjs.org/webpack-sources/-/webpack-sources-3.2.3.tgz", - "integrity": "sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w==", - "license": "MIT", - "engines": { - "node": ">=10.13.0" - } - }, - "node_modules/webpack/node_modules/ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", - "license": "MIT", - "dependencies": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/webpack/node_modules/ajv-keywords": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", - "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", - "license": "MIT", - "peerDependencies": { - "ajv": "^6.9.1" - } - }, - "node_modules/webpack/node_modules/json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", - "license": "MIT" - }, - "node_modules/webpack/node_modules/mime-db": { - "version": "1.52.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", - "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/webpack/node_modules/mime-types": { - "version": "2.1.35", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", - "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", - "license": "MIT", - 
"dependencies": { - "mime-db": "1.52.0" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/webpack/node_modules/schema-utils": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", - "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", - "license": "MIT", - "dependencies": { - "@types/json-schema": "^7.0.8", - "ajv": "^6.12.5", - "ajv-keywords": "^3.5.2" - }, - "engines": { - "node": ">= 10.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, - "node_modules/webpackbar": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/webpackbar/-/webpackbar-6.0.1.tgz", - "integrity": "sha512-TnErZpmuKdwWBdMoexjio3KKX6ZtoKHRVvLIU0A47R0VVBDtx3ZyOJDktgYixhoJokZTYTt1Z37OkO9pnGJa9Q==", - "license": "MIT", - "dependencies": { - "ansi-escapes": "^4.3.2", - "chalk": "^4.1.2", - "consola": "^3.2.3", - "figures": "^3.2.0", - "markdown-table": "^2.0.0", - "pretty-time": "^1.1.0", - "std-env": "^3.7.0", - "wrap-ansi": "^7.0.0" - }, - "engines": { - "node": ">=14.21.3" - }, - "peerDependencies": { - "webpack": "3 || 4 || 5" - } - }, - "node_modules/webpackbar/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "license": "MIT" - }, - "node_modules/webpackbar/node_modules/markdown-table": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/markdown-table/-/markdown-table-2.0.0.tgz", - "integrity": "sha512-Ezda85ToJUBhM6WGaG6veasyym+Tbs3cMAw/ZhOPqXiYsr0jgocBV3j3nx+4lk47plLlIqjwuTm/ywVI+zjJ/A==", - "license": "MIT", - "dependencies": { - "repeat-string": "^1.0.0" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/webpackbar/node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "license": "MIT", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/webpackbar/node_modules/wrap-ansi": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", - "license": "MIT", - "dependencies": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" - } - }, - "node_modules/websocket-driver": { - "version": "0.7.4", - "resolved": "https://registry.npmjs.org/websocket-driver/-/websocket-driver-0.7.4.tgz", - "integrity": "sha512-b17KeDIQVjvb0ssuSDF2cYXSg2iztliJ4B9WdsuB6J952qCPKmnVq4DyW5motImXHDC1cBT/1UezrJVsKw5zjg==", - "license": "Apache-2.0", - "dependencies": { - "http-parser-js": ">=0.5.1", - "safe-buffer": ">=5.1.0", - "websocket-extensions": ">=0.1.1" - }, - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/websocket-extensions": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/websocket-extensions/-/websocket-extensions-0.1.4.tgz", - "integrity": 
"sha512-OqedPIGOfsDlo31UNwYbCFMSaO9m9G/0faIHj5/dZFDMFqPTcx6UwqyOy3COEaEOg/9VsGIpdqn62W5KhoKSpg==", - "license": "Apache-2.0", - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/which": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", - "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", - "license": "ISC", - "dependencies": { - "isexe": "^2.0.0" - }, - "bin": { - "node-which": "bin/node-which" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/widest-line": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-4.0.1.tgz", - "integrity": "sha512-o0cyEG0e8GPzT4iGHphIOh0cJOV8fivsXxddQasHPHfoZf1ZexrfeA21w2NaEN1RHE+fXlfISmOE8R9N3u3Qig==", - "license": "MIT", - "dependencies": { - "string-width": "^5.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/wildcard": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/wildcard/-/wildcard-2.0.1.tgz", - "integrity": "sha512-CC1bOL87PIWSBhDcTrdeLo6eGT7mCFtrg0uIJtqJUFyK+eJnzl8A1niH56uu7KMa5XFrtiV+AQuHO3n7DsHnLQ==", - "license": "MIT" - }, - "node_modules/wrap-ansi": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", - "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", - "license": "MIT", - "dependencies": { - "ansi-styles": "^6.1.0", - "string-width": "^5.0.1", - "strip-ansi": "^7.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" - } - }, - "node_modules/wrap-ansi/node_modules/ansi-regex": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.1.0.tgz", - "integrity": "sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA==", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-regex?sponsor=1" - } - }, - "node_modules/wrap-ansi/node_modules/ansi-styles": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", - "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/wrap-ansi/node_modules/strip-ansi": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", - "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", - "license": "MIT", - "dependencies": { - "ansi-regex": "^6.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/strip-ansi?sponsor=1" - } - }, - "node_modules/wrappy": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", - "license": "ISC" - }, - "node_modules/write-file-atomic": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-3.0.3.tgz", - "integrity": "sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q==", - "license": "ISC", - 
"dependencies": { - "imurmurhash": "^0.1.4", - "is-typedarray": "^1.0.0", - "signal-exit": "^3.0.2", - "typedarray-to-buffer": "^3.1.5" - } - }, - "node_modules/ws": { - "version": "7.5.10", - "resolved": "https://registry.npmjs.org/ws/-/ws-7.5.10.tgz", - "integrity": "sha512-+dbF1tHwZpXcbOJdVOkzLDxZP1ailvSxM6ZweXTegylPny803bFhA+vqBYw4s31NSAk4S2Qz+AKXK9a4wkdjcQ==", - "license": "MIT", - "engines": { - "node": ">=8.3.0" - }, - "peerDependencies": { - "bufferutil": "^4.0.1", - "utf-8-validate": "^5.0.2" - }, - "peerDependenciesMeta": { - "bufferutil": { - "optional": true - }, - "utf-8-validate": { - "optional": true - } - } - }, - "node_modules/xdg-basedir": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/xdg-basedir/-/xdg-basedir-5.1.0.tgz", - "integrity": "sha512-GCPAHLvrIH13+c0SuacwvRYj2SxJXQ4kaVTT5xgL3kPrz56XxkF21IGhjSE1+W0aw7gpBWRGXLCPnPby6lSpmQ==", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/xml-js": { - "version": "1.6.11", - "resolved": "https://registry.npmjs.org/xml-js/-/xml-js-1.6.11.tgz", - "integrity": "sha512-7rVi2KMfwfWFl+GpPg6m80IVMWXLRjO+PxTq7V2CDhoGak0wzYzFgUY2m4XJ47OGdXd8eLE8EmwfAmdjw7lC1g==", - "license": "MIT", - "dependencies": { - "sax": "^1.2.4" - }, - "bin": { - "xml-js": "bin/cli.js" - } - }, - "node_modules/yallist": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", - "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", - "license": "ISC" - }, - "node_modules/yocto-queue": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-1.1.1.tgz", - "integrity": "sha512-b4JR1PFR10y1mKjhHY9LaGo6tmrgjit7hxVIeAmyMw3jegXR4dhYqLaQF5zMXZxY7tLpMyJeLjr1C4rLmkVe8g==", - "license": "MIT", - "engines": { - "node": ">=12.20" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/zwitch": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-2.0.4.tgz", - "integrity": "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==", - "license": "MIT", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - } - } -} diff --git a/docs/package.json b/docs/package.json index d44749548..1e4020dd4 100644 --- a/docs/package.json +++ b/docs/package.json @@ -14,26 +14,18 @@ "write-heading-ids": "docusaurus write-heading-ids" }, "dependencies": { - "@cmfcmf/docusaurus-search-local": "1.2.0", - "@docusaurus/core": "3.8.0", - "@docusaurus/preset-classic": "3.8.0", - "@docusaurus/theme-mermaid": "3.8.0", - "@mdx-js/react": "3.1.0", - "asciinema-player": "3.10.0", - "clsx": "2.1.1", - "prism-react-renderer": "2.4.1", - "react": "18.3.1", - "react-dom": "18.3.1" - }, - "devDependencies": { - "@docusaurus/module-type-aliases": "3.8.0", - "@docusaurus/types": "3.8.0" - }, - "overrides": { - "@cmfcmf/docusaurus-search-local": { - "@docusaurus/core": "3.8.0", - "cheerio": "1.0.0-rc.12" - } + "@cmfcmf/docusaurus-search-local": "^1.1.0", + "@docusaurus/core": "^2.2.0", + "@docusaurus/module-type-aliases": "^2.2.0", + "@docusaurus/plugin-google-gtag": "^2.4.1", + "@docusaurus/preset-classic": "^2.4.1", + "@docusaurus/theme-mermaid": "^2.4.1", + "@mdx-js/react": "^1.6.22", + "asciinema-player": "^3.5.0", + "clsx": "^1.2.1", + "prism-react-renderer": "^2.0.6", + "react": "^17.0.2", + "react-dom": "^17.0.2" }, 
"browserslist": { "production": [ diff --git a/docs/screencasts/docker/Dockerfile b/docs/screencasts/docker/Dockerfile index 77810bba7..836c83c65 100644 --- a/docs/screencasts/docker/Dockerfile +++ b/docs/screencasts/docker/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:24.04@sha256:a08e551cb33850e4740772b38217fc1796a66da2506d312abe51acda354ff061 +FROM ubuntu:22.04@sha256:2b7412e6465c3c7fc5bb21d3e6f1917c167358449fecac8176c6e496e5c1f05f # Install requirements RUN apt-get update && apt-get install -y software-properties-common &&\ diff --git a/docs/sidebars.js b/docs/sidebars.js index 21d4ef42e..67f3de87b 100644 --- a/docs/sidebars.js +++ b/docs/sidebars.js @@ -55,11 +55,6 @@ const sidebars = { label: 'Performance', link: { type: 'doc', id: 'overview/performance/performance' }, items: [ - { - type: 'doc', - label: 'Compute benchmarks', - id: 'overview/performance/compute', - }, { type: 'doc', label: 'I/O benchmarks', @@ -220,11 +215,6 @@ const sidebars = { label: 'Consume SBOMs', id: 'workflows/sbom', }, - { - type: 'doc', - label: 'Reproduce release artifacts', - id: 'workflows/reproducible-builds', - }, { type: 'doc', label: 'Troubleshooting', diff --git a/docs/src/css/custom.css b/docs/src/css/custom.css index b9cd96aed..f502dd906 100644 --- a/docs/src/css/custom.css +++ b/docs/src/css/custom.css @@ -7,7 +7,7 @@ /** * Fonts */ -@import url('https://fonts.googleapis.com/css2?family=Inter:wght@100..900&display=swap'); +@import url('https://fonts.googleapis.com/css2?family=Roboto:ital,wght@0,100;0,300;0,400;0,500;0,700;0,900;1,100;1,300;1,400;1,500;1,700;1,900&display=swap'); /* You can override the default Infima variables here. */ :root { --ifm-color-primary: #8B04DD; @@ -17,7 +17,7 @@ --ifm-color-primary-light: #8B04DD; --ifm-color-primary-lighter: #B873F4; --ifm-color-primary-lightest: #E3D2FF; - --ifm-font-family-base: 'Inter', sans-serif; + --ifm-font-family-base: 'Roboto', sans-serif; --ifm-code-font-size: 95%; /* --ifm-footer-background-color: black; --ifm-footer-link-color: white; diff --git a/docs/src/theme/MDXComponents.js b/docs/src/theme/MDXComponents.js index 2a1413d73..a0852811a 100644 --- a/docs/src/theme/MDXComponents.js +++ b/docs/src/theme/MDXComponents.js @@ -10,7 +10,7 @@ export default { ...MDXComponents, // Map the "highlight" tag to our component! 
// `Highlight` will receive all props that were passed to `highlight` in MDX - Tabs, - TabItem, - AsciinemaWidget, + tabs: Tabs, + tabItem: TabItem, + asciinemaWidget: AsciinemaWidget, }; diff --git a/docs/static/gtagman.js b/docs/static/gtagman.js deleted file mode 100644 index 57bf6717a..000000000 --- a/docs/static/gtagman.js +++ /dev/null @@ -1,5 +0,0 @@ -(function(w,d,s,l,i){w[l]=w[l]||[];w[l].push({'gtm.start': -new Date().getTime(),event:'gtm.js'});var f=d.getElementsByTagName(s)[0], -j=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=true;j.src= -'https://www.googletagmanager.com/gtm.js?id='+i+dl;f.parentNode.insertBefore(j,f); -})(window,document,'script','dataLayer','GTM-NF9NM7V'); diff --git a/docs/static/img/BannerConstellationanimated.svg b/docs/static/img/BannerConstellationanimated.svg deleted file mode 100644 index f937f69a0..000000000 --- a/docs/static/img/BannerConstellationanimated.svg +++ /dev/null @@ -1,65 +0,0 @@ [65 deleted SVG markup lines; content not recoverable from this extract] diff --git a/docs/static/img/banner.svg b/docs/static/img/banner.svg new file mode 100644 index 000000000..cb19bc6e9 --- /dev/null +++ b/docs/static/img/banner.svg @@ -0,0 +1,155 @@ [155 added SVG markup lines; content not recoverable from this extract] diff --git a/docs/static/img/concept.svg b/docs/static/img/concept.svg index 2286ff308..cfb9a0039 100644 --- a/docs/static/img/concept.svg +++ b/docs/static/img/concept.svg @@ -1,201 +1,1021 @@ [SVG markup rewritten from 201 to 1021 lines; removed and added lines not recoverable from this extract] diff --git a/docs/styles/config/vocabularies/edgeless/accept.txt b/docs/styles/config/vocabularies/edgeless/accept.txt index 1676e071f..26fa0d0c9 100644 --- a/docs/styles/config/vocabularies/edgeless/accept.txt +++ b/docs/styles/config/vocabularies/edgeless/accept.txt @@ -11,7 +11,6 @@ backend Bazel bootloader Bootstrapper -CLI cloud config CPU @@ -53,7 +52,6 @@ Mbps MicroK8s namespace Nginx -paravisor PCR plaintext proxied diff --git a/docs/versioned_docs/version-2.0/architecture/attestation.md b/docs/versioned_docs/version-2.0/architecture/attestation.md index 92ee2e9a2..443c19639 100644 --- a/docs/versioned_docs/version-2.0/architecture/attestation.md +++ b/docs/versioned_docs/version-2.0/architecture/attestation.md @@ -121,8 +121,8 @@ Constellation allows to specify in the config which measurements should be enfor Enforcing non-reproducible measurements controlled by the cloud provider means that changes in these values require manual updates to the cluster's config. By default, Constellation only enforces measurements that are stable values produced by the infrastructure or by Constellation directly. - - + + Constellation uses the [vTPM](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch#vtpm) feature of Azure CVMs for runtime measurements. This vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification.
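The attestation.md hunk above describes how Constellation enforces the runtime measurements listed in the cluster config. A minimal, hedged sketch of how those values are typically handled in practice; neither command appears in this diff, `constellation config fetch-measurements` is assumed from the documented CLI workflow, and the tpm2-tools call is an illustrative assumption for local inspection only:

```bash
# Refresh the reference measurements stored in constellation-conf.yaml (assumed subcommand)
constellation config fetch-measurements

# Optionally inspect raw vTPM PCR values on a node for comparison (requires tpm2-tools)
tpm2_pcrread sha256:0,4,8,9,11,12
```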
@@ -149,8 +149,8 @@ The latter means that the value can be generated offline and compared to the one | 12 | ClusterID | Constellation Bootstrapper | Yes | | 13–23 | Unused | - | - | - - + + Constellation uses the [vTPM](https://cloud.google.com/compute/confidential-vm/docs/about-cvm) feature of CVMs on GCP for runtime measurements. Note that this vTPM doesn't run inside the hardware-protected CVM context, but is emulated by the hypervisor. @@ -179,8 +179,8 @@ The latter means that the value can be generated offline and compared to the one | 12 | ClusterID | Constellation Bootstrapper | Yes | | 13–23 | Unused |- | - | - - + + ## Cluster attestation diff --git a/docs/versioned_docs/version-2.0/architecture/keys.md b/docs/versioned_docs/version-2.0/architecture/keys.md index ae6044862..cb8c41768 100644 --- a/docs/versioned_docs/version-2.0/architecture/keys.md +++ b/docs/versioned_docs/version-2.0/architecture/keys.md @@ -101,7 +101,7 @@ Initially, it will support the following KMSs: * [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/#product-overview) * [KMIP-compatible KMS](https://www.oasis-open.org/committees/tc_home.php?wg_abbrev=kmip) -Storing the keys in Cloud KMS of AWS, Azure, or GCP binds the key usage to the particular cloud identity access management (IAM). +Storing the keys in Cloud KMS of AWS, GCP, or Azure binds the key usage to the particular cloud identity access management (IAM). In the future, Constellation will support remote attestation-based access policies for Cloud KMS once available. Note that using a Cloud KMS limits the isolation and protection to the guarantees of the particular offering. diff --git a/docs/versioned_docs/version-2.0/getting-started/first-steps.md b/docs/versioned_docs/version-2.0/getting-started/first-steps.md index caec42ded..9be306396 100644 --- a/docs/versioned_docs/version-2.0/getting-started/first-steps.md +++ b/docs/versioned_docs/version-2.0/getting-started/first-steps.md @@ -6,29 +6,29 @@ The following steps guide you through the process of creating a cluster and depl 1. Create the configuration file for your selected cloud provider. - - + + ```bash constellation config generate azure ``` - - + + ```bash constellation config generate gcp ``` - - + + This creates the file `constellation-conf.yaml` in your current working directory. 2. Fill in your cloud provider specific information. - - + + You need several resources for the cluster. You can use the following `az` script to create them: @@ -59,8 +59,8 @@ The following steps guide you through the process of creating a cluster and depl Run `constellation config instance-types` to get the list of all supported options. - - + + * **subscription**: The UUID of your Azure subscription, e.g., `8b8bd01f-efd9-4113-9bd1-c82137c32da7`. @@ -106,8 +106,8 @@ The following steps guide you through the process of creating a cluster and depl Run `constellation config instance-types` to get the list of all supported options. - - + + You need a service account for the cluster. You can use the following `gcloud` script to create it: @@ -130,18 +130,18 @@ The following steps guide you through the process of creating a cluster and depl By default, Constellation uses `n2d-standard-4` VMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. Supported are all machines from the N2D family. 
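To make the instanceType step in the first-steps.md hunk above concrete: a small sketch, assuming `yq` is installed and that the config keeps the VM size under `provider.gcp.instanceType` (the exact key path is an assumption and may differ between Constellation versions):

```bash
# List the instance types supported for the configured CSP
constellation config instance-types

# Assumed key path; switch the worker VM size in constellation-conf.yaml
yq -i '.provider.gcp.instanceType = "n2d-standard-8"' constellation-conf.yaml
```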
Refer to [N2D machine series](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines) or run `constellation config instance-types` to get the list of all supported options. - - + + * **project**: The ID of your GCP project, e.g., `constellation-129857`. You can find it on the [welcome screen of your GCP project](https://console.cloud.google.com/welcome). For more information refer to [Google's documentation](https://support.google.com/googleapi/answer/7014113). - * **region**: The GCP region you want to deploy your cluster in, e.g., `us-central1`. + * **region**: The GCP region you want to deploy your cluster in, e.g., `us-west1`. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). - * **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-central1-a`. + * **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-west1-a`. You can find a [list of all zones in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). @@ -159,8 +159,8 @@ The following steps guide you through the process of creating a cluster and depl Supported are all machines from the N2D family with a minimum of 4 vCPUs. It defaults to `n2d-standard-4` (4 vCPUs, 16 GB RAM), but you can use any other VMs from the same family. Refer to [N2D machine series](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines) or run `constellation config instance-types` to get the list of all supported options. - - + + :::info diff --git a/docs/versioned_docs/version-2.0/getting-started/install.md b/docs/versioned_docs/version-2.0/getting-started/install.md index 5945f0405..8e3545c1a 100644 --- a/docs/versioned_docs/version-2.0/getting-started/install.md +++ b/docs/versioned_docs/version-2.0/getting-started/install.md @@ -18,8 +18,8 @@ Make sure the following requirements are met: The CLI executable is available at [GitHub](https://github.com/edgelesssys/constellation/releases). Install it with the following commands: - - + + 1. Download the CLI: @@ -35,8 +35,8 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-linux-amd64 /usr/local/bin/constellation ``` - - + + 1. Download the CLI: @@ -52,9 +52,10 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-linux-arm64 /usr/local/bin/constellation ``` - - + + + 1. Download the CLI: @@ -70,9 +71,11 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-darwin-arm64 /usr/local/bin/constellation ``` - - + + + + 1. Download the CLI: @@ -88,8 +91,8 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-darwin-amd64 /usr/local/bin/constellation ``` - - + + :::tip The CLI supports autocompletion for various shells. To set it up, run `constellation completion` and follow the given steps. @@ -101,8 +104,8 @@ The CLI makes authenticated calls to the CSP API. Therefore, you need to set up ### Required permissions - - + + You need the following permissions for your user account: @@ -112,8 +115,8 @@ You need the following permissions for your user account: If you don't have these permissions with scope *subscription*, ask your administrator to [create the service account and a resource group for your Constellation cluster](first-steps.md). 
Your user account needs the `Contributor` permission scoped to this resource group. - - + + Create a new project for Constellation or use an existing one. Enable the [Compute Engine API](https://console.cloud.google.com/apis/library/compute.googleapis.com) on it. @@ -125,8 +128,8 @@ You need the following permissions on this project: Follow Google's guide on [understanding](https://cloud.google.com/iam/docs/understanding-roles) and [assigning roles](https://cloud.google.com/iam/docs/granting-changing-revoking-access). - - + + ### Authentication @@ -136,8 +139,8 @@ You need to authenticate with your CSP. The following lists the required steps f The steps for a *testing* environment are simpler. However, they may expose secrets to the CSP. If in doubt, follow the *production* steps. ::: - - + + **Testing** @@ -153,8 +156,8 @@ az login Other options are described in Azure's [authentication guide](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli). - - + + **Testing** @@ -177,8 +180,8 @@ Use one of the following options on a trusted machine: Follow [Google's guide](https://cloud.google.com/docs/authentication/production#manually) for setting up your credentials. - - + + ## Next steps diff --git a/docs/versioned_docs/version-2.0/overview/clouds.md b/docs/versioned_docs/version-2.0/overview/clouds.md index 0fa4b79f7..c526d956b 100644 --- a/docs/versioned_docs/version-2.0/overview/clouds.md +++ b/docs/versioned_docs/version-2.0/overview/clouds.md @@ -24,11 +24,11 @@ The following table summarizes the state of features for different infrastructur With its [CVM offering](https://docs.microsoft.com/en-us/azure/confidential-computing/confidential-vm-overview), Azure provides the best foundations for Constellation. Regarding (3), Azure provides direct access to remote-attestation statements. However, regarding (4), the standard CVMs still include closed-source firmware running in VM Privilege Level (VMPL) 0. This firmware is signed by Azure. The signature is reflected in the remote-attestation statements of CVMs. Thus, the Azure closed-source firmware becomes part of Constellation's trusted computing base (TCB). -\* Recently, [Azure announced the open source paravisor OpenHCL](https://techcommunity.microsoft.com/blog/windowsosplatform/openhcl-the-new-open-source-paravisor/4273172). It's the foundation for fully open source and verifiable CVM firmware. Once Azure provides their CVM firmware with reproducible builds based on OpenHCL, (4) switches from *No* to *Yes*. Constellation will support OpenHCL based firmware on Azure in the future. +Recently, Azure [announced](https://techcommunity.microsoft.com/t5/azure-confidential-computing/azure-confidential-vms-using-sev-snp-dcasv5-ecasv5-are-now/ba-p/3573747) the *limited preview* of CVMs with customizable firmware. With this CVM type, (4) switches from *No* to *Yes*. Constellation will support customizable firmware on Azure in the future. ## Google Cloud Platform (GCP) -The [CVMs available in GCP](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview#amd_sev) are based on AMD SEV but don't have SNP features enabled. This impacts attestation capabilities. Currently, GCP doesn't offer CVM-based attestation at all. Instead, GCP provides attestation statements based on its regular [vTPM](https://cloud.google.com/blog/products/identity-security/virtual-trusted-platform-module-for-shielded-vms-security-in-plaintext), which is managed by the hypervisor. 
On GCP, the hypervisor is thus currently part of Constellation's TCB. +The [CVMs available in GCP](https://cloud.google.com/compute/confidential-vm/docs/create-confidential-vm-instance) are based on AMD SEV but don't have SNP features enabled. This impacts attestation capabilities. Currently, GCP doesn't offer CVM-based attestation at all. Instead, GCP provides attestation statements based on its regular [vTPM](https://cloud.google.com/blog/products/identity-security/virtual-trusted-platform-module-for-shielded-vms-security-in-plaintext), which is managed by the hypervisor. On GCP, the hypervisor is thus currently part of Constellation's TCB. ## Amazon Web Services (AWS) diff --git a/docs/versioned_docs/version-2.0/overview/confidential-kubernetes.md b/docs/versioned_docs/version-2.0/overview/confidential-kubernetes.md index 1441c833a..2b6c6ed17 100644 --- a/docs/versioned_docs/version-2.0/overview/confidential-kubernetes.md +++ b/docs/versioned_docs/version-2.0/overview/confidential-kubernetes.md @@ -23,9 +23,9 @@ With the above, Constellation wraps an entire cluster into one coherent and veri ![Confidential Kubernetes](../_media/concept-constellation.svg) -## Comparison: Managed Kubernetes with CVMs +## Contrast: Managed Kubernetes with CVMs -In comparison, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. Consequently, this approach leaves a large attack surface, as is depicted in the following. +In contrast, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. Consequently, this approach leaves a large attack surface, as is depicted in the following. 
![Concept: Managed Kubernetes plus CVMs](../_media/concept-managed.svg) diff --git a/docs/versioned_docs/version-2.0/workflows/create.md b/docs/versioned_docs/version-2.0/workflows/create.md index a426202e5..357ab6703 100644 --- a/docs/versioned_docs/version-2.0/workflows/create.md +++ b/docs/versioned_docs/version-2.0/workflows/create.md @@ -15,22 +15,22 @@ This step creates the necessary resources for your cluster in your cloud environ Generate a configuration file for your cloud service provider (CSP): - - + + ```bash constellation config generate azure ``` - - + + ```bash constellation config generate gcp ``` - - + + This creates the file `constellation-conf.yaml` in the current directory. [Fill in your CSP-specific information](../getting-started/first-steps.md#create-a-cluster) before you continue. @@ -53,7 +53,7 @@ constellation create --control-plane-nodes 1 --worker-nodes 2 For details on the flags, consult the command help via `constellation create -h`. -*create* stores your cluster's configuration to a file named [`constellation-state.json`](../architecture/orchestration.md#cluster-creation-process) in your current directory. +*create* stores your cluster's configuration to a file named [`constellation-state.json`](../architecture/orchestration.md#installation-process) in your current directory. ## The *init* step diff --git a/docs/versioned_docs/version-2.0/workflows/recovery.md b/docs/versioned_docs/version-2.0/workflows/recovery.md index ca37ca839..4c6010d98 100644 --- a/docs/versioned_docs/version-2.0/workflows/recovery.md +++ b/docs/versioned_docs/version-2.0/workflows/recovery.md @@ -17,8 +17,8 @@ You can check the health status of the nodes via the cloud service provider (CSP Constellation provides logging information on the boot process and status via [cloud logging](troubleshooting.md#cloud-logging). In the following, you'll find detailed descriptions for identifying clusters stuck in recovery for each CSP. - - + + In the Azure portal, find the cluster's resource group. Inside the resource group, open the control plane *Virtual machine scale set* `constellation-scale-set-controlplanes-`. @@ -52,8 +52,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. For this, you need its IP address, which you can obtain from the *Overview* page under *Private IP address*. - - + + First, check that the control plane *Instance Group* has enough members in a *Ready* state. In the GCP Console, go to **Instance Groups** and check the group for the cluster's control plane `-control-plane-`. @@ -88,8 +88,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. For this, you need its IP address, which you can obtain from the **VM Instance** > **network interfaces** table under *Primary internal IP address*. - - + + ## Recover a cluster diff --git a/docs/versioned_docs/version-2.0/workflows/scale.md b/docs/versioned_docs/version-2.0/workflows/scale.md index 84773b80c..3318d8aee 100644 --- a/docs/versioned_docs/version-2.0/workflows/scale.md +++ b/docs/versioned_docs/version-2.0/workflows/scale.md @@ -6,23 +6,23 @@ Constellation provides all features of a Kubernetes cluster including scaling an [During cluster initialization](create.md#the-init-step) you can choose to deploy the [cluster autoscaler](https://github.com/kubernetes/autoscaler). 
It automatically provisions additional worker nodes so that all pods have a place to run. Alternatively, you can choose to manually scale your cluster up or down: - - + + 1. Find your Constellation resource group. 2. Select the `scale-set-workers`. 3. Go to **settings** and **scaling**. 4. Set the new **instance count** and **save**. - - + + 1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). 2. **Edit** the **worker** instance group. 3. Set the new **number of instances** and **save**. - - + + ## Control-plane node scaling @@ -30,23 +30,23 @@ Control-plane nodes can **only be scaled manually and only scaled up**! To increase the number of control-plane nodes, follow these steps: - + - + 1. Find your Constellation resource group. 2. Select the `scale-set-controlplanes`. 3. Go to **settings** and **scaling**. 4. Set the new (increased) **instance count** and **save**. - - + + 1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). 2. **Edit** the **control-plane** instance group. 3. Set the new (increased) **number of instances** and **save**. - - + + If you scale down the number of control-planes nodes, the removed nodes won't be able to exit the `etcd` cluster correctly. This will endanger the quorum that's required to run a stable Kubernetes control plane. diff --git a/docs/versioned_docs/version-2.0/workflows/storage.md b/docs/versioned_docs/version-2.0/workflows/storage.md index 38d77c694..958c73261 100644 --- a/docs/versioned_docs/version-2.0/workflows/storage.md +++ b/docs/versioned_docs/version-2.0/workflows/storage.md @@ -21,14 +21,14 @@ For more details see [encrypted persistent storage](../architecture/encrypted-st Constellation supports the following drivers, which offer node-level encryption and optional integrity protection. - - + + **Constellation CSI driver for Azure Disk**: Mount Azure [Disk Storage](https://azure.microsoft.com/en-us/services/storage/disks/#overview) into your Constellation cluster. See the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-azuredisk-csi-driver) for more information. Since Azure Disks are mounted as ReadWriteOnce, they're only available to a single pod. - - + + **Constellation CSI driver for GCP Persistent Disk**: Mount [Persistent Disk](https://cloud.google.com/persistent-disk) block storage into your Constellation cluster. @@ -36,8 +36,8 @@ This includes support for [volume snapshots](https://cloud.google.com/kubernetes You can use them to bring a volume back to a prior state or provision new volumes. Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-gcp-compute-persistent-disk-csi-driver) for information about the configuration. - - + + Note that in case the options above aren't a suitable solution for you, Constellation is compatible with all other CSI-based storage options. For example, you can use [Azure Files](https://docs.microsoft.com/en-us/azure/storage/files/storage-files-introduction) or [GCP Filestore](https://cloud.google.com/filestore) with Constellation out of the box. Constellation is just not providing transparent encryption on the node level for these storage types yet. 
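The storage.md hunk above names the CSI drivers that provide node-level encryption. A minimal usage sketch, not part of this diff, that requests the `integrity-encrypted-rwo` storage class shown later in that file; the claim name and size are illustrative:

```bash
# Request an encrypted persistent volume via the storage class installed by the CSI driver
kubectl apply -f - <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: demo-encrypted-volume
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: integrity-encrypted-rwo
  resources:
    requests:
      storage: 10Gi
EOF
```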
@@ -45,8 +45,8 @@ Note that in case the options above aren't a suitable solution for you, Constell The following installation guide gives an overview of how to securely use CSI-based cloud storage for persistent volumes in Constellation. - - + + 1. Install the driver: @@ -56,8 +56,8 @@ The following installation guide gives an overview of how to securely use CSI-ba helm install azuredisk-csi-driver charts/edgeless --namespace kube-system ``` - - + + 1. Install the driver: @@ -66,8 +66,8 @@ The following installation guide gives an overview of how to securely use CSI-ba helm install gcp-compute-persistent-disk-csi-driver charts/ --namespace kube-system ``` - - + + :::info @@ -138,8 +138,8 @@ The default storage class is responsible for all persistent volume claims that d The previous instructions create a storage class with encryption enabled and sets this as the default class. In case you wish to change it, follow the steps below: - - + + 1. List the storage classes in your cluster: @@ -185,8 +185,8 @@ In case you wish to change it, follow the steps below: integrity-encrypted-rwo (default) azuredisk.csi.confidential.cloud Delete Immediate false 1d ``` - - + + 1. List the storage classes in your cluster: @@ -232,5 +232,5 @@ In case you wish to change it, follow the steps below: integrity-encrypted-rwo (default) gcp.csi.confidential.cloud Delete Immediate false 1d ``` - - + + diff --git a/docs/versioned_docs/version-2.0/workflows/troubleshooting.md b/docs/versioned_docs/version-2.0/workflows/troubleshooting.md index afc9274c6..ba340601b 100644 --- a/docs/versioned_docs/version-2.0/workflows/troubleshooting.md +++ b/docs/versioned_docs/version-2.0/workflows/troubleshooting.md @@ -8,8 +8,8 @@ To provide information during early stages of the node's boot process, Constella You can view these information in the follow places: - - + + 1. In your Azure subscription find the Constellation resource group. 2. Inside the resource group find the Application Insights resource called `constellation-insights-*`. @@ -19,8 +19,8 @@ You can view these information in the follow places: To **find the disk UUIDs** use the following query: `traces | where message contains "Disk UUID"` - - + + 1. Select the project that hosts Constellation. 2. Go to the `Compute Engine` service. @@ -35,5 +35,5 @@ Constellation uses the default bucket to store logs. Its [default retention peri ::: - - + + diff --git a/docs/versioned_docs/version-2.0/workflows/verify-cli.md b/docs/versioned_docs/version-2.0/workflows/verify-cli.md index 52ed24d95..0a52fedd4 100644 --- a/docs/versioned_docs/version-2.0/workflows/verify-cli.md +++ b/docs/versioned_docs/version-2.0/workflows/verify-cli.md @@ -1,6 +1,6 @@ # Verify the CLI -Edgeless Systems uses [sigstore](https://www.sigstore.dev/) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/cosign/signing/overview/), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at `https://rekor.sigstore.dev`. +Edgeless Systems uses [sigstore](https://www.sigstore.dev/) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/signing/quickstart), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. 
Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at . :::note The public key for Edgeless Systems' long-term code-signing key is: @@ -12,7 +12,7 @@ JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== -----END PUBLIC KEY----- ``` -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). +The public key is also available for download at and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). ::: The Rekor transparency log is a public append-only ledger that verifies and records signatures and associated metadata. The Rekor transparency log enables everyone to observe the sequence of (software) signatures issued by Edgeless Systems and many other parties. The transparency log allows for the public identification of dubious or malicious signatures. @@ -25,7 +25,7 @@ You don't need to verify the Constellation node images. This is done automatical ## Verify the signature -First, [install the Cosign CLI](https://docs.sigstore.dev/cosign/system_config/installation/). Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example: +First, [install the Cosign CLI](https://docs.sigstore.dev/system_config/installation). Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example: ```shell-session $ cosign verify-blob --key https://edgeless.systems/es.pub --signature constellation-linux-amd64.sig constellation-linux-amd64 diff --git a/docs/versioned_docs/version-2.1/architecture/attestation.md b/docs/versioned_docs/version-2.1/architecture/attestation.md index 92ee2e9a2..443c19639 100644 --- a/docs/versioned_docs/version-2.1/architecture/attestation.md +++ b/docs/versioned_docs/version-2.1/architecture/attestation.md @@ -121,8 +121,8 @@ Constellation allows to specify in the config which measurements should be enfor Enforcing non-reproducible measurements controlled by the cloud provider means that changes in these values require manual updates to the cluster's config. By default, Constellation only enforces measurements that are stable values produced by the infrastructure or by Constellation directly. - - + + Constellation uses the [vTPM](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch#vtpm) feature of Azure CVMs for runtime measurements. This vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. @@ -149,8 +149,8 @@ The latter means that the value can be generated offline and compared to the one | 12 | ClusterID | Constellation Bootstrapper | Yes | | 13–23 | Unused | - | - | - - + + Constellation uses the [vTPM](https://cloud.google.com/compute/confidential-vm/docs/about-cvm) feature of CVMs on GCP for runtime measurements. Note that this vTPM doesn't run inside the hardware-protected CVM context, but is emulated by the hypervisor. 
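The verify-cli.md hunk earlier in this section explains that all signatures are recorded in the public Rekor transparency log. A hedged follow-up sketch; rekor-cli isn't used in this diff, and the flags are assumed from standard rekor-cli usage:

```bash
# Search the public Rekor log for entries recorded for the downloaded CLI binary
rekor-cli search --artifact constellation-linux-amd64

# Inspect one of the returned entries in detail
rekor-cli get --uuid <entry-uuid>
```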
@@ -179,8 +179,8 @@ The latter means that the value can be generated offline and compared to the one | 12 | ClusterID | Constellation Bootstrapper | Yes | | 13–23 | Unused |- | - | - - + + ## Cluster attestation diff --git a/docs/versioned_docs/version-2.1/architecture/keys.md b/docs/versioned_docs/version-2.1/architecture/keys.md index ae6044862..cb8c41768 100644 --- a/docs/versioned_docs/version-2.1/architecture/keys.md +++ b/docs/versioned_docs/version-2.1/architecture/keys.md @@ -101,7 +101,7 @@ Initially, it will support the following KMSs: * [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/#product-overview) * [KMIP-compatible KMS](https://www.oasis-open.org/committees/tc_home.php?wg_abbrev=kmip) -Storing the keys in Cloud KMS of AWS, Azure, or GCP binds the key usage to the particular cloud identity access management (IAM). +Storing the keys in Cloud KMS of AWS, GCP, or Azure binds the key usage to the particular cloud identity access management (IAM). In the future, Constellation will support remote attestation-based access policies for Cloud KMS once available. Note that using a Cloud KMS limits the isolation and protection to the guarantees of the particular offering. diff --git a/docs/versioned_docs/version-2.1/getting-started/first-steps.md b/docs/versioned_docs/version-2.1/getting-started/first-steps.md index 10cf3d7b9..bd9513650 100644 --- a/docs/versioned_docs/version-2.1/getting-started/first-steps.md +++ b/docs/versioned_docs/version-2.1/getting-started/first-steps.md @@ -11,29 +11,29 @@ If you don't have a cloud subscription, check out [MiniConstellation](first-step 1. Create the configuration file for your selected cloud provider. - - + + ```bash constellation config generate azure ``` - - + + ```bash constellation config generate gcp ``` - - + + This creates the file `constellation-conf.yaml` in your current working directory. 2. Fill in your cloud provider specific information. - - + + You need several resources for the cluster. You can use the following `az` script to create them: @@ -64,8 +64,8 @@ If you don't have a cloud subscription, check out [MiniConstellation](first-step Run `constellation config instance-types` to get the list of all supported options. - - + + * **subscription**: The UUID of your Azure subscription, e.g., `8b8bd01f-efd9-4113-9bd1-c82137c32da7`. @@ -111,8 +111,8 @@ If you don't have a cloud subscription, check out [MiniConstellation](first-step Run `constellation config instance-types` to get the list of all supported options. - - + + You need a service account for the cluster. You can use the following `gcloud` script to create it: @@ -135,18 +135,18 @@ If you don't have a cloud subscription, check out [MiniConstellation](first-step By default, Constellation uses `n2d-standard-4` VMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. Supported are all machines from the N2D family. Refer to [N2D machine series](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines) or run `constellation config instance-types` to get the list of all supported options. - - + + * **project**: The ID of your GCP project, e.g., `constellation-129857`. You can find it on the [welcome screen of your GCP project](https://console.cloud.google.com/welcome). For more information refer to [Google's documentation](https://support.google.com/googleapi/answer/7014113). 
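The GCP fields discussed at the end of the first-steps.md hunk above (project, region, zone) can also be looked up from a shell instead of the console. A generic gcloud sketch, not part of this diff:

```bash
# List the projects your account can see, including their project IDs
gcloud projects list

# List available regions and zones before filling in region/zone in the config
gcloud compute regions list
gcloud compute zones list
```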
- * **region**: The GCP region you want to deploy your cluster in, e.g., `us-central1`. + * **region**: The GCP region you want to deploy your cluster in, e.g., `us-west1`. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). - * **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-central1-a`. + * **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-west1-a`. You can find a [list of all zones in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). @@ -164,8 +164,8 @@ If you don't have a cloud subscription, check out [MiniConstellation](first-step Supported are all machines from the N2D family with a minimum of 4 vCPUs. It defaults to `n2d-standard-4` (4 vCPUs, 16 GB RAM), but you can use any other VMs from the same family. Refer to [N2D machine series](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines) or run `constellation config instance-types` to get the list of all supported options. - - + + :::info diff --git a/docs/versioned_docs/version-2.1/getting-started/install.md b/docs/versioned_docs/version-2.1/getting-started/install.md index d4cf81ff7..56029b806 100644 --- a/docs/versioned_docs/version-2.1/getting-started/install.md +++ b/docs/versioned_docs/version-2.1/getting-started/install.md @@ -18,8 +18,8 @@ Make sure the following requirements are met: The CLI executable is available at [GitHub](https://github.com/edgelesssys/constellation/releases). Install it with the following commands: - - + + 1. Download the CLI: @@ -35,8 +35,8 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-linux-amd64 /usr/local/bin/constellation ``` - - + + 1. Download the CLI: @@ -52,9 +52,10 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-linux-arm64 /usr/local/bin/constellation ``` - - + + + 1. Download the CLI: @@ -70,9 +71,11 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-darwin-arm64 /usr/local/bin/constellation ``` - - + + + + 1. Download the CLI: @@ -88,8 +91,8 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-darwin-amd64 /usr/local/bin/constellation ``` - - + + :::tip The CLI supports autocompletion for various shells. To set it up, run `constellation completion` and follow the given steps. @@ -105,8 +108,8 @@ If you don't have a cloud subscription, you can try [MiniConstellation](first-st ### Required permissions - - + + You need the following permissions for your user account: @@ -116,8 +119,8 @@ You need the following permissions for your user account: If you don't have these permissions with scope *subscription*, ask your administrator to [create the service account and a resource group for your Constellation cluster](first-steps.md). Your user account needs the `Contributor` permission scoped to this resource group. - - + + Create a new project for Constellation or use an existing one. Enable the [Compute Engine API](https://console.cloud.google.com/apis/library/compute.googleapis.com) on it. @@ -129,8 +132,8 @@ You need the following permissions on this project: Follow Google's guide on [understanding](https://cloud.google.com/iam/docs/understanding-roles) and [assigning roles](https://cloud.google.com/iam/docs/granting-changing-revoking-access). 
- - + + ### Authentication @@ -140,8 +143,8 @@ You need to authenticate with your CSP. The following lists the required steps f The steps for a *testing* environment are simpler. However, they may expose secrets to the CSP. If in doubt, follow the *production* steps. ::: - - + + **Testing** @@ -157,8 +160,8 @@ az login Other options are described in Azure's [authentication guide](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli). - - + + **Testing** @@ -181,8 +184,8 @@ Use one of the following options on a trusted machine: Follow [Google's guide](https://cloud.google.com/docs/authentication/production#manually) for setting up your credentials. - - + + ## Next steps diff --git a/docs/versioned_docs/version-2.1/overview/clouds.md b/docs/versioned_docs/version-2.1/overview/clouds.md index 0fa4b79f7..c526d956b 100644 --- a/docs/versioned_docs/version-2.1/overview/clouds.md +++ b/docs/versioned_docs/version-2.1/overview/clouds.md @@ -24,11 +24,11 @@ The following table summarizes the state of features for different infrastructur With its [CVM offering](https://docs.microsoft.com/en-us/azure/confidential-computing/confidential-vm-overview), Azure provides the best foundations for Constellation. Regarding (3), Azure provides direct access to remote-attestation statements. However, regarding (4), the standard CVMs still include closed-source firmware running in VM Privilege Level (VMPL) 0. This firmware is signed by Azure. The signature is reflected in the remote-attestation statements of CVMs. Thus, the Azure closed-source firmware becomes part of Constellation's trusted computing base (TCB). -\* Recently, [Azure announced the open source paravisor OpenHCL](https://techcommunity.microsoft.com/blog/windowsosplatform/openhcl-the-new-open-source-paravisor/4273172). It's the foundation for fully open source and verifiable CVM firmware. Once Azure provides their CVM firmware with reproducible builds based on OpenHCL, (4) switches from *No* to *Yes*. Constellation will support OpenHCL based firmware on Azure in the future. +Recently, Azure [announced](https://techcommunity.microsoft.com/t5/azure-confidential-computing/azure-confidential-vms-using-sev-snp-dcasv5-ecasv5-are-now/ba-p/3573747) the *limited preview* of CVMs with customizable firmware. With this CVM type, (4) switches from *No* to *Yes*. Constellation will support customizable firmware on Azure in the future. ## Google Cloud Platform (GCP) -The [CVMs available in GCP](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview#amd_sev) are based on AMD SEV but don't have SNP features enabled. This impacts attestation capabilities. Currently, GCP doesn't offer CVM-based attestation at all. Instead, GCP provides attestation statements based on its regular [vTPM](https://cloud.google.com/blog/products/identity-security/virtual-trusted-platform-module-for-shielded-vms-security-in-plaintext), which is managed by the hypervisor. On GCP, the hypervisor is thus currently part of Constellation's TCB. +The [CVMs available in GCP](https://cloud.google.com/compute/confidential-vm/docs/create-confidential-vm-instance) are based on AMD SEV but don't have SNP features enabled. This impacts attestation capabilities. Currently, GCP doesn't offer CVM-based attestation at all. 
Instead, GCP provides attestation statements based on its regular [vTPM](https://cloud.google.com/blog/products/identity-security/virtual-trusted-platform-module-for-shielded-vms-security-in-plaintext), which is managed by the hypervisor. On GCP, the hypervisor is thus currently part of Constellation's TCB. ## Amazon Web Services (AWS) diff --git a/docs/versioned_docs/version-2.1/overview/confidential-kubernetes.md b/docs/versioned_docs/version-2.1/overview/confidential-kubernetes.md index 1441c833a..2b6c6ed17 100644 --- a/docs/versioned_docs/version-2.1/overview/confidential-kubernetes.md +++ b/docs/versioned_docs/version-2.1/overview/confidential-kubernetes.md @@ -23,9 +23,9 @@ With the above, Constellation wraps an entire cluster into one coherent and veri ![Confidential Kubernetes](../_media/concept-constellation.svg) -## Comparison: Managed Kubernetes with CVMs +## Contrast: Managed Kubernetes with CVMs -In comparison, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. Consequently, this approach leaves a large attack surface, as is depicted in the following. +In contrast, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. Consequently, this approach leaves a large attack surface, as is depicted in the following. ![Concept: Managed Kubernetes plus CVMs](../_media/concept-managed.svg) diff --git a/docs/versioned_docs/version-2.1/workflows/create.md b/docs/versioned_docs/version-2.1/workflows/create.md index a8956d7e4..a567c5d23 100644 --- a/docs/versioned_docs/version-2.1/workflows/create.md +++ b/docs/versioned_docs/version-2.1/workflows/create.md @@ -19,22 +19,22 @@ This step creates the necessary resources for your cluster in your cloud environ Generate a configuration file for your cloud service provider (CSP): - - + + ```bash constellation config generate azure ``` - - + + ```bash constellation config generate gcp ``` - - + + This creates the file `constellation-conf.yaml` in the current directory. [Fill in your CSP-specific information](../getting-started/first-steps.md#create-a-cluster) before you continue. 
@@ -57,7 +57,7 @@ constellation create --control-plane-nodes 1 --worker-nodes 2 For details on the flags, consult the command help via `constellation create -h`. -*create* stores your cluster's configuration to a file named [`constellation-state.json`](../architecture/orchestration.md#cluster-creation-process) in your current directory. +*create* stores your cluster's configuration to a file named [`constellation-state.json`](../architecture/orchestration.md#installation-process) in your current directory. ## The *init* step diff --git a/docs/versioned_docs/version-2.1/workflows/recovery.md b/docs/versioned_docs/version-2.1/workflows/recovery.md index c55daf413..cde039ea7 100644 --- a/docs/versioned_docs/version-2.1/workflows/recovery.md +++ b/docs/versioned_docs/version-2.1/workflows/recovery.md @@ -16,8 +16,8 @@ You can check the health status of the nodes via the cloud service provider (CSP Constellation provides logging information on the boot process and status via [cloud logging](troubleshooting.md#cloud-logging). In the following, you'll find detailed descriptions for identifying clusters stuck in recovery for each CSP. - - + + In the Azure portal, find the cluster's resource group. Inside the resource group, open the control plane *Virtual machine scale set* `constellation-scale-set-controlplanes-`. @@ -51,8 +51,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. - - + + First, check that the control plane *Instance Group* has enough members in a *Ready* state. In the GCP Console, go to **Instance Groups** and check the group for the cluster's control plane `-control-plane-`. @@ -87,8 +87,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. - - + + ## Recover a cluster diff --git a/docs/versioned_docs/version-2.1/workflows/scale.md b/docs/versioned_docs/version-2.1/workflows/scale.md index 669856655..1c8757fe8 100644 --- a/docs/versioned_docs/version-2.1/workflows/scale.md +++ b/docs/versioned_docs/version-2.1/workflows/scale.md @@ -48,23 +48,23 @@ kubectl -n kube-system get nodes Alternatively, you can manually scale your cluster up or down: - - + + 1. Find your Constellation resource group. 2. Select the `scale-set-workers`. 3. Go to **settings** and **scaling**. 4. Set the new **instance count** and **save**. - - + + 1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). 2. **Edit** the **worker** instance group. 3. Set the new **number of instances** and **save**. - - + + ## Control-plane node scaling @@ -72,23 +72,23 @@ Control-plane nodes can **only be scaled manually and only scaled up**! To increase the number of control-plane nodes, follow these steps: - + - + 1. Find your Constellation resource group. 2. Select the `scale-set-controlplanes`. 3. Go to **settings** and **scaling**. 4. Set the new (increased) **instance count** and **save**. - - + + 1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). 2. **Edit** the **control-plane** instance group. 3. Set the new (increased) **number of instances** and **save**. - - + + If you scale down the number of control-planes nodes, the removed nodes won't be able to exit the `etcd` cluster correctly. This will endanger the quorum that's required to run a stable Kubernetes control plane. 
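Around the manual scaling steps in the scale.md hunk above, a quick generic kubectl check (not part of this diff) confirms that newly provisioned nodes actually joined the cluster:

```bash
# Watch nodes join after raising the instance count in the CSP console
kubectl get nodes --watch

# Count how many nodes currently report Ready
kubectl get nodes --no-headers | grep -cw Ready
```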
diff --git a/docs/versioned_docs/version-2.1/workflows/storage.md b/docs/versioned_docs/version-2.1/workflows/storage.md index 38d77c694..958c73261 100644 --- a/docs/versioned_docs/version-2.1/workflows/storage.md +++ b/docs/versioned_docs/version-2.1/workflows/storage.md @@ -21,14 +21,14 @@ For more details see [encrypted persistent storage](../architecture/encrypted-st Constellation supports the following drivers, which offer node-level encryption and optional integrity protection. - - + + **Constellation CSI driver for Azure Disk**: Mount Azure [Disk Storage](https://azure.microsoft.com/en-us/services/storage/disks/#overview) into your Constellation cluster. See the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-azuredisk-csi-driver) for more information. Since Azure Disks are mounted as ReadWriteOnce, they're only available to a single pod. - - + + **Constellation CSI driver for GCP Persistent Disk**: Mount [Persistent Disk](https://cloud.google.com/persistent-disk) block storage into your Constellation cluster. @@ -36,8 +36,8 @@ This includes support for [volume snapshots](https://cloud.google.com/kubernetes You can use them to bring a volume back to a prior state or provision new volumes. Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-gcp-compute-persistent-disk-csi-driver) for information about the configuration. - - + + Note that in case the options above aren't a suitable solution for you, Constellation is compatible with all other CSI-based storage options. For example, you can use [Azure Files](https://docs.microsoft.com/en-us/azure/storage/files/storage-files-introduction) or [GCP Filestore](https://cloud.google.com/filestore) with Constellation out of the box. Constellation is just not providing transparent encryption on the node level for these storage types yet. @@ -45,8 +45,8 @@ Note that in case the options above aren't a suitable solution for you, Constell The following installation guide gives an overview of how to securely use CSI-based cloud storage for persistent volumes in Constellation. - - + + 1. Install the driver: @@ -56,8 +56,8 @@ The following installation guide gives an overview of how to securely use CSI-ba helm install azuredisk-csi-driver charts/edgeless --namespace kube-system ``` - - + + 1. Install the driver: @@ -66,8 +66,8 @@ The following installation guide gives an overview of how to securely use CSI-ba helm install gcp-compute-persistent-disk-csi-driver charts/ --namespace kube-system ``` - - + + :::info @@ -138,8 +138,8 @@ The default storage class is responsible for all persistent volume claims that d The previous instructions create a storage class with encryption enabled and sets this as the default class. In case you wish to change it, follow the steps below: - - + + 1. List the storage classes in your cluster: @@ -185,8 +185,8 @@ In case you wish to change it, follow the steps below: integrity-encrypted-rwo (default) azuredisk.csi.confidential.cloud Delete Immediate false 1d ``` - - + + 1. 
List the storage classes in your cluster: @@ -232,5 +232,5 @@ In case you wish to change it, follow the steps below: integrity-encrypted-rwo (default) gcp.csi.confidential.cloud Delete Immediate false 1d ``` - - + + diff --git a/docs/versioned_docs/version-2.1/workflows/troubleshooting.md b/docs/versioned_docs/version-2.1/workflows/troubleshooting.md index afc9274c6..ba340601b 100644 --- a/docs/versioned_docs/version-2.1/workflows/troubleshooting.md +++ b/docs/versioned_docs/version-2.1/workflows/troubleshooting.md @@ -8,8 +8,8 @@ To provide information during early stages of the node's boot process, Constella You can view these information in the follow places: - - + + 1. In your Azure subscription find the Constellation resource group. 2. Inside the resource group find the Application Insights resource called `constellation-insights-*`. @@ -19,8 +19,8 @@ You can view these information in the follow places: To **find the disk UUIDs** use the following query: `traces | where message contains "Disk UUID"` - - + + 1. Select the project that hosts Constellation. 2. Go to the `Compute Engine` service. @@ -35,5 +35,5 @@ Constellation uses the default bucket to store logs. Its [default retention peri ::: - - + + diff --git a/docs/versioned_docs/version-2.1/workflows/verify-cli.md b/docs/versioned_docs/version-2.1/workflows/verify-cli.md index 52ed24d95..0a52fedd4 100644 --- a/docs/versioned_docs/version-2.1/workflows/verify-cli.md +++ b/docs/versioned_docs/version-2.1/workflows/verify-cli.md @@ -1,6 +1,6 @@ # Verify the CLI -Edgeless Systems uses [sigstore](https://www.sigstore.dev/) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/cosign/signing/overview/), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at `https://rekor.sigstore.dev`. +Edgeless Systems uses [sigstore](https://www.sigstore.dev/) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/signing/quickstart), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at . :::note The public key for Edgeless Systems' long-term code-signing key is: @@ -12,7 +12,7 @@ JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== -----END PUBLIC KEY----- ``` -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). +The public key is also available for download at and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). ::: The Rekor transparency log is a public append-only ledger that verifies and records signatures and associated metadata. The Rekor transparency log enables everyone to observe the sequence of (software) signatures issued by Edgeless Systems and many other parties. The transparency log allows for the public identification of dubious or malicious signatures. @@ -25,7 +25,7 @@ You don't need to verify the Constellation node images. 
This is done automatical ## Verify the signature -First, [install the Cosign CLI](https://docs.sigstore.dev/cosign/system_config/installation/). Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example: +First, [install the Cosign CLI](https://docs.sigstore.dev/system_config/installation). Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example: ```shell-session $ cosign verify-blob --key https://edgeless.systems/es.pub --signature constellation-linux-amd64.sig constellation-linux-amd64 diff --git a/docs/versioned_docs/version-2.10/architecture/attestation.md b/docs/versioned_docs/version-2.10/architecture/attestation.md index 592063193..07ac3aa72 100644 --- a/docs/versioned_docs/version-2.10/architecture/attestation.md +++ b/docs/versioned_docs/version-2.10/architecture/attestation.md @@ -121,8 +121,8 @@ Constellation allows to specify in the config which measurements should be enfor Enforcing non-reproducible measurements controlled by the cloud provider means that changes in these values require manual updates to the cluster's config. By default, Constellation only enforces measurements that are stable values produced by the infrastructure or by Constellation directly. - - + + Constellation uses the [vTPM](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch#vtpm) feature of Azure CVMs for runtime measurements. This vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. @@ -152,8 +152,8 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + Constellation uses the [vTPM](https://cloud.google.com/compute/confidential-vm/docs/about-cvm) feature of CVMs on GCP for runtime measurements. Note that this vTPM doesn't run inside the hardware-protected CVM context, but is emulated by the hypervisor. @@ -185,8 +185,8 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + Constellation uses the [vTPM](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html) (NitroTPM) feature of the [AWS Nitro System](http://aws.amazon.com/ec2/nitro/) on AWS for runtime measurements. @@ -217,16 +217,16 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + ### CVM verification To verify the integrity of the received attestation statement, a chain of trust from the CVM technology to the interface providing the statement has to be established. For verification of the CVM technology, Constellation may expose additional options in its config file. - - + + On Azure, AMD SEV-SNP is used to provide runtime encryption to the VMs. An SEV-SNP attestation report is used to establish trust in the vTPM running inside the VM. @@ -248,18 +248,18 @@ You may customize certain parameters for verification of the attestation stateme More explicitly, it controls the verification of the `IDKeyDigest` value in the SEV-SNP attestation report. You can provide a list of accepted key digests and specify a policy on how this list is compared against the reported `IDKeyDigest`. 
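The hunk above describes how the `IDKeyDigest` of the SEV-SNP report is checked against a configurable list on Azure. As a minimal sketch of inspecting the corresponding settings in a generated config file, assuming the fields live under `.provider.azure` (the key names and their location are assumptions for illustration, not confirmed by this diff):

```bash
# Sketch: print the Azure attestation settings from the generated config.
# Key names (idKeyDigest, enforceIdKeyDigest) and their path are assumptions.
yq '.provider.azure' constellation-conf.yaml | grep -iE 'idKeyDigest|enforceIdKeyDigest'
```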
- - + + There is no additional configuration available for GCP. - - + + There is no additional configuration available for AWS. - - + + ## Cluster attestation diff --git a/docs/versioned_docs/version-2.10/architecture/keys.md b/docs/versioned_docs/version-2.10/architecture/keys.md index 553d9d4e2..f2c8c3fba 100644 --- a/docs/versioned_docs/version-2.10/architecture/keys.md +++ b/docs/versioned_docs/version-2.10/architecture/keys.md @@ -105,7 +105,7 @@ Initially, it will support the following KMSs: * [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/#product-overview) * [KMIP-compatible KMS](https://www.oasis-open.org/committees/tc_home.php?wg_abbrev=kmip) -Storing the keys in Cloud KMS of AWS, Azure, or GCP binds the key usage to the particular cloud identity access management (IAM). +Storing the keys in Cloud KMS of AWS, GCP, or Azure binds the key usage to the particular cloud identity access management (IAM). In the future, Constellation will support remote attestation-based access policies for Cloud KMS once available. Note that using a Cloud KMS limits the isolation and protection to the guarantees of the particular offering. diff --git a/docs/versioned_docs/version-2.10/getting-started/first-steps-local.md b/docs/versioned_docs/version-2.10/getting-started/first-steps-local.md index a6e825906..de9c66e9b 100644 --- a/docs/versioned_docs/version-2.10/getting-started/first-steps-local.md +++ b/docs/versioned_docs/version-2.10/getting-started/first-steps-local.md @@ -45,8 +45,8 @@ sudo iptables -P FORWARD ACCEPT ## Create a cluster - - + + With the `constellation mini` command, you can deploy and test Constellation locally. This mode is called MiniConstellation. Conceptually, MiniConstellation is similar to [MicroK8s](https://microk8s.io/), [K3s](https://k3s.io/), and [minikube](https://minikube.sigs.k8s.io/docs/). @@ -74,8 +74,8 @@ constellation mini up This will configure your current directory as the [workspace](../architecture/orchestration.md#workspaces) for this cluster. All `constellation` commands concerning this cluster need to be issued from this directory. - - + + With the QEMU provider, you can create a local Constellation cluster as if it were in the cloud. The provider uses [QEMU](https://www.qemu.org/) to create multiple VMs for the cluster nodes, which interact with each other. @@ -151,8 +151,8 @@ attaching persistent storage, or autoscaling aren't available. export KUBECONFIG="$PWD/constellation-admin.conf" ``` - - + + ## Connect to the cluster @@ -205,8 +205,8 @@ worker-0 Ready 32s v1.24.6 ## Terminate your cluster - - + + Once you are done, you can clean up the created resources using the following command: @@ -217,8 +217,8 @@ constellation mini down This will destroy your cluster and clean up your workspace. The VM image and cluster configuration file (`constellation-conf.yaml`) will be kept and may be reused to create new clusters. - - + + Once you are done, you can clean up the created resources using the following command: @@ -246,8 +246,8 @@ Your Constellation cluster was terminated successfully. This will destroy your cluster and clean up your workspace. The VM image and cluster configuration file (`constellation-conf.yaml`) will be kept and may be reused to create new clusters. 
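The MiniConstellation hunks above walk through creating, using, and tearing down a local test cluster. A compact sketch of that lifecycle, using only commands already shown in these docs:

```bash
# Create a local MiniConstellation cluster in the current workspace.
constellation mini up

# Use the generated admin kubeconfig to check that the nodes joined.
export KUBECONFIG="$PWD/constellation-admin.conf"
kubectl get nodes

# Tear the cluster down again; the config file is kept for reuse.
constellation mini down
```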
- - + + ## Troubleshooting diff --git a/docs/versioned_docs/version-2.10/getting-started/first-steps.md b/docs/versioned_docs/version-2.10/getting-started/first-steps.md index 4420eb708..0b224e04d 100644 --- a/docs/versioned_docs/version-2.10/getting-started/first-steps.md +++ b/docs/versioned_docs/version-2.10/getting-started/first-steps.md @@ -15,39 +15,39 @@ If you encounter any problem with the following steps, make sure to use the [lat 1. Create the [configuration file](../workflows/config.md) for your cloud provider. - + - + ```bash constellation config generate azure ``` - + - + ```bash constellation config generate gcp ``` - + - + ```bash constellation config generate aws ``` - + - + 2. Create your [IAM configuration](../workflows/config.md#creating-an-iam-configuration). - + - + ```bash constellation iam create azure --region=westus --resourceGroup=constellTest --servicePrincipal=spTest --update-config @@ -62,21 +62,21 @@ If you encounter any problem with the following steps, make sure to use the [lat * `westeurope` * `southeastasia` - + - + ```bash - constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --serviceAccountID=constell-test --update-config + constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west2-a --serviceAccountID=constell-test --update-config ``` - This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. + This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west2-a` creating a new service account `constell-test`. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `C2D` or `N2D`. - + - + ```bash constellation iam create aws --zone=us-east-2a --prefix=constellTest --update-config @@ -103,8 +103,8 @@ If you encounter any problem with the following steps, make sure to use the [lat You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). - - + + :::tip To learn about all options you have for managing IAM resources and Constellation configuration, see the [Configuration workflow](../workflows/config.md). diff --git a/docs/versioned_docs/version-2.10/getting-started/install.md b/docs/versioned_docs/version-2.10/getting-started/install.md index c21ad259c..4debbca9a 100644 --- a/docs/versioned_docs/version-2.10/getting-started/install.md +++ b/docs/versioned_docs/version-2.10/getting-started/install.md @@ -11,15 +11,15 @@ Make sure the following requirements are met: - Your machine is running Linux or macOS - You have admin rights on your machine - [kubectl](https://kubernetes.io/docs/tasks/tools/) is installed -- Your CSP is Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) +- Your CSP is Microsoft Azure, Google Cloud Platform (GCP), or Amazon Web Services (AWS) ## Install the Constellation CLI The CLI executable is available at [GitHub](https://github.com/edgelesssys/constellation/releases). 
Install it with the following commands: - - + + 1. Download the CLI: @@ -35,8 +35,8 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-linux-amd64 /usr/local/bin/constellation ``` - - + + 1. Download the CLI: @@ -52,9 +52,10 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-linux-arm64 /usr/local/bin/constellation ``` - - + + + 1. Download the CLI: @@ -70,9 +71,11 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-darwin-arm64 /usr/local/bin/constellation ``` - - + + + + 1. Download the CLI: @@ -88,8 +91,8 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-darwin-amd64 /usr/local/bin/constellation ``` - - + + :::tip The CLI supports autocompletion for various shells. To set it up, run `constellation completion` and follow the given steps. @@ -105,42 +108,39 @@ If you don't have a cloud subscription, you can also set up a [local Constellati ### Required permissions - - + + The following [resource providers need to be registered](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/resource-providers-and-types#register-resource-provider) in your subscription: - -- `Microsoft.Attestation` -- `Microsoft.Compute` -- `Microsoft.Insights` -- `Microsoft.ManagedIdentity` -- `Microsoft.Network` +* `Microsoft.Attestation` \[2] +* `Microsoft.Compute` +* `Microsoft.Insights` +* `Microsoft.ManagedIdentity` +* `Microsoft.Network` By default, Constellation tries to register these automatically if they haven't been registered before. To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: - -- `*/register/action` \[1] -- `Microsoft.Authorization/roleAssignments/*` -- `Microsoft.Authorization/roleDefinitions/*` -- `Microsoft.ManagedIdentity/userAssignedIdentities/*` -- `Microsoft.Resources/subscriptions/resourcegroups/*` +* `*/register/action` \[1] +* `Microsoft.Authorization/roleAssignments/*` +* `Microsoft.Authorization/roleDefinitions/*` +* `Microsoft.ManagedIdentity/userAssignedIdentities/*` +* `Microsoft.Resources/subscriptions/resourcegroups/*` The built-in `Owner` role is a superset of these permissions. 
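Since the section above notes that Constellation registers these resource providers automatically unless the corresponding permission is omitted, here is a hedged sketch of pre-registering them yourself with the Azure CLI (provider names taken from the list above):

```bash
# Check and, if needed, register the resource providers Constellation relies on.
for ns in Microsoft.Attestation Microsoft.Compute Microsoft.Insights Microsoft.ManagedIdentity Microsoft.Network; do
  state=$(az provider show --namespace "$ns" --query registrationState -o tsv)
  echo "$ns: $state"
  [ "$state" != "Registered" ] && az provider register --namespace "$ns"
done
```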
To [create a Constellation cluster](../workflows/create.md#the-create-step), you need the following permissions: - -- `Microsoft.Attestation/attestationProviders/*` -- `Microsoft.Compute/virtualMachineScaleSets/*` -- `Microsoft.Insights/components/*` -- `Microsoft.ManagedIdentity/userAssignedIdentities/*` -- `Microsoft.Network/loadBalancers/*` -- `Microsoft.Network/loadBalancers/backendAddressPools/*` -- `Microsoft.Network/networkSecurityGroups/*` -- `Microsoft.Network/publicIPAddresses/*` -- `Microsoft.Network/virtualNetworks/*` -- `Microsoft.Network/virtualNetworks/subnets/*` -- `Microsoft.Network/natGateways/*` +* `Microsoft.Attestation/attestationProviders/*` \[2] +* `Microsoft.Compute/virtualMachineScaleSets/*` +* `Microsoft.Insights/components/*` +* `Microsoft.ManagedIdentity/userAssignedIdentities/*` +* `Microsoft.Network/loadBalancers/*` +* `Microsoft.Network/loadBalancers/backendAddressPools/*` +* `Microsoft.Network/networkSecurityGroups/*` +* `Microsoft.Network/publicIPAddresses/*` +* `Microsoft.Network/virtualNetworks/*` +* `Microsoft.Network/virtualNetworks/subnets/*` +* `Microsoft.Network/natGateways/*` The built-in `Contributor` role is a superset of these permissions. @@ -148,91 +148,91 @@ Follow Microsoft's guide on [understanding](https://learn.microsoft.com/en-us/az 1: You can omit `*/register/Action` if the resource providers mentioned above are already registered and the `ARM_SKIP_PROVIDER_REGISTRATION` environment variable is set to `true` when creating the IAM configuration. - - +2: You can omit `Microsoft.Attestation/attestationProviders/*` and the registration of `Microsoft.Attestation` if `EnforceIDKeyDigest` isn't set to `MAAFallback` in the [config file](../workflows/config.md#configure-your-cluster). + + + Create a new project for Constellation or use an existing one. Enable the [Compute Engine API](https://console.cloud.google.com/apis/library/compute.googleapis.com) on it. To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: - -- `iam.serviceAccountKeys.create` -- `iam.serviceAccountKeys.delete` -- `iam.serviceAccountKeys.get` -- `iam.serviceAccounts.create` -- `iam.serviceAccounts.delete` -- `iam.serviceAccounts.get` -- `resourcemanager.projects.getIamPolicy` -- `resourcemanager.projects.setIamPolicy` +* `iam.serviceAccountKeys.create` +* `iam.serviceAccountKeys.delete` +* `iam.serviceAccountKeys.get` +* `iam.serviceAccounts.create` +* `iam.serviceAccounts.delete` +* `iam.serviceAccounts.get` +* `resourcemanager.projects.getIamPolicy` +* `resourcemanager.projects.setIamPolicy` Together, the built-in roles `roles/editor` and `roles/resourcemanager.projectIamAdmin` form a superset of these permissions. 
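As the paragraph above notes, `roles/editor` together with `roles/resourcemanager.projectIamAdmin` covers the listed permissions. A sketch of granting both roles to the user who will run `constellation iam create gcp` (project ID and member are placeholders):

```bash
PROJECT_ID="yourproject-12345"   # placeholder
MEMBER="user:alice@example.com"  # placeholder

gcloud projects add-iam-policy-binding "$PROJECT_ID" --member="$MEMBER" --role="roles/editor"
gcloud projects add-iam-policy-binding "$PROJECT_ID" --member="$MEMBER" --role="roles/resourcemanager.projectIamAdmin"
```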
To [create a Constellation cluster](../workflows/create.md#the-create-step), you need the following permissions: - -- `compute.addresses.createInternal` -- `compute.addresses.deleteInternal` -- `compute.addresses.get` -- `compute.addresses.useInternal` -- `compute.backendServices.create` -- `compute.backendServices.delete` -- `compute.backendServices.get` -- `compute.backendServices.use` -- `compute.disks.create` -- `compute.firewalls.create` -- `compute.firewalls.delete` -- `compute.firewalls.get` -- `compute.globalAddresses.create` -- `compute.globalAddresses.delete` -- `compute.globalAddresses.get` -- `compute.globalAddresses.use` -- `compute.globalForwardingRules.create` -- `compute.globalForwardingRules.delete` -- `compute.globalForwardingRules.get` -- `compute.globalForwardingRules.setLabels` -- `compute.globalOperations.get` -- `compute.healthChecks.create` -- `compute.healthChecks.delete` -- `compute.healthChecks.get` -- `compute.healthChecks.useReadOnly` -- `compute.instanceGroupManagers.create` -- `compute.instanceGroupManagers.delete` -- `compute.instanceGroupManagers.get` -- `compute.instanceGroups.create` -- `compute.instanceGroups.delete` -- `compute.instanceGroups.get` -- `compute.instanceGroups.use` -- `compute.instances.create` -- `compute.instances.setLabels` -- `compute.instances.setMetadata` -- `compute.instances.setTags` -- `compute.instanceTemplates.create` -- `compute.instanceTemplates.delete` -- `compute.instanceTemplates.get` -- `compute.instanceTemplates.useReadOnly` -- `compute.networks.create` -- `compute.networks.delete` -- `compute.networks.get` -- `compute.networks.updatePolicy` -- `compute.routers.create` -- `compute.routers.delete` -- `compute.routers.get` -- `compute.routers.update` -- `compute.subnetworks.create` -- `compute.subnetworks.delete` -- `compute.subnetworks.get` -- `compute.subnetworks.use` -- `compute.targetTcpProxies.create` -- `compute.targetTcpProxies.delete` -- `compute.targetTcpProxies.get` -- `compute.targetTcpProxies.use` -- `iam.serviceAccounts.actAs` +* `compute.addresses.createInternal` +* `compute.addresses.deleteInternal` +* `compute.addresses.get` +* `compute.addresses.useInternal` +* `compute.backendServices.create` +* `compute.backendServices.delete` +* `compute.backendServices.get` +* `compute.backendServices.use` +* `compute.disks.create` +* `compute.firewalls.create` +* `compute.firewalls.delete` +* `compute.firewalls.get` +* `compute.globalAddresses.create` +* `compute.globalAddresses.delete` +* `compute.globalAddresses.get` +* `compute.globalAddresses.use` +* `compute.globalForwardingRules.create` +* `compute.globalForwardingRules.delete` +* `compute.globalForwardingRules.get` +* `compute.globalForwardingRules.setLabels` +* `compute.globalOperations.get` +* `compute.healthChecks.create` +* `compute.healthChecks.delete` +* `compute.healthChecks.get` +* `compute.healthChecks.useReadOnly` +* `compute.instanceGroupManagers.create` +* `compute.instanceGroupManagers.delete` +* `compute.instanceGroupManagers.get` +* `compute.instanceGroups.create` +* `compute.instanceGroups.delete` +* `compute.instanceGroups.get` +* `compute.instanceGroups.use` +* `compute.instances.create` +* `compute.instances.setLabels` +* `compute.instances.setMetadata` +* `compute.instances.setTags` +* `compute.instanceTemplates.create` +* `compute.instanceTemplates.delete` +* `compute.instanceTemplates.get` +* `compute.instanceTemplates.useReadOnly` +* `compute.networks.create` +* `compute.networks.delete` +* `compute.networks.get` +* 
`compute.networks.updatePolicy` +* `compute.routers.create` +* `compute.routers.delete` +* `compute.routers.get` +* `compute.routers.update` +* `compute.subnetworks.create` +* `compute.subnetworks.delete` +* `compute.subnetworks.get` +* `compute.subnetworks.use` +* `compute.targetTcpProxies.create` +* `compute.targetTcpProxies.delete` +* `compute.targetTcpProxies.get` +* `compute.targetTcpProxies.use` +* `iam.serviceAccounts.actAs` Together, the built-in roles `roles/editor`, `roles/compute.instanceAdmin` and `roles/resourcemanager.projectIamAdmin` form a superset of these permissions. Follow Google's guide on [understanding](https://cloud.google.com/iam/docs/understanding-roles) and [assigning roles](https://cloud.google.com/iam/docs/granting-changing-revoking-access). - - + + To set up a Constellation cluster, you need to perform two tasks that require permissions: create the infrastructure and create roles for cluster nodes. Both of these actions can be performed by different users, e.g., an administrator to create roles and a DevOps engineer to create the infrastructure. @@ -278,12 +278,13 @@ The built-in `AdministratorAccess` policy is a superset of these permissions. To [create a Constellation cluster](../workflows/create.md#the-create-step), see the permissions of [main.tf](https://github.com/edgelesssys/constellation/blob/main/terraform/infrastructure/iam/aws/main.tf). + The built-in `PowerUserAccess` policy is a superset of these permissions. Follow Amazon's guide on [understanding](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) and [managing policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html). - - + + ### Authentication @@ -293,8 +294,8 @@ You need to authenticate with your CSP. The following lists the required steps f The steps for a *testing* environment are simpler. However, they may expose secrets to the CSP. If in doubt, follow the *production* steps. ::: - - + + **Testing** @@ -310,8 +311,8 @@ az login Other options are described in Azure's [authentication guide](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli). - - + + **Testing** @@ -334,8 +335,8 @@ Use one of the following options on a trusted machine: Follow [Google's guide](https://cloud.google.com/docs/authentication/production#manually) for setting up your credentials. - - + + **Testing** @@ -351,9 +352,10 @@ aws configure Options and first steps are described in the [AWS CLI documentation](https://docs.aws.amazon.com/cli/index.html). - + - + + ## Next steps diff --git a/docs/versioned_docs/version-2.10/overview/clouds.md b/docs/versioned_docs/version-2.10/overview/clouds.md index dfc3d5307..3ccbb0d6d 100644 --- a/docs/versioned_docs/version-2.10/overview/clouds.md +++ b/docs/versioned_docs/version-2.10/overview/clouds.md @@ -31,11 +31,11 @@ This firmware is signed by Azure. The signature is reflected in the remote-attestation statements of CVMs. Thus, the Azure closed-source firmware becomes part of Constellation's trusted computing base (TCB). -\* Recently, [Azure announced the open source paravisor OpenHCL](https://techcommunity.microsoft.com/blog/windowsosplatform/openhcl-the-new-open-source-paravisor/4273172). It's the foundation for fully open source and verifiable CVM firmware. Once Azure provides their CVM firmware with reproducible builds based on OpenHCL, (4) switches from *No* to *Yes*. Constellation will support OpenHCL based firmware on Azure in the future. 
+\* Recently, Azure [announced](https://techcommunity.microsoft.com/t5/azure-confidential-computing/azure-confidential-vms-using-sev-snp-dcasv5-ecasv5-are-now/ba-p/3573747) the *limited preview* of CVMs with customizable firmware. With this CVM type, (4) switches from *No* to *Yes*. Constellation will support customizable firmware on Azure in the future. ## Google Cloud Platform (GCP) -The [CVMs Generally Available in GCP](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview#amd_sev) are based on AMD SEV but don't have SNP features enabled. +The [CVMs Generally Available in GCP](https://cloud.google.com/compute/confidential-vm/docs/create-confidential-vm-instance) are based on AMD SEV but don't have SNP features enabled. CVMs with SEV-SNP enabled are currently in [private preview](https://cloud.google.com/blog/products/identity-security/rsa-snp-vm-more-confidential). Regarding (3), with their SEV-SNP offering Google provides direct access to remote-attestation statements. However, regarding (4), the CVMs still include closed-source firmware. diff --git a/docs/versioned_docs/version-2.10/overview/confidential-kubernetes.md b/docs/versioned_docs/version-2.10/overview/confidential-kubernetes.md index 1441c833a..2b6c6ed17 100644 --- a/docs/versioned_docs/version-2.10/overview/confidential-kubernetes.md +++ b/docs/versioned_docs/version-2.10/overview/confidential-kubernetes.md @@ -23,9 +23,9 @@ With the above, Constellation wraps an entire cluster into one coherent and veri ![Confidential Kubernetes](../_media/concept-constellation.svg) -## Comparison: Managed Kubernetes with CVMs +## Contrast: Managed Kubernetes with CVMs -In comparison, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. Consequently, this approach leaves a large attack surface, as is depicted in the following. +In contrast, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. Consequently, this approach leaves a large attack surface, as is depicted in the following. 
![Concept: Managed Kubernetes plus CVMs](../_media/concept-managed.svg) diff --git a/docs/versioned_docs/version-2.10/overview/performance/compute.md b/docs/versioned_docs/version-2.10/overview/performance/compute.md deleted file mode 100644 index 88dd4b1b2..000000000 --- a/docs/versioned_docs/version-2.10/overview/performance/compute.md +++ /dev/null @@ -1,11 +0,0 @@ -# Impact of runtime encryption on compute performance - -All nodes in a Constellation cluster are executed inside Confidential VMs (CVMs). Consequently, the performance of Constellation is inherently linked to the performance of these CVMs. - -## AMD and Azure benchmarking - -AMD and Azure have collectively released a [performance benchmark](https://community.amd.com/t5/business/microsoft-azure-confidential-computing-powered-by-3rd-gen-epyc/ba-p/497796) for CVMs that utilize 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. This benchmark, which included a variety of mostly compute-intensive tests such as SPEC CPU 2017 and CoreMark, demonstrated that CVMs experience only minor performance degradation (ranging from 2% to 8%) when compared to standard VMs. Such results are indicative of the performance that can be expected from compute-intensive workloads running with Constellation on Azure. - -## AMD and Google benchmarking - -Similarly, AMD and Google have jointly released a [performance benchmark](https://www.amd.com/system/files/documents/3rd-gen-epyc-gcp-c2d-conf-compute-perf-brief.pdf) for CVMs employing 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. With high-performance computing workloads such as WRF, NAMD, Ansys CFS, and Ansys LS_DYNA, they observed analogous findings, with only minor performance degradation (between 2% and 4%) compared to standard VMs. These outcomes are reflective of the performance that can be expected for compute-intensive workloads running with Constellation on GCP. diff --git a/docs/versioned_docs/version-2.10/overview/performance/io.md b/docs/versioned_docs/version-2.10/overview/performance/io.md index 3ae796f8a..dc7cf3d8b 100644 --- a/docs/versioned_docs/version-2.10/overview/performance/io.md +++ b/docs/versioned_docs/version-2.10/overview/performance/io.md @@ -58,7 +58,7 @@ The following infrastructure configurations was used: This section gives a thorough analysis of the network performance of Constellation, specifically focusing on measuring TCP and UDP bandwidth. The benchmark measured the bandwidth of pod-to-pod and pod-to-service connections between two different nodes using [`iperf`](https://iperf.fr/). -GKE and Constellation on GCP had a maximum network bandwidth of [10 Gbps](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines). +GKE and Constellation on GCP had a maximum network bandwidth of [10 Gbps](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machineshttps://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines). AKS with `Standard_D4as_v5` machines a maximum network bandwidth of [12.5 Gbps](https://learn.microsoft.com/en-us/azure/virtual-machines/dasv5-dadsv5-series#dasv5-series). The Confidential VM equivalent `Standard_DC4as_v5` currently has a network bandwidth of [1.25 Gbps](https://learn.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series#dcasv5-series-products). Therefore, to make the test comparable, both AKS and Constellation on Azure were running with `Standard_DC4as_v5` machines and 1.25 Gbps bandwidth. 
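The io.md hunk above describes measuring pod-to-pod bandwidth with `iperf`. A rough sketch of reproducing such a measurement with two pods; the container image and pod names are assumptions chosen for illustration:

```bash
# Start an iperf3 server pod (image name is an assumption).
kubectl run iperf-server --image=networkstatic/iperf3 -- -s

# Wait for it to become ready and grab its pod IP.
kubectl wait --for=condition=Ready pod/iperf-server
SERVER_IP=$(kubectl get pod iperf-server -o jsonpath='{.status.podIP}')

# Run a client pod against it and print the measured bandwidth.
kubectl run iperf-client --rm -it --restart=Never --image=networkstatic/iperf3 -- -c "$SERVER_IP"

# Clean up.
kubectl delete pod iperf-server
```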
diff --git a/docs/versioned_docs/version-2.10/overview/performance/performance.md b/docs/versioned_docs/version-2.10/overview/performance/performance.md index 59bf86602..7f22a693e 100644 --- a/docs/versioned_docs/version-2.10/overview/performance/performance.md +++ b/docs/versioned_docs/version-2.10/overview/performance/performance.md @@ -1,10 +1,18 @@ # Performance analysis of Constellation -This section provides a comprehensive examination of the performance characteristics of Constellation. +This section provides a comprehensive examination of the performance characteristics of Constellation, encompassing various aspects, including runtime encryption, I/O benchmarks, and real-world applications. -## Runtime encryption +## Impact of runtime encryption on performance -Runtime encryption affects compute performance. [Benchmarks by Azure and Google](compute.md) show that the performance degradation of Confidential VMs (CVMs) is small, ranging from 2% to 8% for compute-intensive workloads. +All nodes in a Constellation cluster are executed inside Confidential VMs (CVMs). Consequently, the performance of Constellation is inherently linked to the performance of these CVMs. + +### AMD and Azure benchmarking + +AMD and Azure have collectively released a [performance benchmark](https://community.amd.com/t5/business/microsoft-azure-confidential-computing-powered-by-3rd-gen-epyc/ba-p/497796) for CVMs that utilize 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. This benchmark, which included a variety of mostly compute-intensive tests such as SPEC CPU 2017 and CoreMark, demonstrated that CVMs experience only minor performance degradation (ranging from 2% to 8%) when compared to standard VMs. Such results are indicative of the performance that can be expected from compute-intensive workloads running with Constellation on Azure. + +### AMD and Google benchmarking + +Similarly, AMD and Google have jointly released a [performance benchmark](https://www.amd.com/system/files/documents/3rd-gen-epyc-gcp-c2d-conf-compute-perf-brief.pdf) for CVMs employing 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. With high-performance computing workloads such as WRF, NAMD, Ansys CFS, and Ansys LS_DYNA, they observed analogous findings, with only minor performance degradation (between 2% and 4%) compared to standard VMs. These outcomes are reflective of the performance that can be expected for compute-intensive workloads running with Constellation on GCP. ## I/O performance benchmarks diff --git a/docs/versioned_docs/version-2.10/overview/product.md b/docs/versioned_docs/version-2.10/overview/product.md index e42596fcc..ba7181aa9 100644 --- a/docs/versioned_docs/version-2.10/overview/product.md +++ b/docs/versioned_docs/version-2.10/overview/product.md @@ -6,6 +6,6 @@ From a security perspective, Constellation implements the [Confidential Kubernet From an operational perspective, Constellation provides the following key features: -* **Native support for different clouds**: Constellation works on Amazon Web Services (AWS), Microsoft Azure, and Google Cloud Platform (GCP). Support for OpenStack-based environments is coming with a future release. Constellation securely interfaces with the cloud infrastructure to provide [cluster autoscaling](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), [dynamic persistent volumes](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/), and [service load balancing](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). 
+* **Native support for different clouds**: Constellation works on Microsoft Azure, Google Cloud Platform (GCP), and Amazon Web Services (AWS). Support for OpenStack-based environments is coming with a future release. Constellation securely interfaces with the cloud infrastructure to provide [cluster autoscaling](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), [dynamic persistent volumes](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/), and [service load balancing](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). * **High availability**: Constellation uses a [multi-master architecture](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/) with a [stacked etcd topology](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/ha-topology/#stacked-etcd-topology) to ensure high availability. * **Integrated Day-2 operations**: Constellation lets you securely [upgrade](../workflows/upgrade.md) your cluster to a new release. It also lets you securely [recover](../workflows/recovery.md) a failed cluster. Both with a single command. diff --git a/docs/versioned_docs/version-2.10/workflows/config.md b/docs/versioned_docs/version-2.10/workflows/config.md index edc3c9091..95f95aeec 100644 --- a/docs/versioned_docs/version-2.10/workflows/config.md +++ b/docs/versioned_docs/version-2.10/workflows/config.md @@ -4,7 +4,7 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + --- @@ -14,49 +14,49 @@ Before you can create your cluster, you need to configure the identity and acces You can generate a configuration file for your CSP by using the following CLI command: - - + + ```bash constellation config generate azure ``` - - + + ```bash constellation config generate gcp ``` - - + + ```bash constellation config generate aws ``` - - + + This creates the file `constellation-conf.yaml` in the current directory. ## Choosing a VM type Constellation supports the following VM types: - - + + By default, Constellation uses `Standard_DC4as_v5` CVMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. For CVMs, any VM type with a minimum of 4 vCPUs from the [DCasv5 & DCadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series) or [ECasv5 & ECadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/ecasv5-ecadsv5-series) families is supported. You can also run `constellation config instance-types` to get the list of all supported options. - - + + By default, Constellation uses `n2d-standard-4` VMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. Supported are all machines with a minimum of 4 vCPUs from the [C2D](https://cloud.google.com/compute/docs/compute-optimized-machines#c2d_machine_types) or [N2D](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines) family. You can run `constellation config instance-types` to get the list of all supported options. - - + + By default, Constellation uses `m6a.xlarge` VMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. @@ -75,8 +75,8 @@ AWS is currently investigating the issue. 
SNP-based attestation will be enabled as soon as a fix is verified. ::: - - + + Fill the desired VM type into the **instanceType** fields in the `constellation-conf.yml` file. @@ -86,6 +86,7 @@ By default, Constellation creates the node groups `control_plane_default` and `w If you require additional control-plane or worker groups with different instance types, zone placements, or disk sizes, you can add additional node groups to the `constellation-conf.yml` file. Each node group can be scaled individually. + Consider the following example for AWS: ```yaml @@ -119,9 +120,9 @@ You can use the field `zone` to specify what availability zone nodes of the grou On Azure, this field is empty by default and nodes are automatically spread across availability zones. Consult the documentation of your cloud provider for more information: -* [AWS](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/) -* [Azure](https://azure.microsoft.com/en-us/explore/global-infrastructure/availability-zones) -* [GCP](https://cloud.google.com/compute/docs/regions-zones) +- [AWS](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/) +- [Azure](https://azure.microsoft.com/en-us/explore/global-infrastructure/availability-zones) +- [GCP](https://cloud.google.com/compute/docs/regions-zones) ## Choosing a Kubernetes version @@ -133,8 +134,8 @@ See also Constellation's [Kubernetes support policy](../architecture/versions.md You can create an IAM configuration for your cluster automatically using the `constellation iam create` command. If you already have a Constellation configuration file, you can add the `--update-config` flag to the command. This writes the needed IAM fields into your configuration. Furthermore, the flag updates the zone/region of the configuration if it hasn't been set yet. - - + + You must be authenticated with the [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). @@ -154,23 +155,23 @@ Note that CVMs are currently only supported in a few regions, check [Azure's pro Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - + + You must be authenticated with the [GCP CLI](https://cloud.google.com/sdk/gcloud) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). ```bash -constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --serviceAccountID=constell-test +constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west2-a --serviceAccountID=constell-test ``` -This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. +This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west2-a` creating a new service account `constell-test`. Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `N2D`. Paste the output into the corresponding fields of the `constellation-conf.yaml` file. 
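Following the GCP instructions in the hunk above, a short sketch of creating the IAM resources and letting the CLI write them into the config in one step; `--update-config` is the flag already shown elsewhere in this diff, the zone and project values are placeholders, and the `.provider.gcp` path used for the check is an assumption:

```bash
constellation iam create gcp \
  --projectID=yourproject-12345 \
  --zone=europe-west3-a \
  --serviceAccountID=constell-test \
  --update-config

# Sanity-check that the IAM fields were filled in (key path is an assumption).
yq '.provider.gcp' constellation-conf.yaml
```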
- - + + You must be authenticated with the [AWS CLI](https://aws.amazon.com/en/cli/) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). @@ -194,16 +195,16 @@ You can find a list of all [regions in AWS's documentation](https://docs.aws.ama Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - + +
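Before running the AWS variant shown above, it can help to confirm which identity the AWS CLI is using; a small sketch, with the `iam create` flags copied from the hunk above:

```bash
# Confirm the authenticated AWS identity that needs the required permissions.
aws sts get-caller-identity

# Create the IAM configuration and write it into constellation-conf.yaml.
constellation iam create aws --zone=us-east-2a --prefix=constellTest --update-config
```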
Alternatively, you can manually create the IAM configuration on your CSP. The following describes the configuration fields and how you obtain the required information or create the required resources. - - + + * **subscription**: The UUID of your Azure subscription, e.g., `8b8bd01f-efd9-4113-9bd1-c82137c32da7`. @@ -232,19 +233,19 @@ The following describes the configuration fields and how you obtain the required The user-assigned identity is used by instances of the cluster to access other cloud resources. For more information about managed identities refer to [Azure's documentation](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/how-manage-user-assigned-managed-identities). - + - + * **project**: The ID of your GCP project, e.g., `constellation-129857`. You can find it on the [welcome screen of your GCP project](https://console.cloud.google.com/welcome). For more information refer to [Google's documentation](https://support.google.com/googleapi/answer/7014113). -* **region**: The GCP region you want to deploy your cluster in, e.g., `us-central1`. +* **region**: The GCP region you want to deploy your cluster in, e.g., `us-west1`. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). -* **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-central1-a`. +* **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-west1-a`. You can find a [list of all zones in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). @@ -258,9 +259,9 @@ The following describes the configuration fields and how you obtain the required Afterward, create and download a new JSON key for this service account. Place the downloaded file in your Constellation workspace, and set the config parameter to the filename, e.g., `constellation-129857-15343dba46cb.json`. - + - + * **region**: The name of your chosen AWS data center region, e.g., `us-east-2`. @@ -291,9 +292,9 @@ The following describes the configuration fields and how you obtain the required Alternatively, you can create the AWS profile with a tool of your choice. Use the JSON policy in [main.tf](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam/main.tf) in the resource `aws_iam_policy.worker_node_policy`. - + - +
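For the manual AWS route described above, where the worker-node policy from `main.tf` is applied by hand, a hedged sketch with the AWS CLI; the policy name, user name, account ID, and `policy.json` file are placeholders, with `policy.json` assumed to contain the JSON policy referenced above:

```bash
# Create the policy from the JSON document referenced in main.tf (placeholder file name).
aws iam create-policy --policy-name constellation-worker-node-policy --policy-document file://policy.json

# Attach it to the IAM user that Constellation will use (placeholder user and account ID).
aws iam attach-user-policy \
  --user-name constellation-user \
  --policy-arn arn:aws:iam::123456789012:policy/constellation-worker-node-policy
```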
Now that you've configured your CSP, you can [create your cluster](./create.md). diff --git a/docs/versioned_docs/version-2.10/workflows/create.md b/docs/versioned_docs/version-2.10/workflows/create.md index dd56bc8b7..c0e0cd23d 100644 --- a/docs/versioned_docs/version-2.10/workflows/create.md +++ b/docs/versioned_docs/version-2.10/workflows/create.md @@ -4,7 +4,7 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + --- @@ -26,8 +26,8 @@ Before you create the cluster, make sure to have a [valid configuration file](./ ### Create - - + + ```bash constellation create @@ -35,8 +35,8 @@ constellation create *create* stores your cluster's state in a [`constellation-terraform`](../architecture/orchestration.md#cluster-creation-process) directory in your workspace. - - + + Terraform allows for an easier GitOps integration as well as meeting regulatory requirements. Since the Constellation CLI also uses Terraform under the hood, you can reuse the same Terraform files. @@ -75,8 +75,8 @@ CONSTELL_CSP=$(cat constellation-conf.yaml | yq ".provider | keys | .[0]") jq --null-input --arg cloudprovider "$CONSTELL_CSP" --arg ip "$CONSTELL_IP" --arg initsecret "$CONSTELL_INIT_SECRET" '{"cloudprovider":$cloudprovider,"ip":$ip,"initsecret":$initsecret}' > constellation-id.json ``` - - + + ## The *init* step diff --git a/docs/versioned_docs/version-2.10/workflows/recovery.md b/docs/versioned_docs/version-2.10/workflows/recovery.md index 35596b8c9..c26fb32eb 100644 --- a/docs/versioned_docs/version-2.10/workflows/recovery.md +++ b/docs/versioned_docs/version-2.10/workflows/recovery.md @@ -16,8 +16,8 @@ You can check the health status of the nodes via the cloud service provider (CSP Constellation provides logging information on the boot process and status via [cloud logging](troubleshooting.md#cloud-logging). In the following, you'll find detailed descriptions for identifying clusters stuck in recovery for each CSP. - - + + In the Azure portal, find the cluster's resource group. Inside the resource group, open the control plane *Virtual machine scale set* `constellation-scale-set-controlplanes-`. @@ -51,8 +51,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. - - + + First, check that the control plane *Instance Group* has enough members in a *Ready* state. In the GCP Console, go to **Instance Groups** and check the group for the cluster's control plane `-control-plane-`. @@ -87,8 +87,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. - - + + First, open the AWS console to view all Auto Scaling Groups (ASGs) in the region of your cluster. Select the ASG of the control plane `--control-plane` and check that enough members are in a *Running* state. @@ -118,8 +118,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. 
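Once an unhealthy control plane has been identified as described above, recovery is driven from the CLI workspace (the `## Recover a cluster` section in the next hunk). A minimal sketch, assuming the flag-less invocation reads the master secret and endpoint from the workspace files; check `constellation recover --help` for the flags of your CLI version:

```bash
# Run from the cluster's workspace directory; the CLI is assumed to pick up
# the master secret and endpoint from the workspace files.
constellation recover
```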
- - + + ## Recover a cluster diff --git a/docs/versioned_docs/version-2.10/workflows/sbom.md b/docs/versioned_docs/version-2.10/workflows/sbom.md index 6c1702dee..9ef6eb65c 100644 --- a/docs/versioned_docs/version-2.10/workflows/sbom.md +++ b/docs/versioned_docs/version-2.10/workflows/sbom.md @@ -1,6 +1,6 @@ # Consume software bill of materials (SBOMs) - + --- @@ -11,15 +11,13 @@ SBOMs for Constellation are generated using [Syft](https://github.com/anchore/sy :::note The public key for Edgeless Systems' long-term code-signing key is: - ``` -----BEGIN PUBLIC KEY----- MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEf8F1hpmwE+YCFXzjGtaQcrL6XZVT JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== -----END PUBLIC KEY----- ``` - -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). +The public key is also available for download at https://edgeless.systems/es.pub and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). Make sure the key is available in a file named `cosign.pub` to execute the following examples. ::: @@ -40,7 +38,7 @@ cosign verify-blob --key cosign.pub --signature constellation.spdx.sbom.sig cons ### Container Images -SBOMs for container images are [attached to the image using Cosign](https://docs.sigstore.dev/cosign/signing/other_types/#sboms-software-bill-of-materials) and uploaded to the same registry. +SBOMs for container images are [attached to the image using Cosign](https://docs.sigstore.dev/signing/other_types#sboms-software-bill-of-materials) and uploaded to the same registry. As a consumer, use cosign to download and verify the SBOM: diff --git a/docs/versioned_docs/version-2.10/workflows/scale.md b/docs/versioned_docs/version-2.10/workflows/scale.md index 63b727c7d..06898ad0c 100644 --- a/docs/versioned_docs/version-2.10/workflows/scale.md +++ b/docs/versioned_docs/version-2.10/workflows/scale.md @@ -51,30 +51,30 @@ kubectl -n kube-system get nodes Alternatively, you can manually scale your cluster up or down: - - + + 1. Find your Constellation resource group. 2. Select the `scale-set-workers`. 3. Go to **settings** and **scaling**. 4. Set the new **instance count** and **save**. - - + + 1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). 2. **Edit** the **worker** instance group. 3. Set the new **number of instances** and **save**. - - + + 1. Go to Auto Scaling Groups and select the worker ASG to scale up. 2. Click **Edit** 3. Set the new (increased) **Desired capacity** and **Update**. - - + + ## Control-plane node scaling @@ -82,30 +82,30 @@ Control-plane nodes can **only be scaled manually and only scaled up**! To increase the number of control-plane nodes, follow these steps: - + - + 1. Find your Constellation resource group. 2. Select the `scale-set-controlplanes`. 3. Go to **settings** and **scaling**. 4. Set the new (increased) **instance count** and **save**. - - + + 1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). 2. **Edit** the **control-plane** instance group. 3. Set the new (increased) **number of instances** and **save**. - - + + 1. Go to Auto Scaling Groups and select the control-plane ASG to scale up. 2. Click **Edit** 3. Set the new (increased) **Desired capacity** and **Update**. 
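The console steps above can also be scripted. A sketch of the same scale-up with the AWS CLI; the group name and target size are placeholders:

```bash
# List the Auto Scaling Groups to find the control-plane group of your cluster.
aws autoscaling describe-auto-scaling-groups \
  --query 'AutoScalingGroups[].{Name:AutoScalingGroupName,Desired:DesiredCapacity}'

# Increase the desired capacity (control planes may only be scaled up).
aws autoscaling set-desired-capacity \
  --auto-scaling-group-name constell-test-control-plane \
  --desired-capacity 5
```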
- - + + If you scale down the number of control-planes nodes, the removed nodes won't be able to exit the `etcd` cluster correctly. This will endanger the quorum that's required to run a stable Kubernetes control plane. diff --git a/docs/versioned_docs/version-2.10/workflows/storage.md b/docs/versioned_docs/version-2.10/workflows/storage.md index 06fbc4de6..9e3d96346 100644 --- a/docs/versioned_docs/version-2.10/workflows/storage.md +++ b/docs/versioned_docs/version-2.10/workflows/storage.md @@ -21,30 +21,30 @@ For more details see [encrypted persistent storage](../architecture/encrypted-st Constellation supports the following drivers, which offer node-level encryption and optional integrity protection. - - + + **Constellation CSI driver for Azure Disk**: Mount Azure [Disk Storage](https://azure.microsoft.com/en-us/services/storage/disks/#overview) into your Constellation cluster. See the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-azuredisk-csi-driver) for more information. Since Azure Disks are mounted as `ReadWriteOnce`, they're only available to a single pod. - - + + **Constellation CSI driver for GCP Persistent Disk**: Mount [Persistent Disk](https://cloud.google.com/persistent-disk) block storage into your Constellation cluster. Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-gcp-compute-persistent-disk-csi-driver) for more information. - - + + **Constellation CSI driver for AWS Elastic Block Store** Mount [Elastic Block Store](https://aws.amazon.com/ebs/) storage volumes into your Constellation cluster. Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-aws-ebs-csi-driver) for more information. - - + + Note that in case the options above aren't a suitable solution for you, Constellation is compatible with all other CSI-based storage options. For example, you can use [AWS EFS](https://docs.aws.amazon.com/en_en/eks/latest/userguide/efs-csi.html), [Azure Files](https://docs.microsoft.com/en-us/azure/storage/files/storage-files-introduction), or [GCP Filestore](https://cloud.google.com/filestore) with Constellation out of the box. Constellation is just not providing transparent encryption on the node level for these storage types yet. @@ -53,8 +53,8 @@ Note that in case the options above aren't a suitable solution for you, Constell The Constellation CLI automatically installs Constellation's CSI driver for the selected CSP in your cluster. If you don't need a CSI driver or wish to deploy your own, you can disable the automatic installation by setting `deployCSIDriver` to `false` in your Constellation config file. - - + + Azure comes with two storage classes by default. @@ -82,8 +82,8 @@ Note that volume expansion isn't supported for integrity-protected disks. ::: - - + + GCP comes with two storage classes by default. @@ -111,8 +111,8 @@ Note that volume expansion isn't supported for integrity-protected disks. ::: - - + + AWS comes with two storage classes by default. @@ -140,8 +140,8 @@ Note that volume expansion isn't supported for integrity-protected disks. ::: - - + + 1. 
Create a [persistent volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) diff --git a/docs/versioned_docs/version-2.10/workflows/terminate.md b/docs/versioned_docs/version-2.10/workflows/terminate.md index f33489ca5..647eadb42 100644 --- a/docs/versioned_docs/version-2.10/workflows/terminate.md +++ b/docs/versioned_docs/version-2.10/workflows/terminate.md @@ -4,7 +4,7 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + --- @@ -16,8 +16,8 @@ All ephemeral storage and state of your cluster will be lost. Make sure any data ::: - - + + Terminate the cluster by running: ```bash @@ -40,8 +40,8 @@ resources manually. Just run the `terminate` command again afterward to continue ::: - - + + Terminate the cluster by running: ```bash @@ -56,5 +56,5 @@ rm constellation-id.json constellation-admin.conf Only the `constellation-mastersecret.json` and the configuration file remain. - - + + diff --git a/docs/versioned_docs/version-2.10/workflows/troubleshooting.md b/docs/versioned_docs/version-2.10/workflows/troubleshooting.md index 38c0d87e9..781cae8c5 100644 --- a/docs/versioned_docs/version-2.10/workflows/troubleshooting.md +++ b/docs/versioned_docs/version-2.10/workflows/troubleshooting.md @@ -55,12 +55,14 @@ When in doubt, check if the encountered [issue is known](https://github.com/edge ::: + :::tip During an upgrade with modified attestation config, a backup of the current configuration is stored in the `join-config-backup` config map in the `kube-system` namespace. To restore the old attestation config after a failed upgrade, you can copy the attestation config from this resource, put it in your configuration file and retry the upgrade. ::: + You can use the `upgrade apply` command to change measurements of a running cluster: 1. Modify the `measurements` key in your local `constellation-conf.yaml` to the expected values. @@ -82,8 +84,8 @@ To provide information during early stages of a node's boot process, Constellati You can view this information in the following places: - - + + 1. In your Azure subscription find the Constellation resource group. 2. Inside the resource group find the Application Insights resource called `constellation-insights-*`. @@ -93,8 +95,8 @@ You can view this information in the following places: To **find the disk UUIDs** use the following query: `traces | where message contains "Disk UUID"` - - + + 1. Select the project that hosts Constellation. 2. Go to the `Compute Engine` service. @@ -109,16 +111,16 @@ Constellation uses the default bucket to store logs. Its [default retention peri ::: - - + + 1. Open [AWS CloudWatch](https://console.aws.amazon.com/cloudwatch/home) 2. Select [Log Groups](https://console.aws.amazon.com/cloudwatch/home#logsV2:log-groups) 3. Select the log group that matches the name of your cluster. 4. Select the log stream for control or worker type nodes. - - + + ### Node shell access diff --git a/docs/versioned_docs/version-2.10/workflows/trusted-launch.md b/docs/versioned_docs/version-2.10/workflows/trusted-launch.md index 11d0a096c..13bd63ba6 100644 --- a/docs/versioned_docs/version-2.10/workflows/trusted-launch.md +++ b/docs/versioned_docs/version-2.10/workflows/trusted-launch.md @@ -14,7 +14,7 @@ Constellation supports trusted launch VMs with instance types `Standard_D*_v4` a Azure currently doesn't support [community galleries for trusted launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/share-gallery-community). 
Thus, you need to manually import the Constellation node image into your cloud subscription. -The latest image is available at `https://cdn.confidential.cloud/constellation/images/azure/trusted-launch/v2.2.0/constellation.img`. Simply adjust the version number to download a newer version. +The latest image is available at . Simply adjust the version number to download a newer version. After you've downloaded the image, create a resource group `constellation-images` in your Azure subscription and import the image. You can use a script to do this: @@ -26,7 +26,6 @@ AZURE_IMAGE_VERSION=2.2.0 AZURE_RESOURCE_GROUP_NAME=constellation-images AZURE_I ``` The script creates the following resources: - 1. A new image gallery with the default name `constellation-import` 2. A new image definition with the default name `constellation` 3. The actual image with the provided version. In this case `2.2.0` diff --git a/docs/versioned_docs/version-2.10/workflows/verify-cli.md b/docs/versioned_docs/version-2.10/workflows/verify-cli.md index e33569d37..1280c51b0 100644 --- a/docs/versioned_docs/version-2.10/workflows/verify-cli.md +++ b/docs/versioned_docs/version-2.10/workflows/verify-cli.md @@ -4,11 +4,11 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + --- -Edgeless Systems uses [sigstore](https://www.sigstore.dev/) and [SLSA](https://slsa.dev) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/cosign/signing/overview/), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at `https://rekor.sigstore.dev`. +Edgeless Systems uses [sigstore](https://www.sigstore.dev/) and [SLSA](https://slsa.dev) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/signing/quickstart), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at . :::note The public key for Edgeless Systems' long-term code-signing key is: @@ -20,7 +20,7 @@ JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== -----END PUBLIC KEY----- ``` -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). +The public key is also available for download at and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). ::: The Rekor transparency log is a public append-only ledger that verifies and records signatures and associated metadata. The Rekor transparency log enables everyone to observe the sequence of (software) signatures issued by Edgeless Systems and many other parties. The transparency log allows for the public identification of dubious or malicious signatures. @@ -33,11 +33,7 @@ You don't need to verify the Constellation node images. This is done automatical ## Verify the signature -:::info -This guide assumes Linux on an amd64 processor. The exact steps for other platforms differ slightly. -::: - -First, [install the Cosign CLI](https://docs.sigstore.dev/cosign/system_config/installation/). 
Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example: +First, [install the Cosign CLI](https://docs.sigstore.dev/system_config/installation). Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example: ```shell-session $ cosign verify-blob --key https://edgeless.systems/es.pub --signature constellation-linux-amd64.sig constellation-linux-amd64 diff --git a/docs/versioned_docs/version-2.11/architecture/attestation.md b/docs/versioned_docs/version-2.11/architecture/attestation.md index 592063193..07ac3aa72 100644 --- a/docs/versioned_docs/version-2.11/architecture/attestation.md +++ b/docs/versioned_docs/version-2.11/architecture/attestation.md @@ -121,8 +121,8 @@ Constellation allows to specify in the config which measurements should be enfor Enforcing non-reproducible measurements controlled by the cloud provider means that changes in these values require manual updates to the cluster's config. By default, Constellation only enforces measurements that are stable values produced by the infrastructure or by Constellation directly. - - + + Constellation uses the [vTPM](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch#vtpm) feature of Azure CVMs for runtime measurements. This vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. @@ -152,8 +152,8 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + Constellation uses the [vTPM](https://cloud.google.com/compute/confidential-vm/docs/about-cvm) feature of CVMs on GCP for runtime measurements. Note that this vTPM doesn't run inside the hardware-protected CVM context, but is emulated by the hypervisor. @@ -185,8 +185,8 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + Constellation uses the [vTPM](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html) (NitroTPM) feature of the [AWS Nitro System](http://aws.amazon.com/ec2/nitro/) on AWS for runtime measurements. @@ -217,16 +217,16 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + ### CVM verification To verify the integrity of the received attestation statement, a chain of trust from the CVM technology to the interface providing the statement has to be established. For verification of the CVM technology, Constellation may expose additional options in its config file. - - + + On Azure, AMD SEV-SNP is used to provide runtime encryption to the VMs. An SEV-SNP attestation report is used to establish trust in the vTPM running inside the VM. @@ -248,18 +248,18 @@ You may customize certain parameters for verification of the attestation stateme More explicitly, it controls the verification of the `IDKeyDigest` value in the SEV-SNP attestation report. You can provide a list of accepted key digests and specify a policy on how this list is compared against the reported `IDKeyDigest`. - - + + There is no additional configuration available for GCP. - - + + There is no additional configuration available for AWS. 
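The enforced measurement values described above don't have to be maintained by hand. Assuming the usual CLI workflow, the reference values for the configured node image can be fetched and verified with a single command; a minimal sketch:

```bash
# Fetch the signed reference measurements for the image set in
# constellation-conf.yaml and write them into the config
# (sketch; run from the cluster's workspace directory).
constellation config fetch-measurements
```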
- - + + ## Cluster attestation diff --git a/docs/versioned_docs/version-2.11/architecture/keys.md b/docs/versioned_docs/version-2.11/architecture/keys.md index 553d9d4e2..f2c8c3fba 100644 --- a/docs/versioned_docs/version-2.11/architecture/keys.md +++ b/docs/versioned_docs/version-2.11/architecture/keys.md @@ -105,7 +105,7 @@ Initially, it will support the following KMSs: * [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/#product-overview) * [KMIP-compatible KMS](https://www.oasis-open.org/committees/tc_home.php?wg_abbrev=kmip) -Storing the keys in Cloud KMS of AWS, Azure, or GCP binds the key usage to the particular cloud identity access management (IAM). +Storing the keys in Cloud KMS of AWS, GCP, or Azure binds the key usage to the particular cloud identity access management (IAM). In the future, Constellation will support remote attestation-based access policies for Cloud KMS once available. Note that using a Cloud KMS limits the isolation and protection to the guarantees of the particular offering. diff --git a/docs/versioned_docs/version-2.11/getting-started/first-steps-local.md b/docs/versioned_docs/version-2.11/getting-started/first-steps-local.md index a6e825906..de9c66e9b 100644 --- a/docs/versioned_docs/version-2.11/getting-started/first-steps-local.md +++ b/docs/versioned_docs/version-2.11/getting-started/first-steps-local.md @@ -45,8 +45,8 @@ sudo iptables -P FORWARD ACCEPT ## Create a cluster - - + + With the `constellation mini` command, you can deploy and test Constellation locally. This mode is called MiniConstellation. Conceptually, MiniConstellation is similar to [MicroK8s](https://microk8s.io/), [K3s](https://k3s.io/), and [minikube](https://minikube.sigs.k8s.io/docs/). @@ -74,8 +74,8 @@ constellation mini up This will configure your current directory as the [workspace](../architecture/orchestration.md#workspaces) for this cluster. All `constellation` commands concerning this cluster need to be issued from this directory. - - + + With the QEMU provider, you can create a local Constellation cluster as if it were in the cloud. The provider uses [QEMU](https://www.qemu.org/) to create multiple VMs for the cluster nodes, which interact with each other. @@ -151,8 +151,8 @@ attaching persistent storage, or autoscaling aren't available. export KUBECONFIG="$PWD/constellation-admin.conf" ``` - - + + ## Connect to the cluster @@ -205,8 +205,8 @@ worker-0 Ready 32s v1.24.6 ## Terminate your cluster - - + + Once you are done, you can clean up the created resources using the following command: @@ -217,8 +217,8 @@ constellation mini down This will destroy your cluster and clean up your workspace. The VM image and cluster configuration file (`constellation-conf.yaml`) will be kept and may be reused to create new clusters. - - + + Once you are done, you can clean up the created resources using the following command: @@ -246,8 +246,8 @@ Your Constellation cluster was terminated successfully. This will destroy your cluster and clean up your workspace. The VM image and cluster configuration file (`constellation-conf.yaml`) will be kept and may be reused to create new clusters. 
- - + + ## Troubleshooting diff --git a/docs/versioned_docs/version-2.11/getting-started/first-steps.md b/docs/versioned_docs/version-2.11/getting-started/first-steps.md index 9ebe21701..07b7f8410 100644 --- a/docs/versioned_docs/version-2.11/getting-started/first-steps.md +++ b/docs/versioned_docs/version-2.11/getting-started/first-steps.md @@ -15,39 +15,39 @@ If you encounter any problem with the following steps, make sure to use the [lat 1. Create the [configuration file](../workflows/config.md) for your cloud provider. - + - + ```bash constellation config generate azure ``` - + - + ```bash constellation config generate gcp ``` - + - + ```bash constellation config generate aws ``` - + - + 2. Create your [IAM configuration](../workflows/config.md#creating-an-iam-configuration). - + - + ```bash constellation iam create azure --region=westus --resourceGroup=constellTest --servicePrincipal=spTest --update-config @@ -62,21 +62,21 @@ If you encounter any problem with the following steps, make sure to use the [lat * `westeurope` * `southeastasia` - + - + ```bash - constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --serviceAccountID=constell-test --update-config + constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west2-a --serviceAccountID=constell-test --update-config ``` - This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. + This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west2-a` creating a new service account `constell-test`. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `C2D` or `N2D`. - + - + ```bash constellation iam create aws --zone=us-east-2a --prefix=constellTest --update-config @@ -103,8 +103,8 @@ If you encounter any problem with the following steps, make sure to use the [lat You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). - - + + :::tip To learn about all options you have for managing IAM resources and Constellation configuration, see the [Configuration workflow](../workflows/config.md). diff --git a/docs/versioned_docs/version-2.11/getting-started/install.md b/docs/versioned_docs/version-2.11/getting-started/install.md index f134ca3c2..4debbca9a 100644 --- a/docs/versioned_docs/version-2.11/getting-started/install.md +++ b/docs/versioned_docs/version-2.11/getting-started/install.md @@ -11,15 +11,15 @@ Make sure the following requirements are met: - Your machine is running Linux or macOS - You have admin rights on your machine - [kubectl](https://kubernetes.io/docs/tasks/tools/) is installed -- Your CSP is Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) +- Your CSP is Microsoft Azure, Google Cloud Platform (GCP), or Amazon Web Services (AWS) ## Install the Constellation CLI The CLI executable is available at [GitHub](https://github.com/edgelesssys/constellation/releases). 
Install it with the following commands: - - + + 1. Download the CLI: @@ -35,8 +35,8 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-linux-amd64 /usr/local/bin/constellation ``` - - + + 1. Download the CLI: @@ -52,9 +52,10 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-linux-arm64 /usr/local/bin/constellation ``` - - + + + 1. Download the CLI: @@ -70,9 +71,11 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-darwin-arm64 /usr/local/bin/constellation ``` - - + + + + 1. Download the CLI: @@ -88,8 +91,8 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-darwin-amd64 /usr/local/bin/constellation ``` - - + + :::tip The CLI supports autocompletion for various shells. To set it up, run `constellation completion` and follow the given steps. @@ -105,42 +108,39 @@ If you don't have a cloud subscription, you can also set up a [local Constellati ### Required permissions - - + + The following [resource providers need to be registered](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/resource-providers-and-types#register-resource-provider) in your subscription: - -- `Microsoft.Attestation` \[2] -- `Microsoft.Compute` -- `Microsoft.Insights` -- `Microsoft.ManagedIdentity` -- `Microsoft.Network` +* `Microsoft.Attestation` \[2] +* `Microsoft.Compute` +* `Microsoft.Insights` +* `Microsoft.ManagedIdentity` +* `Microsoft.Network` By default, Constellation tries to register these automatically if they haven't been registered before. To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: - -- `*/register/action` \[1] -- `Microsoft.Authorization/roleAssignments/*` -- `Microsoft.Authorization/roleDefinitions/*` -- `Microsoft.ManagedIdentity/userAssignedIdentities/*` -- `Microsoft.Resources/subscriptions/resourcegroups/*` +* `*/register/action` \[1] +* `Microsoft.Authorization/roleAssignments/*` +* `Microsoft.Authorization/roleDefinitions/*` +* `Microsoft.ManagedIdentity/userAssignedIdentities/*` +* `Microsoft.Resources/subscriptions/resourcegroups/*` The built-in `Owner` role is a superset of these permissions. 
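If you prefer to register the resource providers listed above yourself instead of relying on Constellation's automatic registration, a sketch with the Azure CLI could look like this:

```bash
# Register the resource providers required by Constellation.
# Registration is idempotent and may take a few minutes to complete.
for ns in Microsoft.Attestation Microsoft.Compute Microsoft.Insights \
          Microsoft.ManagedIdentity Microsoft.Network; do
  az provider register --namespace "$ns"
done
```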
To [create a Constellation cluster](../workflows/create.md#the-create-step), you need the following permissions: - -- `Microsoft.Attestation/attestationProviders/*` -- `Microsoft.Compute/virtualMachineScaleSets/*` -- `Microsoft.Insights/components/*` -- `Microsoft.ManagedIdentity/userAssignedIdentities/*` -- `Microsoft.Network/loadBalancers/*` -- `Microsoft.Network/loadBalancers/backendAddressPools/*` -- `Microsoft.Network/networkSecurityGroups/*` -- `Microsoft.Network/publicIPAddresses/*` -- `Microsoft.Network/virtualNetworks/*` -- `Microsoft.Network/virtualNetworks/subnets/*` -- `Microsoft.Network/natGateways/*` +* `Microsoft.Attestation/attestationProviders/*` \[2] +* `Microsoft.Compute/virtualMachineScaleSets/*` +* `Microsoft.Insights/components/*` +* `Microsoft.ManagedIdentity/userAssignedIdentities/*` +* `Microsoft.Network/loadBalancers/*` +* `Microsoft.Network/loadBalancers/backendAddressPools/*` +* `Microsoft.Network/networkSecurityGroups/*` +* `Microsoft.Network/publicIPAddresses/*` +* `Microsoft.Network/virtualNetworks/*` +* `Microsoft.Network/virtualNetworks/subnets/*` +* `Microsoft.Network/natGateways/*` The built-in `Contributor` role is a superset of these permissions. @@ -148,91 +148,91 @@ Follow Microsoft's guide on [understanding](https://learn.microsoft.com/en-us/az 1: You can omit `*/register/Action` if the resource providers mentioned above are already registered and the `ARM_SKIP_PROVIDER_REGISTRATION` environment variable is set to `true` when creating the IAM configuration. - - +2: You can omit `Microsoft.Attestation/attestationProviders/*` and the registration of `Microsoft.Attestation` if `EnforceIDKeyDigest` isn't set to `MAAFallback` in the [config file](../workflows/config.md#configure-your-cluster). + + + Create a new project for Constellation or use an existing one. Enable the [Compute Engine API](https://console.cloud.google.com/apis/library/compute.googleapis.com) on it. To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: - -- `iam.serviceAccountKeys.create` -- `iam.serviceAccountKeys.delete` -- `iam.serviceAccountKeys.get` -- `iam.serviceAccounts.create` -- `iam.serviceAccounts.delete` -- `iam.serviceAccounts.get` -- `resourcemanager.projects.getIamPolicy` -- `resourcemanager.projects.setIamPolicy` +* `iam.serviceAccountKeys.create` +* `iam.serviceAccountKeys.delete` +* `iam.serviceAccountKeys.get` +* `iam.serviceAccounts.create` +* `iam.serviceAccounts.delete` +* `iam.serviceAccounts.get` +* `resourcemanager.projects.getIamPolicy` +* `resourcemanager.projects.setIamPolicy` Together, the built-in roles `roles/editor` and `roles/resourcemanager.projectIamAdmin` form a superset of these permissions. 
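To grant those two built-in roles to the user who runs `constellation iam create`, an illustrative gcloud invocation might look like this (project ID and account are placeholders):

```bash
# Grant the roles required for IAM creation (adjust project and user).
gcloud projects add-iam-policy-binding yourproject-12345 \
  --member="user:alice@example.com" --role="roles/editor"
gcloud projects add-iam-policy-binding yourproject-12345 \
  --member="user:alice@example.com" --role="roles/resourcemanager.projectIamAdmin"
```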
To [create a Constellation cluster](../workflows/create.md#the-create-step), you need the following permissions: - -- `compute.addresses.createInternal` -- `compute.addresses.deleteInternal` -- `compute.addresses.get` -- `compute.addresses.useInternal` -- `compute.backendServices.create` -- `compute.backendServices.delete` -- `compute.backendServices.get` -- `compute.backendServices.use` -- `compute.disks.create` -- `compute.firewalls.create` -- `compute.firewalls.delete` -- `compute.firewalls.get` -- `compute.globalAddresses.create` -- `compute.globalAddresses.delete` -- `compute.globalAddresses.get` -- `compute.globalAddresses.use` -- `compute.globalForwardingRules.create` -- `compute.globalForwardingRules.delete` -- `compute.globalForwardingRules.get` -- `compute.globalForwardingRules.setLabels` -- `compute.globalOperations.get` -- `compute.healthChecks.create` -- `compute.healthChecks.delete` -- `compute.healthChecks.get` -- `compute.healthChecks.useReadOnly` -- `compute.instanceGroupManagers.create` -- `compute.instanceGroupManagers.delete` -- `compute.instanceGroupManagers.get` -- `compute.instanceGroups.create` -- `compute.instanceGroups.delete` -- `compute.instanceGroups.get` -- `compute.instanceGroups.use` -- `compute.instances.create` -- `compute.instances.setLabels` -- `compute.instances.setMetadata` -- `compute.instances.setTags` -- `compute.instanceTemplates.create` -- `compute.instanceTemplates.delete` -- `compute.instanceTemplates.get` -- `compute.instanceTemplates.useReadOnly` -- `compute.networks.create` -- `compute.networks.delete` -- `compute.networks.get` -- `compute.networks.updatePolicy` -- `compute.routers.create` -- `compute.routers.delete` -- `compute.routers.get` -- `compute.routers.update` -- `compute.subnetworks.create` -- `compute.subnetworks.delete` -- `compute.subnetworks.get` -- `compute.subnetworks.use` -- `compute.targetTcpProxies.create` -- `compute.targetTcpProxies.delete` -- `compute.targetTcpProxies.get` -- `compute.targetTcpProxies.use` -- `iam.serviceAccounts.actAs` +* `compute.addresses.createInternal` +* `compute.addresses.deleteInternal` +* `compute.addresses.get` +* `compute.addresses.useInternal` +* `compute.backendServices.create` +* `compute.backendServices.delete` +* `compute.backendServices.get` +* `compute.backendServices.use` +* `compute.disks.create` +* `compute.firewalls.create` +* `compute.firewalls.delete` +* `compute.firewalls.get` +* `compute.globalAddresses.create` +* `compute.globalAddresses.delete` +* `compute.globalAddresses.get` +* `compute.globalAddresses.use` +* `compute.globalForwardingRules.create` +* `compute.globalForwardingRules.delete` +* `compute.globalForwardingRules.get` +* `compute.globalForwardingRules.setLabels` +* `compute.globalOperations.get` +* `compute.healthChecks.create` +* `compute.healthChecks.delete` +* `compute.healthChecks.get` +* `compute.healthChecks.useReadOnly` +* `compute.instanceGroupManagers.create` +* `compute.instanceGroupManagers.delete` +* `compute.instanceGroupManagers.get` +* `compute.instanceGroups.create` +* `compute.instanceGroups.delete` +* `compute.instanceGroups.get` +* `compute.instanceGroups.use` +* `compute.instances.create` +* `compute.instances.setLabels` +* `compute.instances.setMetadata` +* `compute.instances.setTags` +* `compute.instanceTemplates.create` +* `compute.instanceTemplates.delete` +* `compute.instanceTemplates.get` +* `compute.instanceTemplates.useReadOnly` +* `compute.networks.create` +* `compute.networks.delete` +* `compute.networks.get` +* 
`compute.networks.updatePolicy` +* `compute.routers.create` +* `compute.routers.delete` +* `compute.routers.get` +* `compute.routers.update` +* `compute.subnetworks.create` +* `compute.subnetworks.delete` +* `compute.subnetworks.get` +* `compute.subnetworks.use` +* `compute.targetTcpProxies.create` +* `compute.targetTcpProxies.delete` +* `compute.targetTcpProxies.get` +* `compute.targetTcpProxies.use` +* `iam.serviceAccounts.actAs` Together, the built-in roles `roles/editor`, `roles/compute.instanceAdmin` and `roles/resourcemanager.projectIamAdmin` form a superset of these permissions. Follow Google's guide on [understanding](https://cloud.google.com/iam/docs/understanding-roles) and [assigning roles](https://cloud.google.com/iam/docs/granting-changing-revoking-access). - - + + To set up a Constellation cluster, you need to perform two tasks that require permissions: create the infrastructure and create roles for cluster nodes. Both of these actions can be performed by different users, e.g., an administrator to create roles and a DevOps engineer to create the infrastructure. @@ -278,12 +278,13 @@ The built-in `AdministratorAccess` policy is a superset of these permissions. To [create a Constellation cluster](../workflows/create.md#the-create-step), see the permissions of [main.tf](https://github.com/edgelesssys/constellation/blob/main/terraform/infrastructure/iam/aws/main.tf). + The built-in `PowerUserAccess` policy is a superset of these permissions. Follow Amazon's guide on [understanding](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) and [managing policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html). - - + + ### Authentication @@ -293,8 +294,8 @@ You need to authenticate with your CSP. The following lists the required steps f The steps for a *testing* environment are simpler. However, they may expose secrets to the CSP. If in doubt, follow the *production* steps. ::: - - + + **Testing** @@ -310,8 +311,8 @@ az login Other options are described in Azure's [authentication guide](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli). - - + + **Testing** @@ -334,8 +335,8 @@ Use one of the following options on a trusted machine: Follow [Google's guide](https://cloud.google.com/docs/authentication/production#manually) for setting up your credentials. - - + + **Testing** @@ -351,9 +352,10 @@ aws configure Options and first steps are described in the [AWS CLI documentation](https://docs.aws.amazon.com/cli/index.html). - + - + + ## Next steps diff --git a/docs/versioned_docs/version-2.11/overview/clouds.md b/docs/versioned_docs/version-2.11/overview/clouds.md index dfc3d5307..3ccbb0d6d 100644 --- a/docs/versioned_docs/version-2.11/overview/clouds.md +++ b/docs/versioned_docs/version-2.11/overview/clouds.md @@ -31,11 +31,11 @@ This firmware is signed by Azure. The signature is reflected in the remote-attestation statements of CVMs. Thus, the Azure closed-source firmware becomes part of Constellation's trusted computing base (TCB). -\* Recently, [Azure announced the open source paravisor OpenHCL](https://techcommunity.microsoft.com/blog/windowsosplatform/openhcl-the-new-open-source-paravisor/4273172). It's the foundation for fully open source and verifiable CVM firmware. Once Azure provides their CVM firmware with reproducible builds based on OpenHCL, (4) switches from *No* to *Yes*. Constellation will support OpenHCL based firmware on Azure in the future. 
+\* Recently, Azure [announced](https://techcommunity.microsoft.com/t5/azure-confidential-computing/azure-confidential-vms-using-sev-snp-dcasv5-ecasv5-are-now/ba-p/3573747) the *limited preview* of CVMs with customizable firmware. With this CVM type, (4) switches from *No* to *Yes*. Constellation will support customizable firmware on Azure in the future. ## Google Cloud Platform (GCP) -The [CVMs Generally Available in GCP](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview#amd_sev) are based on AMD SEV but don't have SNP features enabled. +The [CVMs Generally Available in GCP](https://cloud.google.com/compute/confidential-vm/docs/create-confidential-vm-instance) are based on AMD SEV but don't have SNP features enabled. CVMs with SEV-SNP enabled are currently in [private preview](https://cloud.google.com/blog/products/identity-security/rsa-snp-vm-more-confidential). Regarding (3), with their SEV-SNP offering Google provides direct access to remote-attestation statements. However, regarding (4), the CVMs still include closed-source firmware. diff --git a/docs/versioned_docs/version-2.11/overview/confidential-kubernetes.md b/docs/versioned_docs/version-2.11/overview/confidential-kubernetes.md index 1441c833a..2b6c6ed17 100644 --- a/docs/versioned_docs/version-2.11/overview/confidential-kubernetes.md +++ b/docs/versioned_docs/version-2.11/overview/confidential-kubernetes.md @@ -23,9 +23,9 @@ With the above, Constellation wraps an entire cluster into one coherent and veri ![Confidential Kubernetes](../_media/concept-constellation.svg) -## Comparison: Managed Kubernetes with CVMs +## Contrast: Managed Kubernetes with CVMs -In comparison, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. Consequently, this approach leaves a large attack surface, as is depicted in the following. +In contrast, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. Consequently, this approach leaves a large attack surface, as is depicted in the following. 
![Concept: Managed Kubernetes plus CVMs](../_media/concept-managed.svg) diff --git a/docs/versioned_docs/version-2.11/overview/performance/compute.md b/docs/versioned_docs/version-2.11/overview/performance/compute.md deleted file mode 100644 index 88dd4b1b2..000000000 --- a/docs/versioned_docs/version-2.11/overview/performance/compute.md +++ /dev/null @@ -1,11 +0,0 @@ -# Impact of runtime encryption on compute performance - -All nodes in a Constellation cluster are executed inside Confidential VMs (CVMs). Consequently, the performance of Constellation is inherently linked to the performance of these CVMs. - -## AMD and Azure benchmarking - -AMD and Azure have collectively released a [performance benchmark](https://community.amd.com/t5/business/microsoft-azure-confidential-computing-powered-by-3rd-gen-epyc/ba-p/497796) for CVMs that utilize 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. This benchmark, which included a variety of mostly compute-intensive tests such as SPEC CPU 2017 and CoreMark, demonstrated that CVMs experience only minor performance degradation (ranging from 2% to 8%) when compared to standard VMs. Such results are indicative of the performance that can be expected from compute-intensive workloads running with Constellation on Azure. - -## AMD and Google benchmarking - -Similarly, AMD and Google have jointly released a [performance benchmark](https://www.amd.com/system/files/documents/3rd-gen-epyc-gcp-c2d-conf-compute-perf-brief.pdf) for CVMs employing 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. With high-performance computing workloads such as WRF, NAMD, Ansys CFS, and Ansys LS_DYNA, they observed analogous findings, with only minor performance degradation (between 2% and 4%) compared to standard VMs. These outcomes are reflective of the performance that can be expected for compute-intensive workloads running with Constellation on GCP. diff --git a/docs/versioned_docs/version-2.11/overview/performance/io.md b/docs/versioned_docs/version-2.11/overview/performance/io.md index 3ae796f8a..dc7cf3d8b 100644 --- a/docs/versioned_docs/version-2.11/overview/performance/io.md +++ b/docs/versioned_docs/version-2.11/overview/performance/io.md @@ -58,7 +58,7 @@ The following infrastructure configurations were used: This section gives a thorough analysis of the network performance of Constellation, specifically focusing on measuring TCP and UDP bandwidth. The benchmark measured the bandwidth of pod-to-pod and pod-to-service connections between two different nodes using [`iperf`](https://iperf.fr/). -GKE and Constellation on GCP had a maximum network bandwidth of [10 Gbps](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines). +GKE and Constellation on GCP had a maximum network bandwidth of [10 Gbps](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines). AKS with `Standard_D4as_v5` machines has a maximum network bandwidth of [12.5 Gbps](https://learn.microsoft.com/en-us/azure/virtual-machines/dasv5-dadsv5-series#dasv5-series). The Confidential VM equivalent `Standard_DC4as_v5` currently has a network bandwidth of [1.25 Gbps](https://learn.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series#dcasv5-series-products). Therefore, to make the test comparable, both AKS and Constellation on Azure were running with `Standard_DC4as_v5` machines and 1.25 Gbps bandwidth.
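As a rough, unofficial sketch of how such a pod-to-pod `iperf3` measurement can be reproduced (the container image, pod names, and scheduling below are assumptions, not the exact benchmark setup):

```bash
# Start an iperf3 server pod (image name is an example from Docker Hub).
kubectl run iperf3-server --image=networkstatic/iperf3 --port=5201 -- -s
kubectl wait --for=condition=Ready pod/iperf3-server
SERVER_IP=$(kubectl get pod iperf3-server -o jsonpath='{.status.podIP}')

# Run a client pod against the server's pod IP for 30 seconds.
# Note: this doesn't pin the pods to different nodes; for a node-to-node
# measurement you'd add node selectors or pod anti-affinity.
kubectl run iperf3-client --rm -it --restart=Never \
  --image=networkstatic/iperf3 -- -c "$SERVER_IP" -t 30
```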
diff --git a/docs/versioned_docs/version-2.11/overview/performance/performance.md b/docs/versioned_docs/version-2.11/overview/performance/performance.md index 59bf86602..7f22a693e 100644 --- a/docs/versioned_docs/version-2.11/overview/performance/performance.md +++ b/docs/versioned_docs/version-2.11/overview/performance/performance.md @@ -1,10 +1,18 @@ # Performance analysis of Constellation -This section provides a comprehensive examination of the performance characteristics of Constellation. +This section provides a comprehensive examination of the performance characteristics of Constellation, encompassing various aspects, including runtime encryption, I/O benchmarks, and real-world applications. -## Runtime encryption +## Impact of runtime encryption on performance -Runtime encryption affects compute performance. [Benchmarks by Azure and Google](compute.md) show that the performance degradation of Confidential VMs (CVMs) is small, ranging from 2% to 8% for compute-intensive workloads. +All nodes in a Constellation cluster are executed inside Confidential VMs (CVMs). Consequently, the performance of Constellation is inherently linked to the performance of these CVMs. + +### AMD and Azure benchmarking + +AMD and Azure have collectively released a [performance benchmark](https://community.amd.com/t5/business/microsoft-azure-confidential-computing-powered-by-3rd-gen-epyc/ba-p/497796) for CVMs that utilize 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. This benchmark, which included a variety of mostly compute-intensive tests such as SPEC CPU 2017 and CoreMark, demonstrated that CVMs experience only minor performance degradation (ranging from 2% to 8%) when compared to standard VMs. Such results are indicative of the performance that can be expected from compute-intensive workloads running with Constellation on Azure. + +### AMD and Google benchmarking + +Similarly, AMD and Google have jointly released a [performance benchmark](https://www.amd.com/system/files/documents/3rd-gen-epyc-gcp-c2d-conf-compute-perf-brief.pdf) for CVMs employing 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. With high-performance computing workloads such as WRF, NAMD, Ansys CFS, and Ansys LS_DYNA, they observed analogous findings, with only minor performance degradation (between 2% and 4%) compared to standard VMs. These outcomes are reflective of the performance that can be expected for compute-intensive workloads running with Constellation on GCP. ## I/O performance benchmarks diff --git a/docs/versioned_docs/version-2.11/overview/product.md b/docs/versioned_docs/version-2.11/overview/product.md index e42596fcc..ba7181aa9 100644 --- a/docs/versioned_docs/version-2.11/overview/product.md +++ b/docs/versioned_docs/version-2.11/overview/product.md @@ -6,6 +6,6 @@ From a security perspective, Constellation implements the [Confidential Kubernet From an operational perspective, Constellation provides the following key features: -* **Native support for different clouds**: Constellation works on Amazon Web Services (AWS), Microsoft Azure, and Google Cloud Platform (GCP). Support for OpenStack-based environments is coming with a future release. Constellation securely interfaces with the cloud infrastructure to provide [cluster autoscaling](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), [dynamic persistent volumes](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/), and [service load balancing](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). 
+* **Native support for different clouds**: Constellation works on Microsoft Azure, Google Cloud Platform (GCP), and Amazon Web Services (AWS). Support for OpenStack-based environments is coming with a future release. Constellation securely interfaces with the cloud infrastructure to provide [cluster autoscaling](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), [dynamic persistent volumes](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/), and [service load balancing](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). * **High availability**: Constellation uses a [multi-master architecture](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/) with a [stacked etcd topology](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/ha-topology/#stacked-etcd-topology) to ensure high availability. * **Integrated Day-2 operations**: Constellation lets you securely [upgrade](../workflows/upgrade.md) your cluster to a new release. It also lets you securely [recover](../workflows/recovery.md) a failed cluster. Both with a single command. diff --git a/docs/versioned_docs/version-2.11/workflows/config.md b/docs/versioned_docs/version-2.11/workflows/config.md index edc3c9091..95f95aeec 100644 --- a/docs/versioned_docs/version-2.11/workflows/config.md +++ b/docs/versioned_docs/version-2.11/workflows/config.md @@ -4,7 +4,7 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + --- @@ -14,49 +14,49 @@ Before you can create your cluster, you need to configure the identity and acces You can generate a configuration file for your CSP by using the following CLI command: - - + + ```bash constellation config generate azure ``` - - + + ```bash constellation config generate gcp ``` - - + + ```bash constellation config generate aws ``` - - + + This creates the file `constellation-conf.yaml` in the current directory. ## Choosing a VM type Constellation supports the following VM types: - - + + By default, Constellation uses `Standard_DC4as_v5` CVMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. For CVMs, any VM type with a minimum of 4 vCPUs from the [DCasv5 & DCadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series) or [ECasv5 & ECadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/ecasv5-ecadsv5-series) families is supported. You can also run `constellation config instance-types` to get the list of all supported options. - - + + By default, Constellation uses `n2d-standard-4` VMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. Supported are all machines with a minimum of 4 vCPUs from the [C2D](https://cloud.google.com/compute/docs/compute-optimized-machines#c2d_machine_types) or [N2D](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines) family. You can run `constellation config instance-types` to get the list of all supported options. - - + + By default, Constellation uses `m6a.xlarge` VMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. @@ -75,8 +75,8 @@ AWS is currently investigating the issue. 
SNP-based attestation will be enabled as soon as a fix is verified. ::: - - + + Fill the desired VM type into the **instanceType** fields in the `constellation-conf.yml` file. @@ -86,6 +86,7 @@ By default, Constellation creates the node groups `control_plane_default` and `w If you require additional control-plane or worker groups with different instance types, zone placements, or disk sizes, you can add additional node groups to the `constellation-conf.yml` file. Each node group can be scaled individually. + Consider the following example for AWS: ```yaml @@ -119,9 +120,9 @@ You can use the field `zone` to specify what availability zone nodes of the grou On Azure, this field is empty by default and nodes are automatically spread across availability zones. Consult the documentation of your cloud provider for more information: -* [AWS](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/) -* [Azure](https://azure.microsoft.com/en-us/explore/global-infrastructure/availability-zones) -* [GCP](https://cloud.google.com/compute/docs/regions-zones) +- [AWS](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/) +- [Azure](https://azure.microsoft.com/en-us/explore/global-infrastructure/availability-zones) +- [GCP](https://cloud.google.com/compute/docs/regions-zones) ## Choosing a Kubernetes version @@ -133,8 +134,8 @@ See also Constellation's [Kubernetes support policy](../architecture/versions.md You can create an IAM configuration for your cluster automatically using the `constellation iam create` command. If you already have a Constellation configuration file, you can add the `--update-config` flag to the command. This writes the needed IAM fields into your configuration. Furthermore, the flag updates the zone/region of the configuration if it hasn't been set yet. - - + + You must be authenticated with the [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). @@ -154,23 +155,23 @@ Note that CVMs are currently only supported in a few regions, check [Azure's pro Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - + + You must be authenticated with the [GCP CLI](https://cloud.google.com/sdk/gcloud) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). ```bash -constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --serviceAccountID=constell-test +constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west2-a --serviceAccountID=constell-test ``` -This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. +This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west2-a` creating a new service account `constell-test`. Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `N2D`. Paste the output into the corresponding fields of the `constellation-conf.yaml` file. 
- - + + You must be authenticated with the [AWS CLI](https://aws.amazon.com/en/cli/) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). @@ -194,16 +195,16 @@ You can find a list of all [regions in AWS's documentation](https://docs.aws.ama Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - + +
Alternatively, you can manually create the IAM configuration on your CSP. The following describes the configuration fields and how you obtain the required information or create the required resources. - - + + * **subscription**: The UUID of your Azure subscription, e.g., `8b8bd01f-efd9-4113-9bd1-c82137c32da7`. @@ -232,19 +233,19 @@ The following describes the configuration fields and how you obtain the required The user-assigned identity is used by instances of the cluster to access other cloud resources. For more information about managed identities refer to [Azure's documentation](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/how-manage-user-assigned-managed-identities). - + - + * **project**: The ID of your GCP project, e.g., `constellation-129857`. You can find it on the [welcome screen of your GCP project](https://console.cloud.google.com/welcome). For more information refer to [Google's documentation](https://support.google.com/googleapi/answer/7014113). -* **region**: The GCP region you want to deploy your cluster in, e.g., `us-central1`. +* **region**: The GCP region you want to deploy your cluster in, e.g., `us-west1`. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). -* **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-central1-a`. +* **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-west1-a`. You can find a [list of all zones in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). @@ -258,9 +259,9 @@ The following describes the configuration fields and how you obtain the required Afterward, create and download a new JSON key for this service account. Place the downloaded file in your Constellation workspace, and set the config parameter to the filename, e.g., `constellation-129857-15343dba46cb.json`. - + - + * **region**: The name of your chosen AWS data center region, e.g., `us-east-2`. @@ -291,9 +292,9 @@ The following describes the configuration fields and how you obtain the required Alternatively, you can create the AWS profile with a tool of your choice. Use the JSON policy in [main.tf](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam/main.tf) in the resource `aws_iam_policy.worker_node_policy`. - + - +
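For instance, a worker-node instance profile could be assembled manually with the AWS CLI roughly as follows (names are placeholders; the attached policy would be the JSON policy from `main.tf` mentioned above):

```bash
# Create an instance profile and attach an existing role that carries
# the worker-node policy (both names are placeholders).
aws iam create-instance-profile --instance-profile-name constellation-worker-profile
aws iam add-role-to-instance-profile \
  --instance-profile-name constellation-worker-profile \
  --role-name constellation-worker-role
```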
Now that you've configured your CSP, you can [create your cluster](./create.md). diff --git a/docs/versioned_docs/version-2.11/workflows/create.md b/docs/versioned_docs/version-2.11/workflows/create.md index 8dd4946de..d2b0adf90 100644 --- a/docs/versioned_docs/version-2.11/workflows/create.md +++ b/docs/versioned_docs/version-2.11/workflows/create.md @@ -4,7 +4,7 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + --- @@ -26,8 +26,8 @@ Before you create the cluster, make sure to have a [valid configuration file](./ ### Create - - + + ```bash constellation create @@ -35,8 +35,8 @@ constellation create *create* stores your cluster's state in a [`constellation-terraform`](../architecture/orchestration.md#cluster-creation-process) directory in your workspace. - - + + Terraform allows for an easier GitOps integration as well as meeting regulatory requirements. Since the Constellation CLI also uses Terraform under the hood, you can reuse the same Terraform files. @@ -75,8 +75,8 @@ CONSTELL_CSP=$(cat constellation-conf.yaml | yq ".provider | keys | .[0]") jq --null-input --arg cloudprovider "$CONSTELL_CSP" --arg ip "$CONSTELL_IP" --arg initsecret "$CONSTELL_INIT_SECRET" '{"cloudprovider":$cloudprovider,"ip":$ip,"initsecret":$initsecret}' > constellation-id.json ``` - - + + ## The *init* step @@ -94,6 +94,6 @@ export KUBECONFIG="$PWD/constellation-admin.conf" 🏁 That's it. You've successfully created a Constellation cluster. -### Troubleshooting +### Troubleshooting In case `init` fails, the CLI collects logs from the bootstrapping instance and stores them inside `constellation-cluster.log`. diff --git a/docs/versioned_docs/version-2.11/workflows/recovery.md b/docs/versioned_docs/version-2.11/workflows/recovery.md index 35596b8c9..c26fb32eb 100644 --- a/docs/versioned_docs/version-2.11/workflows/recovery.md +++ b/docs/versioned_docs/version-2.11/workflows/recovery.md @@ -16,8 +16,8 @@ You can check the health status of the nodes via the cloud service provider (CSP Constellation provides logging information on the boot process and status via [cloud logging](troubleshooting.md#cloud-logging). In the following, you'll find detailed descriptions for identifying clusters stuck in recovery for each CSP. - - + + In the Azure portal, find the cluster's resource group. Inside the resource group, open the control plane *Virtual machine scale set* `constellation-scale-set-controlplanes-`. @@ -51,8 +51,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. - - + + First, check that the control plane *Instance Group* has enough members in a *Ready* state. In the GCP Console, go to **Instance Groups** and check the group for the cluster's control plane `-control-plane-`. @@ -87,8 +87,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. - - + + First, open the AWS console to view all Auto Scaling Groups (ASGs) in the region of your cluster. Select the ASG of the control plane `--control-plane` and check that enough members are in a *Running* state. @@ -118,8 +118,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. 
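For AWS nodes, one way to pull an instance's console output without opening the web console (assuming the AWS CLI is configured; the instance ID is a placeholder) is:

```bash
# Fetch the latest serial console output of a suspect control-plane instance.
aws ec2 get-console-output --instance-id i-0123456789abcdef0 --latest --output text
```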
- - + + ## Recover a cluster diff --git a/docs/versioned_docs/version-2.11/workflows/sbom.md b/docs/versioned_docs/version-2.11/workflows/sbom.md index 6c1702dee..9ef6eb65c 100644 --- a/docs/versioned_docs/version-2.11/workflows/sbom.md +++ b/docs/versioned_docs/version-2.11/workflows/sbom.md @@ -1,6 +1,6 @@ # Consume software bill of materials (SBOMs) - + --- @@ -11,15 +11,13 @@ SBOMs for Constellation are generated using [Syft](https://github.com/anchore/sy :::note The public key for Edgeless Systems' long-term code-signing key is: - ``` -----BEGIN PUBLIC KEY----- MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEf8F1hpmwE+YCFXzjGtaQcrL6XZVT JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== -----END PUBLIC KEY----- ``` - -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). +The public key is also available for download at https://edgeless.systems/es.pub and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). Make sure the key is available in a file named `cosign.pub` to execute the following examples. ::: @@ -40,7 +38,7 @@ cosign verify-blob --key cosign.pub --signature constellation.spdx.sbom.sig cons ### Container Images -SBOMs for container images are [attached to the image using Cosign](https://docs.sigstore.dev/cosign/signing/other_types/#sboms-software-bill-of-materials) and uploaded to the same registry. +SBOMs for container images are [attached to the image using Cosign](https://docs.sigstore.dev/signing/other_types#sboms-software-bill-of-materials) and uploaded to the same registry. As a consumer, use cosign to download and verify the SBOM: diff --git a/docs/versioned_docs/version-2.11/workflows/scale.md b/docs/versioned_docs/version-2.11/workflows/scale.md index 63b727c7d..06898ad0c 100644 --- a/docs/versioned_docs/version-2.11/workflows/scale.md +++ b/docs/versioned_docs/version-2.11/workflows/scale.md @@ -51,30 +51,30 @@ kubectl -n kube-system get nodes Alternatively, you can manually scale your cluster up or down: - - + + 1. Find your Constellation resource group. 2. Select the `scale-set-workers`. 3. Go to **settings** and **scaling**. 4. Set the new **instance count** and **save**. - - + + 1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). 2. **Edit** the **worker** instance group. 3. Set the new **number of instances** and **save**. - - + + 1. Go to Auto Scaling Groups and select the worker ASG to scale up. 2. Click **Edit** 3. Set the new (increased) **Desired capacity** and **Update**. - - + + ## Control-plane node scaling @@ -82,30 +82,30 @@ Control-plane nodes can **only be scaled manually and only scaled up**! To increase the number of control-plane nodes, follow these steps: - + - + 1. Find your Constellation resource group. 2. Select the `scale-set-controlplanes`. 3. Go to **settings** and **scaling**. 4. Set the new (increased) **instance count** and **save**. - - + + 1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). 2. **Edit** the **control-plane** instance group. 3. Set the new (increased) **number of instances** and **save**. - - + + 1. Go to Auto Scaling Groups and select the control-plane ASG to scale up. 2. Click **Edit** 3. Set the new (increased) **Desired capacity** and **Update**. 
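After scaling up through the CSP console, you can verify from inside the cluster that the additional control-plane nodes have joined. Assuming the standard kubeadm node labels, for example:

```bash
# List control-plane nodes and check that the new ones report Ready.
kubectl get nodes -l node-role.kubernetes.io/control-plane
```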
- - + + If you scale down the number of control-planes nodes, the removed nodes won't be able to exit the `etcd` cluster correctly. This will endanger the quorum that's required to run a stable Kubernetes control plane. diff --git a/docs/versioned_docs/version-2.11/workflows/storage.md b/docs/versioned_docs/version-2.11/workflows/storage.md index 06fbc4de6..9e3d96346 100644 --- a/docs/versioned_docs/version-2.11/workflows/storage.md +++ b/docs/versioned_docs/version-2.11/workflows/storage.md @@ -21,30 +21,30 @@ For more details see [encrypted persistent storage](../architecture/encrypted-st Constellation supports the following drivers, which offer node-level encryption and optional integrity protection. - - + + **Constellation CSI driver for Azure Disk**: Mount Azure [Disk Storage](https://azure.microsoft.com/en-us/services/storage/disks/#overview) into your Constellation cluster. See the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-azuredisk-csi-driver) for more information. Since Azure Disks are mounted as `ReadWriteOnce`, they're only available to a single pod. - - + + **Constellation CSI driver for GCP Persistent Disk**: Mount [Persistent Disk](https://cloud.google.com/persistent-disk) block storage into your Constellation cluster. Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-gcp-compute-persistent-disk-csi-driver) for more information. - - + + **Constellation CSI driver for AWS Elastic Block Store** Mount [Elastic Block Store](https://aws.amazon.com/ebs/) storage volumes into your Constellation cluster. Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-aws-ebs-csi-driver) for more information. - - + + Note that in case the options above aren't a suitable solution for you, Constellation is compatible with all other CSI-based storage options. For example, you can use [AWS EFS](https://docs.aws.amazon.com/en_en/eks/latest/userguide/efs-csi.html), [Azure Files](https://docs.microsoft.com/en-us/azure/storage/files/storage-files-introduction), or [GCP Filestore](https://cloud.google.com/filestore) with Constellation out of the box. Constellation is just not providing transparent encryption on the node level for these storage types yet. @@ -53,8 +53,8 @@ Note that in case the options above aren't a suitable solution for you, Constell The Constellation CLI automatically installs Constellation's CSI driver for the selected CSP in your cluster. If you don't need a CSI driver or wish to deploy your own, you can disable the automatic installation by setting `deployCSIDriver` to `false` in your Constellation config file. - - + + Azure comes with two storage classes by default. @@ -82,8 +82,8 @@ Note that volume expansion isn't supported for integrity-protected disks. ::: - - + + GCP comes with two storage classes by default. @@ -111,8 +111,8 @@ Note that volume expansion isn't supported for integrity-protected disks. ::: - - + + AWS comes with two storage classes by default. @@ -140,8 +140,8 @@ Note that volume expansion isn't supported for integrity-protected disks. ::: - - + + 1. 
Create a [persistent volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) diff --git a/docs/versioned_docs/version-2.11/workflows/terminate.md b/docs/versioned_docs/version-2.11/workflows/terminate.md index f33489ca5..647eadb42 100644 --- a/docs/versioned_docs/version-2.11/workflows/terminate.md +++ b/docs/versioned_docs/version-2.11/workflows/terminate.md @@ -4,7 +4,7 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + --- @@ -16,8 +16,8 @@ All ephemeral storage and state of your cluster will be lost. Make sure any data ::: - - + + Terminate the cluster by running: ```bash @@ -40,8 +40,8 @@ resources manually. Just run the `terminate` command again afterward to continue ::: - - + + Terminate the cluster by running: ```bash @@ -56,5 +56,5 @@ rm constellation-id.json constellation-admin.conf Only the `constellation-mastersecret.json` and the configuration file remain. - - + + diff --git a/docs/versioned_docs/version-2.11/workflows/troubleshooting.md b/docs/versioned_docs/version-2.11/workflows/troubleshooting.md index c40e6496e..a3e25a0fe 100644 --- a/docs/versioned_docs/version-2.11/workflows/troubleshooting.md +++ b/docs/versioned_docs/version-2.11/workflows/troubleshooting.md @@ -101,8 +101,8 @@ To provide information during early stages of a node's boot process, Constellati You can view this information in the following places: - - + + 1. In your Azure subscription find the Constellation resource group. 2. Inside the resource group find the Application Insights resource called `constellation-insights-*`. @@ -112,8 +112,8 @@ You can view this information in the following places: To **find the disk UUIDs** use the following query: `traces | where message contains "Disk UUID"` - - + + 1. Select the project that hosts Constellation. 2. Go to the `Compute Engine` service. @@ -128,16 +128,16 @@ Constellation uses the default bucket to store logs. Its [default retention peri ::: - - + + 1. Open [AWS CloudWatch](https://console.aws.amazon.com/cloudwatch/home) 2. Select [Log Groups](https://console.aws.amazon.com/cloudwatch/home#logsV2:log-groups) 3. Select the log group that matches the name of your cluster. 4. Select the log stream for control or worker type nodes. - - + + ### Node shell access diff --git a/docs/versioned_docs/version-2.11/workflows/trusted-launch.md b/docs/versioned_docs/version-2.11/workflows/trusted-launch.md index 11d0a096c..13bd63ba6 100644 --- a/docs/versioned_docs/version-2.11/workflows/trusted-launch.md +++ b/docs/versioned_docs/version-2.11/workflows/trusted-launch.md @@ -14,7 +14,7 @@ Constellation supports trusted launch VMs with instance types `Standard_D*_v4` a Azure currently doesn't support [community galleries for trusted launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/share-gallery-community). Thus, you need to manually import the Constellation node image into your cloud subscription. -The latest image is available at `https://cdn.confidential.cloud/constellation/images/azure/trusted-launch/v2.2.0/constellation.img`. Simply adjust the version number to download a newer version. +The latest image is available at <https://cdn.confidential.cloud/constellation/images/azure/trusted-launch/v2.2.0/constellation.img>. Simply adjust the version number to download a newer version. After you've downloaded the image, create a resource group `constellation-images` in your Azure subscription and import the image.
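If you want to create the resource group yourself before importing, a minimal Azure CLI sketch is shown below (the region is only an example); the import itself is handled by the script described next.

```bash
# Create the resource group that will hold the imported node image.
az group create --name constellation-images --location westeurope
```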
You can use a script to do this: @@ -26,7 +26,6 @@ AZURE_IMAGE_VERSION=2.2.0 AZURE_RESOURCE_GROUP_NAME=constellation-images AZURE_I ``` The script creates the following resources: - 1. A new image gallery with the default name `constellation-import` 2. A new image definition with the default name `constellation` 3. The actual image with the provided version. In this case `2.2.0` diff --git a/docs/versioned_docs/version-2.11/workflows/verify-cli.md b/docs/versioned_docs/version-2.11/workflows/verify-cli.md index e33569d37..1280c51b0 100644 --- a/docs/versioned_docs/version-2.11/workflows/verify-cli.md +++ b/docs/versioned_docs/version-2.11/workflows/verify-cli.md @@ -4,11 +4,11 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + --- -Edgeless Systems uses [sigstore](https://www.sigstore.dev/) and [SLSA](https://slsa.dev) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/cosign/signing/overview/), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at `https://rekor.sigstore.dev`. +Edgeless Systems uses [sigstore](https://www.sigstore.dev/) and [SLSA](https://slsa.dev) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/signing/quickstart), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at <https://rekor.sigstore.dev>. :::note The public key for Edgeless Systems' long-term code-signing key is: @@ -20,7 +20,7 @@ JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== -----END PUBLIC KEY----- ``` -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). +The public key is also available for download at <https://edgeless.systems/es.pub> and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). ::: The Rekor transparency log is a public append-only ledger that verifies and records signatures and associated metadata. The Rekor transparency log enables everyone to observe the sequence of (software) signatures issued by Edgeless Systems and many other parties. The transparency log allows for the public identification of dubious or malicious signatures. @@ -33,11 +33,7 @@ You don't need to verify the Constellation node images. This is done automatical ## Verify the signature -:::info -This guide assumes Linux on an amd64 processor. The exact steps for other platforms differ slightly. -::: - -First, [install the Cosign CLI](https://docs.sigstore.dev/cosign/system_config/installation/). +First, [install the Cosign CLI](https://docs.sigstore.dev/system_config/installation).
Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example: ```shell-session $ cosign verify-blob --key https://edgeless.systems/es.pub --signature constellation-linux-amd64.sig constellation-linux-amd64 diff --git a/docs/versioned_docs/version-2.12/architecture/attestation.md b/docs/versioned_docs/version-2.12/architecture/attestation.md index f9c9ac38e..e37533995 100644 --- a/docs/versioned_docs/version-2.12/architecture/attestation.md +++ b/docs/versioned_docs/version-2.12/architecture/attestation.md @@ -121,8 +121,8 @@ Constellation allows to specify in the config which measurements should be enfor Enforcing non-reproducible measurements controlled by the cloud provider means that changes in these values require manual updates to the cluster's config. By default, Constellation only enforces measurements that are stable values produced by the infrastructure or by Constellation directly. - - + + Constellation uses the [vTPM](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch#vtpm) feature of Azure CVMs for runtime measurements. This vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. @@ -152,8 +152,8 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + Constellation uses the [vTPM](https://cloud.google.com/compute/confidential-vm/docs/about-cvm) feature of CVMs on GCP for runtime measurements. Note that this vTPM doesn't run inside the hardware-protected CVM context, but is emulated by the hypervisor. @@ -185,8 +185,8 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + Constellation uses the [vTPM](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html) (NitroTPM) feature of the [AWS Nitro System](http://aws.amazon.com/ec2/nitro/) on AWS for runtime measurements. @@ -217,16 +217,16 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + ### CVM verification To verify the integrity of the received attestation statement, a chain of trust from the CVM technology to the interface providing the statement has to be established. For verification of the CVM technology, Constellation may expose additional options in its config file. - - + + On Azure, AMD SEV-SNP is used to provide runtime encryption to the VMs. An SEV-SNP attestation report is used to establish trust in the vTPM running inside the VM. @@ -248,18 +248,18 @@ You may customize certain parameters for verification of the attestation stateme More explicitly, it controls the verification of the `IDKeyDigest` value in the SEV-SNP attestation report. You can provide a list of accepted key digests and specify a policy on how this list is compared against the reported `IDKeyDigest`. - - + + There is no additional configuration available for GCP. - - + + There is no additional configuration available for AWS. 
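The measurements that end up being enforced live in the Constellation config file. As a rough sketch of how you might refresh them before verifying a cluster, assuming this release ships the `constellation config fetch-measurements` subcommand (not shown in the text above):

```bash
# Sketch: update the expected measurements in constellation-conf.yaml before verification.
# Assumes the `config fetch-measurements` subcommand of this release; it downloads the
# reference measurements for the configured node image and writes them into the config file.
constellation config fetch-measurements
```

Which of those values are then enforced or merely warned about follows the per-measurement settings described above.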
- - + + ## Cluster attestation diff --git a/docs/versioned_docs/version-2.12/architecture/keys.md b/docs/versioned_docs/version-2.12/architecture/keys.md index 553d9d4e2..f2c8c3fba 100644 --- a/docs/versioned_docs/version-2.12/architecture/keys.md +++ b/docs/versioned_docs/version-2.12/architecture/keys.md @@ -105,7 +105,7 @@ Initially, it will support the following KMSs: * [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/#product-overview) * [KMIP-compatible KMS](https://www.oasis-open.org/committees/tc_home.php?wg_abbrev=kmip) -Storing the keys in Cloud KMS of AWS, Azure, or GCP binds the key usage to the particular cloud identity access management (IAM). +Storing the keys in Cloud KMS of AWS, GCP, or Azure binds the key usage to the particular cloud identity access management (IAM). In the future, Constellation will support remote attestation-based access policies for Cloud KMS once available. Note that using a Cloud KMS limits the isolation and protection to the guarantees of the particular offering. diff --git a/docs/versioned_docs/version-2.12/getting-started/first-steps-local.md b/docs/versioned_docs/version-2.12/getting-started/first-steps-local.md index a6e825906..de9c66e9b 100644 --- a/docs/versioned_docs/version-2.12/getting-started/first-steps-local.md +++ b/docs/versioned_docs/version-2.12/getting-started/first-steps-local.md @@ -45,8 +45,8 @@ sudo iptables -P FORWARD ACCEPT ## Create a cluster - - + + With the `constellation mini` command, you can deploy and test Constellation locally. This mode is called MiniConstellation. Conceptually, MiniConstellation is similar to [MicroK8s](https://microk8s.io/), [K3s](https://k3s.io/), and [minikube](https://minikube.sigs.k8s.io/docs/). @@ -74,8 +74,8 @@ constellation mini up This will configure your current directory as the [workspace](../architecture/orchestration.md#workspaces) for this cluster. All `constellation` commands concerning this cluster need to be issued from this directory. - - + + With the QEMU provider, you can create a local Constellation cluster as if it were in the cloud. The provider uses [QEMU](https://www.qemu.org/) to create multiple VMs for the cluster nodes, which interact with each other. @@ -151,8 +151,8 @@ attaching persistent storage, or autoscaling aren't available. export KUBECONFIG="$PWD/constellation-admin.conf" ``` - - + + ## Connect to the cluster @@ -205,8 +205,8 @@ worker-0 Ready 32s v1.24.6 ## Terminate your cluster - - + + Once you are done, you can clean up the created resources using the following command: @@ -217,8 +217,8 @@ constellation mini down This will destroy your cluster and clean up your workspace. The VM image and cluster configuration file (`constellation-conf.yaml`) will be kept and may be reused to create new clusters. - - + + Once you are done, you can clean up the created resources using the following command: @@ -246,8 +246,8 @@ Your Constellation cluster was terminated successfully. This will destroy your cluster and clean up your workspace. The VM image and cluster configuration file (`constellation-conf.yaml`) will be kept and may be reused to create new clusters. 
- - + + ## Troubleshooting diff --git a/docs/versioned_docs/version-2.12/getting-started/first-steps.md b/docs/versioned_docs/version-2.12/getting-started/first-steps.md index 9ebe21701..07b7f8410 100644 --- a/docs/versioned_docs/version-2.12/getting-started/first-steps.md +++ b/docs/versioned_docs/version-2.12/getting-started/first-steps.md @@ -15,39 +15,39 @@ If you encounter any problem with the following steps, make sure to use the [lat 1. Create the [configuration file](../workflows/config.md) for your cloud provider. - + - + ```bash constellation config generate azure ``` - + - + ```bash constellation config generate gcp ``` - + - + ```bash constellation config generate aws ``` - + - + 2. Create your [IAM configuration](../workflows/config.md#creating-an-iam-configuration). - + - + ```bash constellation iam create azure --region=westus --resourceGroup=constellTest --servicePrincipal=spTest --update-config @@ -62,21 +62,21 @@ If you encounter any problem with the following steps, make sure to use the [lat * `westeurope` * `southeastasia` - + - + ```bash - constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --serviceAccountID=constell-test --update-config + constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west2-a --serviceAccountID=constell-test --update-config ``` - This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. + This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west2-a` creating a new service account `constell-test`. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `C2D` or `N2D`. - + - + ```bash constellation iam create aws --zone=us-east-2a --prefix=constellTest --update-config @@ -103,8 +103,8 @@ If you encounter any problem with the following steps, make sure to use the [lat You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). - - + + :::tip To learn about all options you have for managing IAM resources and Constellation configuration, see the [Configuration workflow](../workflows/config.md). diff --git a/docs/versioned_docs/version-2.12/getting-started/install.md b/docs/versioned_docs/version-2.12/getting-started/install.md index 2fabcf0b1..03848d23b 100644 --- a/docs/versioned_docs/version-2.12/getting-started/install.md +++ b/docs/versioned_docs/version-2.12/getting-started/install.md @@ -11,15 +11,15 @@ Make sure the following requirements are met: - Your machine is running Linux or macOS - You have admin rights on your machine - [kubectl](https://kubernetes.io/docs/tasks/tools/) is installed -- Your CSP is Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) +- Your CSP is Microsoft Azure, Google Cloud Platform (GCP), or Amazon Web Services (AWS) ## Install the Constellation CLI The CLI executable is available at [GitHub](https://github.com/edgelesssys/constellation/releases). 
Install it with the following commands: - - + + 1. Download the CLI: @@ -35,8 +35,8 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-linux-amd64 /usr/local/bin/constellation ``` - - + + 1. Download the CLI: @@ -52,9 +52,10 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-linux-arm64 /usr/local/bin/constellation ``` - - + + + 1. Download the CLI: @@ -70,9 +71,11 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-darwin-arm64 /usr/local/bin/constellation ``` - - + + + + 1. Download the CLI: @@ -88,8 +91,8 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-darwin-amd64 /usr/local/bin/constellation ``` - - + + :::tip The CLI supports autocompletion for various shells. To set it up, run `constellation completion` and follow the given steps. @@ -105,42 +108,39 @@ If you don't have a cloud subscription, you can also set up a [local Constellati ### Required permissions - - + + The following [resource providers need to be registered](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/resource-providers-and-types#register-resource-provider) in your subscription: - -- `Microsoft.Attestation` -- `Microsoft.Compute` -- `Microsoft.Insights` -- `Microsoft.ManagedIdentity` -- `Microsoft.Network` +* `Microsoft.Attestation` \[2] +* `Microsoft.Compute` +* `Microsoft.Insights` +* `Microsoft.ManagedIdentity` +* `Microsoft.Network` By default, Constellation tries to register these automatically if they haven't been registered before. To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: - -- `*/register/action` \[1] -- `Microsoft.Authorization/roleAssignments/*` -- `Microsoft.Authorization/roleDefinitions/*` -- `Microsoft.ManagedIdentity/userAssignedIdentities/*` -- `Microsoft.Resources/subscriptions/resourcegroups/*` +* `*/register/action` \[1] +* `Microsoft.Authorization/roleAssignments/*` +* `Microsoft.Authorization/roleDefinitions/*` +* `Microsoft.ManagedIdentity/userAssignedIdentities/*` +* `Microsoft.Resources/subscriptions/resourcegroups/*` The built-in `Owner` role is a superset of these permissions. 
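If you'd rather register the resource providers yourself (for example, to then set `ARM_SKIP_PROVIDER_REGISTRATION=true` and omit `*/register/action`), a minimal sketch with the Azure CLI could look like this; it assumes an authenticated `az` session with sufficient rights on the target subscription:

```bash
# Sketch: pre-register the resource providers listed above, then check their state.
# Assumes the Azure CLI is installed and logged in to the correct subscription.
for ns in Microsoft.Attestation Microsoft.Compute Microsoft.Insights Microsoft.ManagedIdentity Microsoft.Network; do
  az provider register --namespace "$ns"
done
az provider list --query "[?registrationState=='Registered'].namespace" --output table
```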
To [create a Constellation cluster](../workflows/create.md#the-create-step), you need the following permissions: - -- `Microsoft.Attestation/attestationProviders/*` -- `Microsoft.Compute/virtualMachineScaleSets/*` -- `Microsoft.Insights/components/*` -- `Microsoft.ManagedIdentity/userAssignedIdentities/*` -- `Microsoft.Network/loadBalancers/*` -- `Microsoft.Network/loadBalancers/backendAddressPools/*` -- `Microsoft.Network/networkSecurityGroups/*` -- `Microsoft.Network/publicIPAddresses/*` -- `Microsoft.Network/virtualNetworks/*` -- `Microsoft.Network/virtualNetworks/subnets/*` -- `Microsoft.Network/natGateways/*` +* `Microsoft.Attestation/attestationProviders/*` \[2] +* `Microsoft.Compute/virtualMachineScaleSets/*` +* `Microsoft.Insights/components/*` +* `Microsoft.ManagedIdentity/userAssignedIdentities/*` +* `Microsoft.Network/loadBalancers/*` +* `Microsoft.Network/loadBalancers/backendAddressPools/*` +* `Microsoft.Network/networkSecurityGroups/*` +* `Microsoft.Network/publicIPAddresses/*` +* `Microsoft.Network/virtualNetworks/*` +* `Microsoft.Network/virtualNetworks/subnets/*` +* `Microsoft.Network/natGateways/*` The built-in `Contributor` role is a superset of these permissions. @@ -148,94 +148,94 @@ Follow Microsoft's guide on [understanding](https://learn.microsoft.com/en-us/az 1: You can omit `*/register/Action` if the resource providers mentioned above are already registered and the `ARM_SKIP_PROVIDER_REGISTRATION` environment variable is set to `true` when creating the IAM configuration. - - +2: You can omit `Microsoft.Attestation/attestationProviders/*` and the registration of `Microsoft.Attestation` if `EnforceIDKeyDigest` isn't set to `MAAFallback` in the [config file](../workflows/config.md#configure-your-cluster). + + + Create a new project for Constellation or use an existing one. Enable the [Compute Engine API](https://console.cloud.google.com/apis/library/compute.googleapis.com) on it. To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: - -- `iam.serviceAccountKeys.create` -- `iam.serviceAccountKeys.delete` -- `iam.serviceAccountKeys.get` -- `iam.serviceAccounts.create` -- `iam.serviceAccounts.delete` -- `iam.serviceAccounts.get` -- `resourcemanager.projects.getIamPolicy` -- `resourcemanager.projects.setIamPolicy` +* `iam.serviceAccountKeys.create` +* `iam.serviceAccountKeys.delete` +* `iam.serviceAccountKeys.get` +* `iam.serviceAccounts.create` +* `iam.serviceAccounts.delete` +* `iam.serviceAccounts.get` +* `resourcemanager.projects.getIamPolicy` +* `resourcemanager.projects.setIamPolicy` Together, the built-in roles `roles/editor` and `roles/resourcemanager.projectIamAdmin` form a superset of these permissions. 
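As an illustrative sketch (not part of the guide itself), the two built-in roles mentioned above could be granted with the gcloud CLI; `PROJECT_ID` and `USER_EMAIL` are placeholders:

```bash
# Sketch: bind the built-in roles to the user that will run `constellation iam create`.
# PROJECT_ID and USER_EMAIL are placeholders for your project and account.
gcloud projects add-iam-policy-binding PROJECT_ID \
  --member="user:USER_EMAIL" --role="roles/editor"
gcloud projects add-iam-policy-binding PROJECT_ID \
  --member="user:USER_EMAIL" --role="roles/resourcemanager.projectIamAdmin"
```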
To [create a Constellation cluster](../workflows/create.md#the-create-step), you need the following permissions: - -- `compute.addresses.createInternal` -- `compute.addresses.deleteInternal` -- `compute.addresses.get` -- `compute.addresses.useInternal` -- `compute.backendServices.create` -- `compute.backendServices.delete` -- `compute.backendServices.get` -- `compute.backendServices.use` -- `compute.disks.create` -- `compute.firewalls.create` -- `compute.firewalls.delete` -- `compute.firewalls.get` -- `compute.firewalls.update` -- `compute.globalAddresses.create` -- `compute.globalAddresses.delete` -- `compute.globalAddresses.get` -- `compute.globalAddresses.use` -- `compute.globalForwardingRules.create` -- `compute.globalForwardingRules.delete` -- `compute.globalForwardingRules.get` -- `compute.globalForwardingRules.setLabels` -- `compute.globalOperations.get` -- `compute.healthChecks.create` -- `compute.healthChecks.delete` -- `compute.healthChecks.get` -- `compute.healthChecks.useReadOnly` -- `compute.instanceGroupManagers.create` -- `compute.instanceGroupManagers.delete` -- `compute.instanceGroupManagers.get` -- `compute.instanceGroupManagers.update` -- `compute.instanceGroups.create` -- `compute.instanceGroups.delete` -- `compute.instanceGroups.get` -- `compute.instanceGroups.update` -- `compute.instanceGroups.use` -- `compute.instances.create` -- `compute.instances.setLabels` -- `compute.instances.setMetadata` -- `compute.instances.setTags` -- `compute.instanceTemplates.create` -- `compute.instanceTemplates.delete` -- `compute.instanceTemplates.get` -- `compute.instanceTemplates.useReadOnly` -- `compute.networks.create` -- `compute.networks.delete` -- `compute.networks.get` -- `compute.networks.updatePolicy` -- `compute.routers.create` -- `compute.routers.delete` -- `compute.routers.get` -- `compute.routers.update` -- `compute.subnetworks.create` -- `compute.subnetworks.delete` -- `compute.subnetworks.get` -- `compute.subnetworks.use` -- `compute.targetTcpProxies.create` -- `compute.targetTcpProxies.delete` -- `compute.targetTcpProxies.get` -- `compute.targetTcpProxies.use` -- `iam.serviceAccounts.actAs` +* `compute.addresses.createInternal` +* `compute.addresses.deleteInternal` +* `compute.addresses.get` +* `compute.addresses.useInternal` +* `compute.backendServices.create` +* `compute.backendServices.delete` +* `compute.backendServices.get` +* `compute.backendServices.use` +* `compute.disks.create` +* `compute.firewalls.create` +* `compute.firewalls.delete` +* `compute.firewalls.get` +* `compute.firewalls.update` +* `compute.globalAddresses.create` +* `compute.globalAddresses.delete` +* `compute.globalAddresses.get` +* `compute.globalAddresses.use` +* `compute.globalForwardingRules.create` +* `compute.globalForwardingRules.delete` +* `compute.globalForwardingRules.get` +* `compute.globalForwardingRules.setLabels` +* `compute.globalOperations.get` +* `compute.healthChecks.create` +* `compute.healthChecks.delete` +* `compute.healthChecks.get` +* `compute.healthChecks.useReadOnly` +* `compute.instanceGroupManagers.create` +* `compute.instanceGroupManagers.delete` +* `compute.instanceGroupManagers.get` +* `compute.instanceGroupManagers.update` +* `compute.instanceGroups.create` +* `compute.instanceGroups.delete` +* `compute.instanceGroups.get` +* `compute.instanceGroups.update` +* `compute.instanceGroups.use` +* `compute.instances.create` +* `compute.instances.setLabels` +* `compute.instances.setMetadata` +* `compute.instances.setTags` +* `compute.instanceTemplates.create` +* 
`compute.instanceTemplates.delete` +* `compute.instanceTemplates.get` +* `compute.instanceTemplates.useReadOnly` +* `compute.networks.create` +* `compute.networks.delete` +* `compute.networks.get` +* `compute.networks.updatePolicy` +* `compute.routers.create` +* `compute.routers.delete` +* `compute.routers.get` +* `compute.routers.update` +* `compute.subnetworks.create` +* `compute.subnetworks.delete` +* `compute.subnetworks.get` +* `compute.subnetworks.use` +* `compute.targetTcpProxies.create` +* `compute.targetTcpProxies.delete` +* `compute.targetTcpProxies.get` +* `compute.targetTcpProxies.use` +* `iam.serviceAccounts.actAs` Together, the built-in roles `roles/editor`, `roles/compute.instanceAdmin` and `roles/resourcemanager.projectIamAdmin` form a superset of these permissions. Follow Google's guide on [understanding](https://cloud.google.com/iam/docs/understanding-roles) and [assigning roles](https://cloud.google.com/iam/docs/granting-changing-revoking-access). - - + + To set up a Constellation cluster, you need to perform two tasks that require permissions: create the infrastructure and create roles for cluster nodes. Both of these actions can be performed by different users, e.g., an administrator to create roles and a DevOps engineer to create the infrastructure. @@ -281,12 +281,13 @@ The built-in `AdministratorAccess` policy is a superset of these permissions. To [create a Constellation cluster](../workflows/create.md#the-create-step), see the permissions of [main.tf](https://github.com/edgelesssys/constellation/blob/main/terraform/infrastructure/iam/aws/main.tf). + The built-in `PowerUserAccess` policy is a superset of these permissions. Follow Amazon's guide on [understanding](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) and [managing policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html). - - + + ### Authentication @@ -296,8 +297,8 @@ You need to authenticate with your CSP. The following lists the required steps f The steps for a *testing* environment are simpler. However, they may expose secrets to the CSP. If in doubt, follow the *production* steps. ::: - - + + **Testing** @@ -313,8 +314,8 @@ az login Other options are described in Azure's [authentication guide](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli). - - + + **Testing** @@ -337,8 +338,8 @@ Use one of the following options on a trusted machine: Follow [Google's guide](https://cloud.google.com/docs/authentication/production#manually) for setting up your credentials. - - + + **Testing** @@ -354,9 +355,10 @@ aws configure Options and first steps are described in the [AWS CLI documentation](https://docs.aws.amazon.com/cli/index.html). - + - + + ## Next steps diff --git a/docs/versioned_docs/version-2.12/overview/clouds.md b/docs/versioned_docs/version-2.12/overview/clouds.md index dfc3d5307..3ccbb0d6d 100644 --- a/docs/versioned_docs/version-2.12/overview/clouds.md +++ b/docs/versioned_docs/version-2.12/overview/clouds.md @@ -31,11 +31,11 @@ This firmware is signed by Azure. The signature is reflected in the remote-attestation statements of CVMs. Thus, the Azure closed-source firmware becomes part of Constellation's trusted computing base (TCB). -\* Recently, [Azure announced the open source paravisor OpenHCL](https://techcommunity.microsoft.com/blog/windowsosplatform/openhcl-the-new-open-source-paravisor/4273172). It's the foundation for fully open source and verifiable CVM firmware. 
Once Azure provides their CVM firmware with reproducible builds based on OpenHCL, (4) switches from *No* to *Yes*. Constellation will support OpenHCL based firmware on Azure in the future. +\* Recently, Azure [announced](https://techcommunity.microsoft.com/t5/azure-confidential-computing/azure-confidential-vms-using-sev-snp-dcasv5-ecasv5-are-now/ba-p/3573747) the *limited preview* of CVMs with customizable firmware. With this CVM type, (4) switches from *No* to *Yes*. Constellation will support customizable firmware on Azure in the future. ## Google Cloud Platform (GCP) -The [CVMs Generally Available in GCP](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview#amd_sev) are based on AMD SEV but don't have SNP features enabled. +The [CVMs Generally Available in GCP](https://cloud.google.com/compute/confidential-vm/docs/create-confidential-vm-instance) are based on AMD SEV but don't have SNP features enabled. CVMs with SEV-SNP enabled are currently in [private preview](https://cloud.google.com/blog/products/identity-security/rsa-snp-vm-more-confidential). Regarding (3), with their SEV-SNP offering Google provides direct access to remote-attestation statements. However, regarding (4), the CVMs still include closed-source firmware. diff --git a/docs/versioned_docs/version-2.12/overview/confidential-kubernetes.md b/docs/versioned_docs/version-2.12/overview/confidential-kubernetes.md index bff8c3322..ca20df4de 100644 --- a/docs/versioned_docs/version-2.12/overview/confidential-kubernetes.md +++ b/docs/versioned_docs/version-2.12/overview/confidential-kubernetes.md @@ -23,9 +23,9 @@ With the above, Constellation wraps an entire cluster into one coherent and veri ![Confidential Kubernetes](../_media/concept-constellation.svg) -## Comparison: Managed Kubernetes with CVMs +## Contrast: Managed Kubernetes with CVMs -In comparison, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. Consequently, this approach leaves a large attack surface, as is depicted in the following. +In contrast, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. 
For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. Consequently, this approach leaves a large attack surface, as is depicted in the following. ![Concept: Managed Kubernetes plus CVMs](../_media/concept-managed.svg) diff --git a/docs/versioned_docs/version-2.12/overview/performance/compute.md b/docs/versioned_docs/version-2.12/overview/performance/compute.md deleted file mode 100644 index 88dd4b1b2..000000000 --- a/docs/versioned_docs/version-2.12/overview/performance/compute.md +++ /dev/null @@ -1,11 +0,0 @@ -# Impact of runtime encryption on compute performance - -All nodes in a Constellation cluster are executed inside Confidential VMs (CVMs). Consequently, the performance of Constellation is inherently linked to the performance of these CVMs. - -## AMD and Azure benchmarking - -AMD and Azure have collectively released a [performance benchmark](https://community.amd.com/t5/business/microsoft-azure-confidential-computing-powered-by-3rd-gen-epyc/ba-p/497796) for CVMs that utilize 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. This benchmark, which included a variety of mostly compute-intensive tests such as SPEC CPU 2017 and CoreMark, demonstrated that CVMs experience only minor performance degradation (ranging from 2% to 8%) when compared to standard VMs. Such results are indicative of the performance that can be expected from compute-intensive workloads running with Constellation on Azure. - -## AMD and Google benchmarking - -Similarly, AMD and Google have jointly released a [performance benchmark](https://www.amd.com/system/files/documents/3rd-gen-epyc-gcp-c2d-conf-compute-perf-brief.pdf) for CVMs employing 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. With high-performance computing workloads such as WRF, NAMD, Ansys CFS, and Ansys LS_DYNA, they observed analogous findings, with only minor performance degradation (between 2% and 4%) compared to standard VMs. These outcomes are reflective of the performance that can be expected for compute-intensive workloads running with Constellation on GCP. diff --git a/docs/versioned_docs/version-2.12/overview/performance/io.md b/docs/versioned_docs/version-2.12/overview/performance/io.md index 3ae796f8a..dc7cf3d8b 100644 --- a/docs/versioned_docs/version-2.12/overview/performance/io.md +++ b/docs/versioned_docs/version-2.12/overview/performance/io.md @@ -58,7 +58,7 @@ The following infrastructure configurations was used: This section gives a thorough analysis of the network performance of Constellation, specifically focusing on measuring TCP and UDP bandwidth. The benchmark measured the bandwidth of pod-to-pod and pod-to-service connections between two different nodes using [`iperf`](https://iperf.fr/). -GKE and Constellation on GCP had a maximum network bandwidth of [10 Gbps](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines). +GKE and Constellation on GCP had a maximum network bandwidth of [10 Gbps](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machineshttps://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines). AKS with `Standard_D4as_v5` machines a maximum network bandwidth of [12.5 Gbps](https://learn.microsoft.com/en-us/azure/virtual-machines/dasv5-dadsv5-series#dasv5-series). The Confidential VM equivalent `Standard_DC4as_v5` currently has a network bandwidth of [1.25 Gbps](https://learn.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series#dcasv5-series-products). 
Therefore, to make the test comparable, both AKS and Constellation on Azure were running with `Standard_DC4as_v5` machines and 1.25 Gbps bandwidth. diff --git a/docs/versioned_docs/version-2.12/overview/performance/performance.md b/docs/versioned_docs/version-2.12/overview/performance/performance.md index 59bf86602..7f22a693e 100644 --- a/docs/versioned_docs/version-2.12/overview/performance/performance.md +++ b/docs/versioned_docs/version-2.12/overview/performance/performance.md @@ -1,10 +1,18 @@ # Performance analysis of Constellation -This section provides a comprehensive examination of the performance characteristics of Constellation. +This section provides a comprehensive examination of the performance characteristics of Constellation, encompassing various aspects, including runtime encryption, I/O benchmarks, and real-world applications. -## Runtime encryption +## Impact of runtime encryption on performance -Runtime encryption affects compute performance. [Benchmarks by Azure and Google](compute.md) show that the performance degradation of Confidential VMs (CVMs) is small, ranging from 2% to 8% for compute-intensive workloads. +All nodes in a Constellation cluster are executed inside Confidential VMs (CVMs). Consequently, the performance of Constellation is inherently linked to the performance of these CVMs. + +### AMD and Azure benchmarking + +AMD and Azure have collectively released a [performance benchmark](https://community.amd.com/t5/business/microsoft-azure-confidential-computing-powered-by-3rd-gen-epyc/ba-p/497796) for CVMs that utilize 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. This benchmark, which included a variety of mostly compute-intensive tests such as SPEC CPU 2017 and CoreMark, demonstrated that CVMs experience only minor performance degradation (ranging from 2% to 8%) when compared to standard VMs. Such results are indicative of the performance that can be expected from compute-intensive workloads running with Constellation on Azure. + +### AMD and Google benchmarking + +Similarly, AMD and Google have jointly released a [performance benchmark](https://www.amd.com/system/files/documents/3rd-gen-epyc-gcp-c2d-conf-compute-perf-brief.pdf) for CVMs employing 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. With high-performance computing workloads such as WRF, NAMD, Ansys CFS, and Ansys LS_DYNA, they observed analogous findings, with only minor performance degradation (between 2% and 4%) compared to standard VMs. These outcomes are reflective of the performance that can be expected for compute-intensive workloads running with Constellation on GCP. ## I/O performance benchmarks diff --git a/docs/versioned_docs/version-2.12/overview/product.md b/docs/versioned_docs/version-2.12/overview/product.md index e42596fcc..ba7181aa9 100644 --- a/docs/versioned_docs/version-2.12/overview/product.md +++ b/docs/versioned_docs/version-2.12/overview/product.md @@ -6,6 +6,6 @@ From a security perspective, Constellation implements the [Confidential Kubernet From an operational perspective, Constellation provides the following key features: -* **Native support for different clouds**: Constellation works on Amazon Web Services (AWS), Microsoft Azure, and Google Cloud Platform (GCP). Support for OpenStack-based environments is coming with a future release. 
Constellation securely interfaces with the cloud infrastructure to provide [cluster autoscaling](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), [dynamic persistent volumes](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/), and [service load balancing](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). +* **Native support for different clouds**: Constellation works on Microsoft Azure, Google Cloud Platform (GCP), and Amazon Web Services (AWS). Support for OpenStack-based environments is coming with a future release. Constellation securely interfaces with the cloud infrastructure to provide [cluster autoscaling](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), [dynamic persistent volumes](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/), and [service load balancing](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). * **High availability**: Constellation uses a [multi-master architecture](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/) with a [stacked etcd topology](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/ha-topology/#stacked-etcd-topology) to ensure high availability. * **Integrated Day-2 operations**: Constellation lets you securely [upgrade](../workflows/upgrade.md) your cluster to a new release. It also lets you securely [recover](../workflows/recovery.md) a failed cluster. Both with a single command. diff --git a/docs/versioned_docs/version-2.12/workflows/config.md b/docs/versioned_docs/version-2.12/workflows/config.md index edc3c9091..95f95aeec 100644 --- a/docs/versioned_docs/version-2.12/workflows/config.md +++ b/docs/versioned_docs/version-2.12/workflows/config.md @@ -4,7 +4,7 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + --- @@ -14,49 +14,49 @@ Before you can create your cluster, you need to configure the identity and acces You can generate a configuration file for your CSP by using the following CLI command: - - + + ```bash constellation config generate azure ``` - - + + ```bash constellation config generate gcp ``` - - + + ```bash constellation config generate aws ``` - - + + This creates the file `constellation-conf.yaml` in the current directory. ## Choosing a VM type Constellation supports the following VM types: - - + + By default, Constellation uses `Standard_DC4as_v5` CVMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. For CVMs, any VM type with a minimum of 4 vCPUs from the [DCasv5 & DCadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series) or [ECasv5 & ECadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/ecasv5-ecadsv5-series) families is supported. You can also run `constellation config instance-types` to get the list of all supported options. - - + + By default, Constellation uses `n2d-standard-4` VMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. Supported are all machines with a minimum of 4 vCPUs from the [C2D](https://cloud.google.com/compute/docs/compute-optimized-machines#c2d_machine_types) or [N2D](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines) family. 
You can run `constellation config instance-types` to get the list of all supported options. - - + + By default, Constellation uses `m6a.xlarge` VMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. @@ -75,8 +75,8 @@ AWS is currently investigating the issue. SNP-based attestation will be enabled as soon as a fix is verified. ::: - - + + Fill the desired VM type into the **instanceType** fields in the `constellation-conf.yml` file. @@ -86,6 +86,7 @@ By default, Constellation creates the node groups `control_plane_default` and `w If you require additional control-plane or worker groups with different instance types, zone placements, or disk sizes, you can add additional node groups to the `constellation-conf.yml` file. Each node group can be scaled individually. + Consider the following example for AWS: ```yaml @@ -119,9 +120,9 @@ You can use the field `zone` to specify what availability zone nodes of the grou On Azure, this field is empty by default and nodes are automatically spread across availability zones. Consult the documentation of your cloud provider for more information: -* [AWS](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/) -* [Azure](https://azure.microsoft.com/en-us/explore/global-infrastructure/availability-zones) -* [GCP](https://cloud.google.com/compute/docs/regions-zones) +- [AWS](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/) +- [Azure](https://azure.microsoft.com/en-us/explore/global-infrastructure/availability-zones) +- [GCP](https://cloud.google.com/compute/docs/regions-zones) ## Choosing a Kubernetes version @@ -133,8 +134,8 @@ See also Constellation's [Kubernetes support policy](../architecture/versions.md You can create an IAM configuration for your cluster automatically using the `constellation iam create` command. If you already have a Constellation configuration file, you can add the `--update-config` flag to the command. This writes the needed IAM fields into your configuration. Furthermore, the flag updates the zone/region of the configuration if it hasn't been set yet. - - + + You must be authenticated with the [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). @@ -154,23 +155,23 @@ Note that CVMs are currently only supported in a few regions, check [Azure's pro Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - + + You must be authenticated with the [GCP CLI](https://cloud.google.com/sdk/gcloud) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). ```bash -constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --serviceAccountID=constell-test +constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west2-a --serviceAccountID=constell-test ``` -This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. +This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west2-a` creating a new service account `constell-test`. Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. 
You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `N2D`. Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - + + You must be authenticated with the [AWS CLI](https://aws.amazon.com/en/cli/) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). @@ -194,16 +195,16 @@ You can find a list of all [regions in AWS's documentation](https://docs.aws.ama Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - + +
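Before continuing, you can sanity-check that `--update-config` actually wrote the IAM values. A small sketch using `yq` (already used elsewhere in these docs); it assumes the CSP-specific values sit under the `provider` key of `constellation-conf.yaml`:

```bash
# Sketch: confirm the IAM fields were written into the config.
# Assumes the config layout of this release, with CSP values under `provider:`.
yq eval '.provider' constellation-conf.yaml
```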
Alternatively, you can manually create the IAM configuration on your CSP. The following describes the configuration fields and how you obtain the required information or create the required resources. - - + + * **subscription**: The UUID of your Azure subscription, e.g., `8b8bd01f-efd9-4113-9bd1-c82137c32da7`. @@ -232,19 +233,19 @@ The following describes the configuration fields and how you obtain the required The user-assigned identity is used by instances of the cluster to access other cloud resources. For more information about managed identities refer to [Azure's documentation](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/how-manage-user-assigned-managed-identities). - + - + * **project**: The ID of your GCP project, e.g., `constellation-129857`. You can find it on the [welcome screen of your GCP project](https://console.cloud.google.com/welcome). For more information refer to [Google's documentation](https://support.google.com/googleapi/answer/7014113). -* **region**: The GCP region you want to deploy your cluster in, e.g., `us-central1`. +* **region**: The GCP region you want to deploy your cluster in, e.g., `us-west1`. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). -* **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-central1-a`. +* **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-west1-a`. You can find a [list of all zones in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). @@ -258,9 +259,9 @@ The following describes the configuration fields and how you obtain the required Afterward, create and download a new JSON key for this service account. Place the downloaded file in your Constellation workspace, and set the config parameter to the filename, e.g., `constellation-129857-15343dba46cb.json`. - + - + * **region**: The name of your chosen AWS data center region, e.g., `us-east-2`. @@ -291,9 +292,9 @@ The following describes the configuration fields and how you obtain the required Alternatively, you can create the AWS profile with a tool of your choice. Use the JSON policy in [main.tf](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam/main.tf) in the resource `aws_iam_policy.worker_node_policy`. - + - +
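For the CLI route on AWS, a hedged sketch of creating that policy and attaching it to a dedicated user might look as follows; the user name, policy name, account ID, and the extracted JSON file are all placeholders:

```bash
# Sketch: create the policy from the JSON document in main.tf and attach it to a dedicated user.
# All names, the account ID, and constellation-create-policy.json are placeholders; copy the
# JSON policy out of the referenced main.tf into that file first.
aws iam create-policy \
  --policy-name constellation-create \
  --policy-document file://constellation-create-policy.json
aws iam attach-user-policy \
  --user-name constellation-admin \
  --policy-arn arn:aws:iam::123456789012:policy/constellation-create
```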
Now that you've configured your CSP, you can [create your cluster](./create.md). diff --git a/docs/versioned_docs/version-2.12/workflows/create.md b/docs/versioned_docs/version-2.12/workflows/create.md index eccb2699a..ab7bf80ea 100644 --- a/docs/versioned_docs/version-2.12/workflows/create.md +++ b/docs/versioned_docs/version-2.12/workflows/create.md @@ -4,7 +4,7 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + --- @@ -26,8 +26,8 @@ Before you create the cluster, make sure to have a [valid configuration file](./ ### Create - - + + ```bash constellation create @@ -35,8 +35,8 @@ constellation create *create* stores your cluster's state in a [`constellation-terraform`](../architecture/orchestration.md#cluster-creation-process) directory in your workspace. - - + + Terraform allows for an easier GitOps integration as well as meeting regulatory requirements. Since the Constellation CLI also uses Terraform under the hood, you can reuse the same Terraform files. @@ -77,8 +77,8 @@ yq eval '.infrastructure.initSecret ="$CONSTELL_INIT_SECRET"' --inplace constell yq eval '.infrastructure.clusterEndpoint ="$CONSTELL_IP"' --inplace constellation-state.yaml ``` - - + + ## The *init* step @@ -96,6 +96,6 @@ export KUBECONFIG="$PWD/constellation-admin.conf" 🏁 That's it. You've successfully created a Constellation cluster. -### Troubleshooting +### Troubleshooting In case `init` fails, the CLI collects logs from the bootstrapping instance and stores them inside `constellation-cluster.log`. diff --git a/docs/versioned_docs/version-2.12/workflows/recovery.md b/docs/versioned_docs/version-2.12/workflows/recovery.md index f2d5f22c1..955981749 100644 --- a/docs/versioned_docs/version-2.12/workflows/recovery.md +++ b/docs/versioned_docs/version-2.12/workflows/recovery.md @@ -16,8 +16,8 @@ You can check the health status of the nodes via the cloud service provider (CSP Constellation provides logging information on the boot process and status via [cloud logging](troubleshooting.md#cloud-logging). In the following, you'll find detailed descriptions for identifying clusters stuck in recovery for each CSP. - - + + In the Azure portal, find the cluster's resource group. Inside the resource group, open the control plane *Virtual machine scale set* `constellation-scale-set-controlplanes-`. @@ -51,8 +51,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. - - + + First, check that the control plane *Instance Group* has enough members in a *Ready* state. In the GCP Console, go to **Instance Groups** and check the group for the cluster's control plane `-control-plane-`. @@ -87,8 +87,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. - - + + First, open the AWS console to view all Auto Scaling Groups (ASGs) in the region of your cluster. Select the ASG of the control plane `--control-plane` and check that enough members are in a *Running* state. @@ -118,8 +118,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. 
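If you prefer the AWS CLI over the web console for the checks above, a sketch for pulling a node's serial console output (the instance ID is a placeholder):

```bash
# Sketch: fetch the latest serial console output of a suspect control-plane node via the AWS CLI.
# The instance ID is a placeholder; look it up in the control-plane Auto Scaling Group.
aws ec2 get-console-output \
  --instance-id i-0123456789abcdef0 \
  --latest --output text | tail -n 50
```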
- - + + ## Recover a cluster diff --git a/docs/versioned_docs/version-2.12/workflows/sbom.md b/docs/versioned_docs/version-2.12/workflows/sbom.md index 6c1702dee..9ef6eb65c 100644 --- a/docs/versioned_docs/version-2.12/workflows/sbom.md +++ b/docs/versioned_docs/version-2.12/workflows/sbom.md @@ -1,6 +1,6 @@ # Consume software bill of materials (SBOMs) - + --- @@ -11,15 +11,13 @@ SBOMs for Constellation are generated using [Syft](https://github.com/anchore/sy :::note The public key for Edgeless Systems' long-term code-signing key is: - ``` -----BEGIN PUBLIC KEY----- MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEf8F1hpmwE+YCFXzjGtaQcrL6XZVT JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== -----END PUBLIC KEY----- ``` - -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). +The public key is also available for download at https://edgeless.systems/es.pub and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). Make sure the key is available in a file named `cosign.pub` to execute the following examples. ::: @@ -40,7 +38,7 @@ cosign verify-blob --key cosign.pub --signature constellation.spdx.sbom.sig cons ### Container Images -SBOMs for container images are [attached to the image using Cosign](https://docs.sigstore.dev/cosign/signing/other_types/#sboms-software-bill-of-materials) and uploaded to the same registry. +SBOMs for container images are [attached to the image using Cosign](https://docs.sigstore.dev/signing/other_types#sboms-software-bill-of-materials) and uploaded to the same registry. As a consumer, use cosign to download and verify the SBOM: diff --git a/docs/versioned_docs/version-2.12/workflows/scale.md b/docs/versioned_docs/version-2.12/workflows/scale.md index 63b727c7d..06898ad0c 100644 --- a/docs/versioned_docs/version-2.12/workflows/scale.md +++ b/docs/versioned_docs/version-2.12/workflows/scale.md @@ -51,30 +51,30 @@ kubectl -n kube-system get nodes Alternatively, you can manually scale your cluster up or down: - - + + 1. Find your Constellation resource group. 2. Select the `scale-set-workers`. 3. Go to **settings** and **scaling**. 4. Set the new **instance count** and **save**. - - + + 1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). 2. **Edit** the **worker** instance group. 3. Set the new **number of instances** and **save**. - - + + 1. Go to Auto Scaling Groups and select the worker ASG to scale up. 2. Click **Edit** 3. Set the new (increased) **Desired capacity** and **Update**. - - + + ## Control-plane node scaling @@ -82,30 +82,30 @@ Control-plane nodes can **only be scaled manually and only scaled up**! To increase the number of control-plane nodes, follow these steps: - + - + 1. Find your Constellation resource group. 2. Select the `scale-set-controlplanes`. 3. Go to **settings** and **scaling**. 4. Set the new (increased) **instance count** and **save**. - - + + 1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). 2. **Edit** the **control-plane** instance group. 3. Set the new (increased) **number of instances** and **save**. - - + + 1. Go to Auto Scaling Groups and select the control-plane ASG to scale up. 2. Click **Edit** 3. Set the new (increased) **Desired capacity** and **Update**. 
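The same scale-up can also be done from the AWS CLI instead of the console; a sketch, with the ASG name and capacity as placeholders (remember that control-plane groups must only ever be scaled up):

```bash
# Sketch: raise the desired capacity of the control-plane ASG from the CLI.
# The ASG name and target capacity are placeholders; never scale control planes down.
aws autoscaling set-desired-capacity \
  --auto-scaling-group-name my-cluster--control-plane \
  --desired-capacity 5
```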
- - + + If you scale down the number of control-planes nodes, the removed nodes won't be able to exit the `etcd` cluster correctly. This will endanger the quorum that's required to run a stable Kubernetes control plane. diff --git a/docs/versioned_docs/version-2.12/workflows/storage.md b/docs/versioned_docs/version-2.12/workflows/storage.md index 06fbc4de6..9e3d96346 100644 --- a/docs/versioned_docs/version-2.12/workflows/storage.md +++ b/docs/versioned_docs/version-2.12/workflows/storage.md @@ -21,30 +21,30 @@ For more details see [encrypted persistent storage](../architecture/encrypted-st Constellation supports the following drivers, which offer node-level encryption and optional integrity protection. - - + + **Constellation CSI driver for Azure Disk**: Mount Azure [Disk Storage](https://azure.microsoft.com/en-us/services/storage/disks/#overview) into your Constellation cluster. See the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-azuredisk-csi-driver) for more information. Since Azure Disks are mounted as `ReadWriteOnce`, they're only available to a single pod. - - + + **Constellation CSI driver for GCP Persistent Disk**: Mount [Persistent Disk](https://cloud.google.com/persistent-disk) block storage into your Constellation cluster. Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-gcp-compute-persistent-disk-csi-driver) for more information. - - + + **Constellation CSI driver for AWS Elastic Block Store** Mount [Elastic Block Store](https://aws.amazon.com/ebs/) storage volumes into your Constellation cluster. Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-aws-ebs-csi-driver) for more information. - - + + Note that in case the options above aren't a suitable solution for you, Constellation is compatible with all other CSI-based storage options. For example, you can use [AWS EFS](https://docs.aws.amazon.com/en_en/eks/latest/userguide/efs-csi.html), [Azure Files](https://docs.microsoft.com/en-us/azure/storage/files/storage-files-introduction), or [GCP Filestore](https://cloud.google.com/filestore) with Constellation out of the box. Constellation is just not providing transparent encryption on the node level for these storage types yet. @@ -53,8 +53,8 @@ Note that in case the options above aren't a suitable solution for you, Constell The Constellation CLI automatically installs Constellation's CSI driver for the selected CSP in your cluster. If you don't need a CSI driver or wish to deploy your own, you can disable the automatic installation by setting `deployCSIDriver` to `false` in your Constellation config file. - - + + Azure comes with two storage classes by default. @@ -82,8 +82,8 @@ Note that volume expansion isn't supported for integrity-protected disks. ::: - - + + GCP comes with two storage classes by default. @@ -111,8 +111,8 @@ Note that volume expansion isn't supported for integrity-protected disks. ::: - - + + AWS comes with two storage classes by default. @@ -140,8 +140,8 @@ Note that volume expansion isn't supported for integrity-protected disks. ::: - - + + 1. 
Create a [persistent volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) diff --git a/docs/versioned_docs/version-2.12/workflows/terminate.md b/docs/versioned_docs/version-2.12/workflows/terminate.md index af7dbc1db..14a130d55 100644 --- a/docs/versioned_docs/version-2.12/workflows/terminate.md +++ b/docs/versioned_docs/version-2.12/workflows/terminate.md @@ -4,7 +4,7 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + --- @@ -16,8 +16,8 @@ All ephemeral storage and state of your cluster will be lost. Make sure any data ::: - - + + Terminate the cluster by running: ```bash @@ -40,8 +40,8 @@ resources manually. Just run the `terminate` command again afterward to continue ::: - - + + Terminate the cluster by running: ```bash @@ -56,5 +56,5 @@ rm constellation-state.yaml constellation-admin.conf Only the `constellation-mastersecret.json` and the configuration file remain. - - + + diff --git a/docs/versioned_docs/version-2.12/workflows/troubleshooting.md b/docs/versioned_docs/version-2.12/workflows/troubleshooting.md index c40e6496e..a3e25a0fe 100644 --- a/docs/versioned_docs/version-2.12/workflows/troubleshooting.md +++ b/docs/versioned_docs/version-2.12/workflows/troubleshooting.md @@ -101,8 +101,8 @@ To provide information during early stages of a node's boot process, Constellati You can view this information in the following places: - - + + 1. In your Azure subscription find the Constellation resource group. 2. Inside the resource group find the Application Insights resource called `constellation-insights-*`. @@ -112,8 +112,8 @@ You can view this information in the following places: To **find the disk UUIDs** use the following query: `traces | where message contains "Disk UUID"` - - + + 1. Select the project that hosts Constellation. 2. Go to the `Compute Engine` service. @@ -128,16 +128,16 @@ Constellation uses the default bucket to store logs. Its [default retention peri ::: - - + + 1. Open [AWS CloudWatch](https://console.aws.amazon.com/cloudwatch/home) 2. Select [Log Groups](https://console.aws.amazon.com/cloudwatch/home#logsV2:log-groups) 3. Select the log group that matches the name of your cluster. 4. Select the log stream for control or worker type nodes. - - + + ### Node shell access diff --git a/docs/versioned_docs/version-2.12/workflows/trusted-launch.md b/docs/versioned_docs/version-2.12/workflows/trusted-launch.md index 11d0a096c..13bd63ba6 100644 --- a/docs/versioned_docs/version-2.12/workflows/trusted-launch.md +++ b/docs/versioned_docs/version-2.12/workflows/trusted-launch.md @@ -14,7 +14,7 @@ Constellation supports trusted launch VMs with instance types `Standard_D*_v4` a Azure currently doesn't support [community galleries for trusted launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/share-gallery-community). Thus, you need to manually import the Constellation node image into your cloud subscription. -The latest image is available at `https://cdn.confidential.cloud/constellation/images/azure/trusted-launch/v2.2.0/constellation.img`. Simply adjust the version number to download a newer version. +The latest image is available at . Simply adjust the version number to download a newer version. After you've downloaded the image, create a resource group `constellation-images` in your Azure subscription and import the image. 
You can use a script to do this: @@ -26,7 +26,6 @@ AZURE_IMAGE_VERSION=2.2.0 AZURE_RESOURCE_GROUP_NAME=constellation-images AZURE_I ``` The script creates the following resources: - 1. A new image gallery with the default name `constellation-import` 2. A new image definition with the default name `constellation` 3. The actual image with the provided version. In this case `2.2.0` diff --git a/docs/versioned_docs/version-2.12/workflows/verify-cli.md b/docs/versioned_docs/version-2.12/workflows/verify-cli.md index e33569d37..1280c51b0 100644 --- a/docs/versioned_docs/version-2.12/workflows/verify-cli.md +++ b/docs/versioned_docs/version-2.12/workflows/verify-cli.md @@ -4,11 +4,11 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + --- -Edgeless Systems uses [sigstore](https://www.sigstore.dev/) and [SLSA](https://slsa.dev) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/cosign/signing/overview/), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at `https://rekor.sigstore.dev`. +Edgeless Systems uses [sigstore](https://www.sigstore.dev/) and [SLSA](https://slsa.dev) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/signing/quickstart), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at . :::note The public key for Edgeless Systems' long-term code-signing key is: @@ -20,7 +20,7 @@ JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== -----END PUBLIC KEY----- ``` -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). +The public key is also available for download at and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). ::: The Rekor transparency log is a public append-only ledger that verifies and records signatures and associated metadata. The Rekor transparency log enables everyone to observe the sequence of (software) signatures issued by Edgeless Systems and many other parties. The transparency log allows for the public identification of dubious or malicious signatures. @@ -33,11 +33,7 @@ You don't need to verify the Constellation node images. This is done automatical ## Verify the signature -:::info -This guide assumes Linux on an amd64 processor. The exact steps for other platforms differ slightly. -::: - -First, [install the Cosign CLI](https://docs.sigstore.dev/cosign/system_config/installation/). Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example: +First, [install the Cosign CLI](https://docs.sigstore.dev/system_config/installation). 
Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example: ```shell-session $ cosign verify-blob --key https://edgeless.systems/es.pub --signature constellation-linux-amd64.sig constellation-linux-amd64 diff --git a/docs/versioned_docs/version-2.13/architecture/attestation.md b/docs/versioned_docs/version-2.13/architecture/attestation.md index 8408cc5f0..576bc8865 100644 --- a/docs/versioned_docs/version-2.13/architecture/attestation.md +++ b/docs/versioned_docs/version-2.13/architecture/attestation.md @@ -121,8 +121,8 @@ Constellation allows to specify in the config which measurements should be enfor Enforcing non-reproducible measurements controlled by the cloud provider means that changes in these values require manual updates to the cluster's config. By default, Constellation only enforces measurements that are stable values produced by the infrastructure or by Constellation directly. - - + + Constellation uses the [vTPM](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch#vtpm) feature of Azure CVMs for runtime measurements. This vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. @@ -152,8 +152,8 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + Constellation uses the [vTPM](https://cloud.google.com/compute/confidential-vm/docs/about-cvm) feature of CVMs on GCP for runtime measurements. Note that this vTPM doesn't run inside the hardware-protected CVM context, but is emulated by the hypervisor. @@ -185,8 +185,8 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + Constellation uses the [vTPM](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html) (NitroTPM) feature of the [AWS Nitro System](http://aws.amazon.com/ec2/nitro/) on AWS for runtime measurements. @@ -217,16 +217,16 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + ### CVM verification To verify the integrity of the received attestation statement, a chain of trust from the CVM technology to the interface providing the statement has to be established. For verification of the CVM technology, Constellation may expose additional options in its config file. - - + + On Azure, AMD SEV-SNP is used to provide runtime encryption to the VMs. An SEV-SNP attestation report is used to establish trust in the vTPM running inside the VM. @@ -248,18 +248,18 @@ You may customize certain parameters for verification of the attestation stateme More explicitly, it controls the verification of the `IDKeyDigest` value in the SEV-SNP attestation report. You can provide a list of accepted key digests and specify a policy on how this list is compared against the reported `IDKeyDigest`. - - + + There is no additional configuration available for GCP. - - + + There is no additional configuration available for AWS. 
- - + + ## Cluster attestation diff --git a/docs/versioned_docs/version-2.13/architecture/keys.md b/docs/versioned_docs/version-2.13/architecture/keys.md index 553d9d4e2..f2c8c3fba 100644 --- a/docs/versioned_docs/version-2.13/architecture/keys.md +++ b/docs/versioned_docs/version-2.13/architecture/keys.md @@ -105,7 +105,7 @@ Initially, it will support the following KMSs: * [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/#product-overview) * [KMIP-compatible KMS](https://www.oasis-open.org/committees/tc_home.php?wg_abbrev=kmip) -Storing the keys in Cloud KMS of AWS, Azure, or GCP binds the key usage to the particular cloud identity access management (IAM). +Storing the keys in Cloud KMS of AWS, GCP, or Azure binds the key usage to the particular cloud identity access management (IAM). In the future, Constellation will support remote attestation-based access policies for Cloud KMS once available. Note that using a Cloud KMS limits the isolation and protection to the guarantees of the particular offering. diff --git a/docs/versioned_docs/version-2.13/getting-started/first-steps-local.md b/docs/versioned_docs/version-2.13/getting-started/first-steps-local.md index 890a12654..90a7317af 100644 --- a/docs/versioned_docs/version-2.13/getting-started/first-steps-local.md +++ b/docs/versioned_docs/version-2.13/getting-started/first-steps-local.md @@ -45,8 +45,8 @@ sudo iptables -P FORWARD ACCEPT ## Create a cluster - - + + With the `constellation mini` command, you can deploy and test Constellation locally. This mode is called MiniConstellation. Conceptually, MiniConstellation is similar to [MicroK8s](https://microk8s.io/), [K3s](https://k3s.io/), and [minikube](https://minikube.sigs.k8s.io/docs/). @@ -74,8 +74,8 @@ constellation mini up This will configure your current directory as the [workspace](../architecture/orchestration.md#workspaces) for this cluster. All `constellation` commands concerning this cluster need to be issued from this directory. - - + + With the QEMU provider, you can create a local Constellation cluster as if it were in the cloud. The provider uses [QEMU](https://www.qemu.org/) to create multiple VMs for the cluster nodes, which interact with each other. @@ -152,8 +152,8 @@ attaching persistent storage, or autoscaling aren't available. export KUBECONFIG="$PWD/constellation-admin.conf" ``` - - + + ## Connect to the cluster @@ -206,8 +206,8 @@ worker-0 Ready 32s v1.24.6 ## Terminate your cluster - - + + Once you are done, you can clean up the created resources using the following command: @@ -218,8 +218,8 @@ constellation mini down This will destroy your cluster and clean up your workspace. The VM image and cluster configuration file (`constellation-conf.yaml`) will be kept and may be reused to create new clusters. - - + + Once you are done, you can clean up the created resources using the following command: @@ -247,8 +247,8 @@ Your Constellation cluster was terminated successfully. This will destroy your cluster and clean up your workspace. The VM image and cluster configuration file (`constellation-conf.yaml`) will be kept and may be reused to create new clusters. 
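For quick reference, the MiniConstellation commands from this page chain together into a short local test session. The sketch below assumes a Linux machine set up as described on this page; the workspace name is arbitrary and `kubectl get nodes` merely stands in for whatever workload check you prefer:

```bash
# Create a fresh workspace and start a local MiniConstellation cluster
mkdir miniconstellation && cd miniconstellation
constellation mini up

# Point kubectl at the new cluster and check that the nodes become Ready
export KUBECONFIG="$PWD/constellation-admin.conf"
kubectl get nodes

# Clean up the local VMs again once you're done
constellation mini down
```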
- - + + ## Troubleshooting diff --git a/docs/versioned_docs/version-2.13/getting-started/first-steps.md b/docs/versioned_docs/version-2.13/getting-started/first-steps.md index 3ec110064..040be5478 100644 --- a/docs/versioned_docs/version-2.13/getting-started/first-steps.md +++ b/docs/versioned_docs/version-2.13/getting-started/first-steps.md @@ -15,39 +15,39 @@ If you encounter any problem with the following steps, make sure to use the [lat 1. Create the [configuration file](../workflows/config.md) and state file for your cloud provider. - + - + ```bash constellation config generate azure ``` - + - + ```bash constellation config generate gcp ``` - + - + ```bash constellation config generate aws ``` - + - + 2. Create your [IAM configuration](../workflows/config.md#creating-an-iam-configuration). - + - + ```bash constellation iam create azure --region=westus --resourceGroup=constellTest --servicePrincipal=spTest --update-config @@ -62,21 +62,21 @@ If you encounter any problem with the following steps, make sure to use the [lat * `westeurope` * `southeastasia` - + - + ```bash - constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --serviceAccountID=constell-test --update-config + constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west2-a --serviceAccountID=constell-test --update-config ``` - This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. + This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west2-a` creating a new service account `constell-test`. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `C2D` or `N2D`. - + - + ```bash constellation iam create aws --zone=us-east-2a --prefix=constellTest --update-config @@ -103,8 +103,8 @@ If you encounter any problem with the following steps, make sure to use the [lat You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). - - + + :::tip To learn about all options you have for managing IAM resources and Constellation configuration, see the [Configuration workflow](../workflows/config.md). 
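Taken together, the configuration and IAM steps above boil down to two CLI invocations per cloud provider. As a sketch for GCP, using the example values from this page (project ID, zone, and service-account name are placeholders you'd replace with your own):

```bash
# 1. Generate the configuration and state files for GCP
constellation config generate gcp

# 2. Create the IAM resources and write them into constellation-conf.yaml
constellation iam create gcp \
  --projectID=yourproject-12345 \
  --zone=europe-west2-a \
  --serviceAccountID=constell-test \
  --update-config
```

The AWS and Azure flows are analogous; only the provider name and the provider-specific flags shown above change.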
diff --git a/docs/versioned_docs/version-2.13/getting-started/install.md b/docs/versioned_docs/version-2.13/getting-started/install.md index 2fabcf0b1..03848d23b 100644 --- a/docs/versioned_docs/version-2.13/getting-started/install.md +++ b/docs/versioned_docs/version-2.13/getting-started/install.md @@ -11,15 +11,15 @@ Make sure the following requirements are met: - Your machine is running Linux or macOS - You have admin rights on your machine - [kubectl](https://kubernetes.io/docs/tasks/tools/) is installed -- Your CSP is Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) +- Your CSP is Microsoft Azure, Google Cloud Platform (GCP), or Amazon Web Services (AWS) ## Install the Constellation CLI The CLI executable is available at [GitHub](https://github.com/edgelesssys/constellation/releases). Install it with the following commands: - - + + 1. Download the CLI: @@ -35,8 +35,8 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-linux-amd64 /usr/local/bin/constellation ``` - - + + 1. Download the CLI: @@ -52,9 +52,10 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-linux-arm64 /usr/local/bin/constellation ``` - - + + + 1. Download the CLI: @@ -70,9 +71,11 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-darwin-arm64 /usr/local/bin/constellation ``` - - + + + + 1. Download the CLI: @@ -88,8 +91,8 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-darwin-amd64 /usr/local/bin/constellation ``` - - + + :::tip The CLI supports autocompletion for various shells. To set it up, run `constellation completion` and follow the given steps. @@ -105,42 +108,39 @@ If you don't have a cloud subscription, you can also set up a [local Constellati ### Required permissions - - + + The following [resource providers need to be registered](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/resource-providers-and-types#register-resource-provider) in your subscription: - -- `Microsoft.Attestation` -- `Microsoft.Compute` -- `Microsoft.Insights` -- `Microsoft.ManagedIdentity` -- `Microsoft.Network` +* `Microsoft.Attestation` \[2] +* `Microsoft.Compute` +* `Microsoft.Insights` +* `Microsoft.ManagedIdentity` +* `Microsoft.Network` By default, Constellation tries to register these automatically if they haven't been registered before. To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: - -- `*/register/action` \[1] -- `Microsoft.Authorization/roleAssignments/*` -- `Microsoft.Authorization/roleDefinitions/*` -- `Microsoft.ManagedIdentity/userAssignedIdentities/*` -- `Microsoft.Resources/subscriptions/resourcegroups/*` +* `*/register/action` \[1] +* `Microsoft.Authorization/roleAssignments/*` +* `Microsoft.Authorization/roleDefinitions/*` +* `Microsoft.ManagedIdentity/userAssignedIdentities/*` +* `Microsoft.Resources/subscriptions/resourcegroups/*` The built-in `Owner` role is a superset of these permissions. 
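If you'd rather register the resource providers listed above yourself instead of relying on Constellation's automatic registration, a sketch using the Azure CLI (assuming you're already logged in via `az login`) could look like this:

```bash
# Register the resource providers Constellation depends on
for ns in Microsoft.Attestation Microsoft.Compute Microsoft.Insights Microsoft.ManagedIdentity Microsoft.Network; do
  az provider register --namespace "$ns"
done

# Registration is asynchronous; check the state until it reports "Registered"
az provider show --namespace Microsoft.Attestation --query registrationState --output tsv
```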
To [create a Constellation cluster](../workflows/create.md#the-create-step), you need the following permissions: - -- `Microsoft.Attestation/attestationProviders/*` -- `Microsoft.Compute/virtualMachineScaleSets/*` -- `Microsoft.Insights/components/*` -- `Microsoft.ManagedIdentity/userAssignedIdentities/*` -- `Microsoft.Network/loadBalancers/*` -- `Microsoft.Network/loadBalancers/backendAddressPools/*` -- `Microsoft.Network/networkSecurityGroups/*` -- `Microsoft.Network/publicIPAddresses/*` -- `Microsoft.Network/virtualNetworks/*` -- `Microsoft.Network/virtualNetworks/subnets/*` -- `Microsoft.Network/natGateways/*` +* `Microsoft.Attestation/attestationProviders/*` \[2] +* `Microsoft.Compute/virtualMachineScaleSets/*` +* `Microsoft.Insights/components/*` +* `Microsoft.ManagedIdentity/userAssignedIdentities/*` +* `Microsoft.Network/loadBalancers/*` +* `Microsoft.Network/loadBalancers/backendAddressPools/*` +* `Microsoft.Network/networkSecurityGroups/*` +* `Microsoft.Network/publicIPAddresses/*` +* `Microsoft.Network/virtualNetworks/*` +* `Microsoft.Network/virtualNetworks/subnets/*` +* `Microsoft.Network/natGateways/*` The built-in `Contributor` role is a superset of these permissions. @@ -148,94 +148,94 @@ Follow Microsoft's guide on [understanding](https://learn.microsoft.com/en-us/az 1: You can omit `*/register/Action` if the resource providers mentioned above are already registered and the `ARM_SKIP_PROVIDER_REGISTRATION` environment variable is set to `true` when creating the IAM configuration. - - +2: You can omit `Microsoft.Attestation/attestationProviders/*` and the registration of `Microsoft.Attestation` if `EnforceIDKeyDigest` isn't set to `MAAFallback` in the [config file](../workflows/config.md#configure-your-cluster). + + + Create a new project for Constellation or use an existing one. Enable the [Compute Engine API](https://console.cloud.google.com/apis/library/compute.googleapis.com) on it. To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: - -- `iam.serviceAccountKeys.create` -- `iam.serviceAccountKeys.delete` -- `iam.serviceAccountKeys.get` -- `iam.serviceAccounts.create` -- `iam.serviceAccounts.delete` -- `iam.serviceAccounts.get` -- `resourcemanager.projects.getIamPolicy` -- `resourcemanager.projects.setIamPolicy` +* `iam.serviceAccountKeys.create` +* `iam.serviceAccountKeys.delete` +* `iam.serviceAccountKeys.get` +* `iam.serviceAccounts.create` +* `iam.serviceAccounts.delete` +* `iam.serviceAccounts.get` +* `resourcemanager.projects.getIamPolicy` +* `resourcemanager.projects.setIamPolicy` Together, the built-in roles `roles/editor` and `roles/resourcemanager.projectIamAdmin` form a superset of these permissions. 
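To illustrate how these IAM-creation roles might be granted, here's a sketch using the gcloud CLI; the project ID matches the examples on this page, while the user e-mail is a placeholder and your organization may prefer group- or service-account-based bindings instead:

```bash
# Grant the two built-in roles that together cover the permissions above
gcloud projects add-iam-policy-binding yourproject-12345 \
  --member="user:admin@example.com" \
  --role="roles/editor"

gcloud projects add-iam-policy-binding yourproject-12345 \
  --member="user:admin@example.com" \
  --role="roles/resourcemanager.projectIamAdmin"
```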
To [create a Constellation cluster](../workflows/create.md#the-create-step), you need the following permissions: - -- `compute.addresses.createInternal` -- `compute.addresses.deleteInternal` -- `compute.addresses.get` -- `compute.addresses.useInternal` -- `compute.backendServices.create` -- `compute.backendServices.delete` -- `compute.backendServices.get` -- `compute.backendServices.use` -- `compute.disks.create` -- `compute.firewalls.create` -- `compute.firewalls.delete` -- `compute.firewalls.get` -- `compute.firewalls.update` -- `compute.globalAddresses.create` -- `compute.globalAddresses.delete` -- `compute.globalAddresses.get` -- `compute.globalAddresses.use` -- `compute.globalForwardingRules.create` -- `compute.globalForwardingRules.delete` -- `compute.globalForwardingRules.get` -- `compute.globalForwardingRules.setLabels` -- `compute.globalOperations.get` -- `compute.healthChecks.create` -- `compute.healthChecks.delete` -- `compute.healthChecks.get` -- `compute.healthChecks.useReadOnly` -- `compute.instanceGroupManagers.create` -- `compute.instanceGroupManagers.delete` -- `compute.instanceGroupManagers.get` -- `compute.instanceGroupManagers.update` -- `compute.instanceGroups.create` -- `compute.instanceGroups.delete` -- `compute.instanceGroups.get` -- `compute.instanceGroups.update` -- `compute.instanceGroups.use` -- `compute.instances.create` -- `compute.instances.setLabels` -- `compute.instances.setMetadata` -- `compute.instances.setTags` -- `compute.instanceTemplates.create` -- `compute.instanceTemplates.delete` -- `compute.instanceTemplates.get` -- `compute.instanceTemplates.useReadOnly` -- `compute.networks.create` -- `compute.networks.delete` -- `compute.networks.get` -- `compute.networks.updatePolicy` -- `compute.routers.create` -- `compute.routers.delete` -- `compute.routers.get` -- `compute.routers.update` -- `compute.subnetworks.create` -- `compute.subnetworks.delete` -- `compute.subnetworks.get` -- `compute.subnetworks.use` -- `compute.targetTcpProxies.create` -- `compute.targetTcpProxies.delete` -- `compute.targetTcpProxies.get` -- `compute.targetTcpProxies.use` -- `iam.serviceAccounts.actAs` +* `compute.addresses.createInternal` +* `compute.addresses.deleteInternal` +* `compute.addresses.get` +* `compute.addresses.useInternal` +* `compute.backendServices.create` +* `compute.backendServices.delete` +* `compute.backendServices.get` +* `compute.backendServices.use` +* `compute.disks.create` +* `compute.firewalls.create` +* `compute.firewalls.delete` +* `compute.firewalls.get` +* `compute.firewalls.update` +* `compute.globalAddresses.create` +* `compute.globalAddresses.delete` +* `compute.globalAddresses.get` +* `compute.globalAddresses.use` +* `compute.globalForwardingRules.create` +* `compute.globalForwardingRules.delete` +* `compute.globalForwardingRules.get` +* `compute.globalForwardingRules.setLabels` +* `compute.globalOperations.get` +* `compute.healthChecks.create` +* `compute.healthChecks.delete` +* `compute.healthChecks.get` +* `compute.healthChecks.useReadOnly` +* `compute.instanceGroupManagers.create` +* `compute.instanceGroupManagers.delete` +* `compute.instanceGroupManagers.get` +* `compute.instanceGroupManagers.update` +* `compute.instanceGroups.create` +* `compute.instanceGroups.delete` +* `compute.instanceGroups.get` +* `compute.instanceGroups.update` +* `compute.instanceGroups.use` +* `compute.instances.create` +* `compute.instances.setLabels` +* `compute.instances.setMetadata` +* `compute.instances.setTags` +* `compute.instanceTemplates.create` +* 
`compute.instanceTemplates.delete` +* `compute.instanceTemplates.get` +* `compute.instanceTemplates.useReadOnly` +* `compute.networks.create` +* `compute.networks.delete` +* `compute.networks.get` +* `compute.networks.updatePolicy` +* `compute.routers.create` +* `compute.routers.delete` +* `compute.routers.get` +* `compute.routers.update` +* `compute.subnetworks.create` +* `compute.subnetworks.delete` +* `compute.subnetworks.get` +* `compute.subnetworks.use` +* `compute.targetTcpProxies.create` +* `compute.targetTcpProxies.delete` +* `compute.targetTcpProxies.get` +* `compute.targetTcpProxies.use` +* `iam.serviceAccounts.actAs` Together, the built-in roles `roles/editor`, `roles/compute.instanceAdmin` and `roles/resourcemanager.projectIamAdmin` form a superset of these permissions. Follow Google's guide on [understanding](https://cloud.google.com/iam/docs/understanding-roles) and [assigning roles](https://cloud.google.com/iam/docs/granting-changing-revoking-access). - - + + To set up a Constellation cluster, you need to perform two tasks that require permissions: create the infrastructure and create roles for cluster nodes. Both of these actions can be performed by different users, e.g., an administrator to create roles and a DevOps engineer to create the infrastructure. @@ -281,12 +281,13 @@ The built-in `AdministratorAccess` policy is a superset of these permissions. To [create a Constellation cluster](../workflows/create.md#the-create-step), see the permissions of [main.tf](https://github.com/edgelesssys/constellation/blob/main/terraform/infrastructure/iam/aws/main.tf). + The built-in `PowerUserAccess` policy is a superset of these permissions. Follow Amazon's guide on [understanding](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) and [managing policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html). - - + + ### Authentication @@ -296,8 +297,8 @@ You need to authenticate with your CSP. The following lists the required steps f The steps for a *testing* environment are simpler. However, they may expose secrets to the CSP. If in doubt, follow the *production* steps. ::: - - + + **Testing** @@ -313,8 +314,8 @@ az login Other options are described in Azure's [authentication guide](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli). - - + + **Testing** @@ -337,8 +338,8 @@ Use one of the following options on a trusted machine: Follow [Google's guide](https://cloud.google.com/docs/authentication/production#manually) for setting up your credentials. - - + + **Testing** @@ -354,9 +355,10 @@ aws configure Options and first steps are described in the [AWS CLI documentation](https://docs.aws.amazon.com/cli/index.html). - + - + + ## Next steps diff --git a/docs/versioned_docs/version-2.13/overview/clouds.md b/docs/versioned_docs/version-2.13/overview/clouds.md index dfc3d5307..3ccbb0d6d 100644 --- a/docs/versioned_docs/version-2.13/overview/clouds.md +++ b/docs/versioned_docs/version-2.13/overview/clouds.md @@ -31,11 +31,11 @@ This firmware is signed by Azure. The signature is reflected in the remote-attestation statements of CVMs. Thus, the Azure closed-source firmware becomes part of Constellation's trusted computing base (TCB). -\* Recently, [Azure announced the open source paravisor OpenHCL](https://techcommunity.microsoft.com/blog/windowsosplatform/openhcl-the-new-open-source-paravisor/4273172). It's the foundation for fully open source and verifiable CVM firmware. 
Once Azure provides their CVM firmware with reproducible builds based on OpenHCL, (4) switches from *No* to *Yes*. Constellation will support OpenHCL based firmware on Azure in the future. +\* Recently, Azure [announced](https://techcommunity.microsoft.com/t5/azure-confidential-computing/azure-confidential-vms-using-sev-snp-dcasv5-ecasv5-are-now/ba-p/3573747) the *limited preview* of CVMs with customizable firmware. With this CVM type, (4) switches from *No* to *Yes*. Constellation will support customizable firmware on Azure in the future. ## Google Cloud Platform (GCP) -The [CVMs Generally Available in GCP](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview#amd_sev) are based on AMD SEV but don't have SNP features enabled. +The [CVMs Generally Available in GCP](https://cloud.google.com/compute/confidential-vm/docs/create-confidential-vm-instance) are based on AMD SEV but don't have SNP features enabled. CVMs with SEV-SNP enabled are currently in [private preview](https://cloud.google.com/blog/products/identity-security/rsa-snp-vm-more-confidential). Regarding (3), with their SEV-SNP offering Google provides direct access to remote-attestation statements. However, regarding (4), the CVMs still include closed-source firmware. diff --git a/docs/versioned_docs/version-2.13/overview/confidential-kubernetes.md b/docs/versioned_docs/version-2.13/overview/confidential-kubernetes.md index bff8c3322..ca20df4de 100644 --- a/docs/versioned_docs/version-2.13/overview/confidential-kubernetes.md +++ b/docs/versioned_docs/version-2.13/overview/confidential-kubernetes.md @@ -23,9 +23,9 @@ With the above, Constellation wraps an entire cluster into one coherent and veri ![Confidential Kubernetes](../_media/concept-constellation.svg) -## Comparison: Managed Kubernetes with CVMs +## Contrast: Managed Kubernetes with CVMs -In comparison, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. Consequently, this approach leaves a large attack surface, as is depicted in the following. +In contrast, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. 
For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. Consequently, this approach leaves a large attack surface, as is depicted in the following.

![Concept: Managed Kubernetes plus CVMs](../_media/concept-managed.svg)

diff --git a/docs/versioned_docs/version-2.13/overview/performance/compute.md b/docs/versioned_docs/version-2.13/overview/performance/compute.md
deleted file mode 100644
index 88dd4b1b2..000000000
--- a/docs/versioned_docs/version-2.13/overview/performance/compute.md
+++ /dev/null
@@ -1,11 +0,0 @@
-# Impact of runtime encryption on compute performance
-
-All nodes in a Constellation cluster are executed inside Confidential VMs (CVMs). Consequently, the performance of Constellation is inherently linked to the performance of these CVMs.
-
-## AMD and Azure benchmarking
-
-AMD and Azure have collectively released a [performance benchmark](https://community.amd.com/t5/business/microsoft-azure-confidential-computing-powered-by-3rd-gen-epyc/ba-p/497796) for CVMs that utilize 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. This benchmark, which included a variety of mostly compute-intensive tests such as SPEC CPU 2017 and CoreMark, demonstrated that CVMs experience only minor performance degradation (ranging from 2% to 8%) when compared to standard VMs. Such results are indicative of the performance that can be expected from compute-intensive workloads running with Constellation on Azure.
-
-## AMD and Google benchmarking
-
-Similarly, AMD and Google have jointly released a [performance benchmark](https://www.amd.com/system/files/documents/3rd-gen-epyc-gcp-c2d-conf-compute-perf-brief.pdf) for CVMs employing 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. With high-performance computing workloads such as WRF, NAMD, Ansys CFS, and Ansys LS_DYNA, they observed analogous findings, with only minor performance degradation (between 2% and 4%) compared to standard VMs. These outcomes are reflective of the performance that can be expected for compute-intensive workloads running with Constellation on GCP.
diff --git a/docs/versioned_docs/version-2.13/overview/performance/io.md b/docs/versioned_docs/version-2.13/overview/performance/io.md
index 3ae796f8a..dc7cf3d8b 100644
--- a/docs/versioned_docs/version-2.13/overview/performance/io.md
+++ b/docs/versioned_docs/version-2.13/overview/performance/io.md
@@ -58,7 +58,7 @@ The following infrastructure configurations were used:
This section gives a thorough analysis of the network performance of Constellation, specifically focusing on measuring TCP and UDP bandwidth. The benchmark measured the bandwidth of pod-to-pod and pod-to-service connections between two different nodes using [`iperf`](https://iperf.fr/).
-GKE and Constellation on GCP had a maximum network bandwidth of [10 Gbps](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines).
+GKE and Constellation on GCP had a maximum network bandwidth of [10 Gbps](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines).
AKS with `Standard_D4as_v5` machines has a maximum network bandwidth of [12.5 Gbps](https://learn.microsoft.com/en-us/azure/virtual-machines/dasv5-dadsv5-series#dasv5-series).
The Confidential VM equivalent `Standard_DC4as_v5` currently has a network bandwidth of [1.25 Gbps](https://learn.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series#dcasv5-series-products).
Therefore, to make the test comparable, both AKS and Constellation on Azure were running with `Standard_DC4as_v5` machines and 1.25 Gbps bandwidth. diff --git a/docs/versioned_docs/version-2.13/overview/performance/performance.md b/docs/versioned_docs/version-2.13/overview/performance/performance.md index 59bf86602..7f22a693e 100644 --- a/docs/versioned_docs/version-2.13/overview/performance/performance.md +++ b/docs/versioned_docs/version-2.13/overview/performance/performance.md @@ -1,10 +1,18 @@ # Performance analysis of Constellation -This section provides a comprehensive examination of the performance characteristics of Constellation. +This section provides a comprehensive examination of the performance characteristics of Constellation, encompassing various aspects, including runtime encryption, I/O benchmarks, and real-world applications. -## Runtime encryption +## Impact of runtime encryption on performance -Runtime encryption affects compute performance. [Benchmarks by Azure and Google](compute.md) show that the performance degradation of Confidential VMs (CVMs) is small, ranging from 2% to 8% for compute-intensive workloads. +All nodes in a Constellation cluster are executed inside Confidential VMs (CVMs). Consequently, the performance of Constellation is inherently linked to the performance of these CVMs. + +### AMD and Azure benchmarking + +AMD and Azure have collectively released a [performance benchmark](https://community.amd.com/t5/business/microsoft-azure-confidential-computing-powered-by-3rd-gen-epyc/ba-p/497796) for CVMs that utilize 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. This benchmark, which included a variety of mostly compute-intensive tests such as SPEC CPU 2017 and CoreMark, demonstrated that CVMs experience only minor performance degradation (ranging from 2% to 8%) when compared to standard VMs. Such results are indicative of the performance that can be expected from compute-intensive workloads running with Constellation on Azure. + +### AMD and Google benchmarking + +Similarly, AMD and Google have jointly released a [performance benchmark](https://www.amd.com/system/files/documents/3rd-gen-epyc-gcp-c2d-conf-compute-perf-brief.pdf) for CVMs employing 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. With high-performance computing workloads such as WRF, NAMD, Ansys CFS, and Ansys LS_DYNA, they observed analogous findings, with only minor performance degradation (between 2% and 4%) compared to standard VMs. These outcomes are reflective of the performance that can be expected for compute-intensive workloads running with Constellation on GCP. ## I/O performance benchmarks diff --git a/docs/versioned_docs/version-2.13/overview/product.md b/docs/versioned_docs/version-2.13/overview/product.md index e42596fcc..ba7181aa9 100644 --- a/docs/versioned_docs/version-2.13/overview/product.md +++ b/docs/versioned_docs/version-2.13/overview/product.md @@ -6,6 +6,6 @@ From a security perspective, Constellation implements the [Confidential Kubernet From an operational perspective, Constellation provides the following key features: -* **Native support for different clouds**: Constellation works on Amazon Web Services (AWS), Microsoft Azure, and Google Cloud Platform (GCP). Support for OpenStack-based environments is coming with a future release. 
Constellation securely interfaces with the cloud infrastructure to provide [cluster autoscaling](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), [dynamic persistent volumes](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/), and [service load balancing](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). +* **Native support for different clouds**: Constellation works on Microsoft Azure, Google Cloud Platform (GCP), and Amazon Web Services (AWS). Support for OpenStack-based environments is coming with a future release. Constellation securely interfaces with the cloud infrastructure to provide [cluster autoscaling](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), [dynamic persistent volumes](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/), and [service load balancing](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). * **High availability**: Constellation uses a [multi-master architecture](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/) with a [stacked etcd topology](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/ha-topology/#stacked-etcd-topology) to ensure high availability. * **Integrated Day-2 operations**: Constellation lets you securely [upgrade](../workflows/upgrade.md) your cluster to a new release. It also lets you securely [recover](../workflows/recovery.md) a failed cluster. Both with a single command. diff --git a/docs/versioned_docs/version-2.13/workflows/config.md b/docs/versioned_docs/version-2.13/workflows/config.md index edc3c9091..95f95aeec 100644 --- a/docs/versioned_docs/version-2.13/workflows/config.md +++ b/docs/versioned_docs/version-2.13/workflows/config.md @@ -4,7 +4,7 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + --- @@ -14,49 +14,49 @@ Before you can create your cluster, you need to configure the identity and acces You can generate a configuration file for your CSP by using the following CLI command: - - + + ```bash constellation config generate azure ``` - - + + ```bash constellation config generate gcp ``` - - + + ```bash constellation config generate aws ``` - - + + This creates the file `constellation-conf.yaml` in the current directory. ## Choosing a VM type Constellation supports the following VM types: - - + + By default, Constellation uses `Standard_DC4as_v5` CVMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. For CVMs, any VM type with a minimum of 4 vCPUs from the [DCasv5 & DCadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series) or [ECasv5 & ECadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/ecasv5-ecadsv5-series) families is supported. You can also run `constellation config instance-types` to get the list of all supported options. - - + + By default, Constellation uses `n2d-standard-4` VMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. Supported are all machines with a minimum of 4 vCPUs from the [C2D](https://cloud.google.com/compute/docs/compute-optimized-machines#c2d_machine_types) or [N2D](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines) family. 
You can run `constellation config instance-types` to get the list of all supported options. - - + + By default, Constellation uses `m6a.xlarge` VMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. @@ -75,8 +75,8 @@ AWS is currently investigating the issue. SNP-based attestation will be enabled as soon as a fix is verified. ::: - - + + Fill the desired VM type into the **instanceType** fields in the `constellation-conf.yml` file. @@ -86,6 +86,7 @@ By default, Constellation creates the node groups `control_plane_default` and `w If you require additional control-plane or worker groups with different instance types, zone placements, or disk sizes, you can add additional node groups to the `constellation-conf.yml` file. Each node group can be scaled individually. + Consider the following example for AWS: ```yaml @@ -119,9 +120,9 @@ You can use the field `zone` to specify what availability zone nodes of the grou On Azure, this field is empty by default and nodes are automatically spread across availability zones. Consult the documentation of your cloud provider for more information: -* [AWS](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/) -* [Azure](https://azure.microsoft.com/en-us/explore/global-infrastructure/availability-zones) -* [GCP](https://cloud.google.com/compute/docs/regions-zones) +- [AWS](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/) +- [Azure](https://azure.microsoft.com/en-us/explore/global-infrastructure/availability-zones) +- [GCP](https://cloud.google.com/compute/docs/regions-zones) ## Choosing a Kubernetes version @@ -133,8 +134,8 @@ See also Constellation's [Kubernetes support policy](../architecture/versions.md You can create an IAM configuration for your cluster automatically using the `constellation iam create` command. If you already have a Constellation configuration file, you can add the `--update-config` flag to the command. This writes the needed IAM fields into your configuration. Furthermore, the flag updates the zone/region of the configuration if it hasn't been set yet. - - + + You must be authenticated with the [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). @@ -154,23 +155,23 @@ Note that CVMs are currently only supported in a few regions, check [Azure's pro Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - + + You must be authenticated with the [GCP CLI](https://cloud.google.com/sdk/gcloud) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). ```bash -constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --serviceAccountID=constell-test +constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west2-a --serviceAccountID=constell-test ``` -This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. +This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west2-a` creating a new service account `constell-test`. Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. 
You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `N2D`. Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - + + You must be authenticated with the [AWS CLI](https://aws.amazon.com/en/cli/) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). @@ -194,16 +195,16 @@ You can find a list of all [regions in AWS's documentation](https://docs.aws.ama Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - + +
Alternatively, you can manually create the IAM configuration on your CSP. The following describes the configuration fields and how you obtain the required information or create the required resources. - - + + * **subscription**: The UUID of your Azure subscription, e.g., `8b8bd01f-efd9-4113-9bd1-c82137c32da7`. @@ -232,19 +233,19 @@ The following describes the configuration fields and how you obtain the required The user-assigned identity is used by instances of the cluster to access other cloud resources. For more information about managed identities refer to [Azure's documentation](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/how-manage-user-assigned-managed-identities). - + - + * **project**: The ID of your GCP project, e.g., `constellation-129857`. You can find it on the [welcome screen of your GCP project](https://console.cloud.google.com/welcome). For more information refer to [Google's documentation](https://support.google.com/googleapi/answer/7014113). -* **region**: The GCP region you want to deploy your cluster in, e.g., `us-central1`. +* **region**: The GCP region you want to deploy your cluster in, e.g., `us-west1`. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). -* **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-central1-a`. +* **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-west1-a`. You can find a [list of all zones in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). @@ -258,9 +259,9 @@ The following describes the configuration fields and how you obtain the required Afterward, create and download a new JSON key for this service account. Place the downloaded file in your Constellation workspace, and set the config parameter to the filename, e.g., `constellation-129857-15343dba46cb.json`. - + - + * **region**: The name of your chosen AWS data center region, e.g., `us-east-2`. @@ -291,9 +292,9 @@ The following describes the configuration fields and how you obtain the required Alternatively, you can create the AWS profile with a tool of your choice. Use the JSON policy in [main.tf](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam/main.tf) in the resource `aws_iam_policy.worker_node_policy`. - + - +
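Regarding the GCP service-account key mentioned above: creating and downloading the JSON key can also be scripted. This is a sketch with placeholder names (the project ID follows the examples on this page, the service-account name is illustrative); afterwards, set the key-path parameter in `constellation-conf.yaml` to the downloaded filename as described above:

```bash
# Create a JSON key for the service account and store it in the current workspace
gcloud iam service-accounts keys create constellation-gcp-key.json \
  --iam-account="constell-test@constellation-129857.iam.gserviceaccount.com"
```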
Now that you've configured your CSP, you can [create your cluster](./create.md). diff --git a/docs/versioned_docs/version-2.13/workflows/create.md b/docs/versioned_docs/version-2.13/workflows/create.md index f347e9f27..605413cac 100644 --- a/docs/versioned_docs/version-2.13/workflows/create.md +++ b/docs/versioned_docs/version-2.13/workflows/create.md @@ -4,7 +4,7 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + --- @@ -26,8 +26,8 @@ Before you create the cluster, make sure to have a [valid configuration file](./ ### Create - - + + ```bash constellation create @@ -35,8 +35,8 @@ constellation create *create* stores your cluster's state in a [`constellation-terraform`](../architecture/orchestration.md#cluster-creation-process) directory in your workspace. - - + + Self-managed infrastructure allows for more flexibility in the setup, by separating the infrastructure setup from the Constellation cluster management. This provides flexibility in DevOps and can meet potential regulatory requirements. @@ -55,7 +55,7 @@ management tooling of your choice. You need to keep the essential functionality :::info - On Azure, a manual update to the MAA provider's policy is necessary. + On Azure, if the enforcement policy is set to `MAAFallback` in `constellation-config.yaml`, a manual update to the MAA provider's policy is necessary. You can apply the update with the following command after creating the infrastructure, with `` being the URL of the MAA provider (i.e., `$(terraform output attestationURL | jq -r)`, when using the minimal Terraform configuration). ```bash @@ -72,8 +72,8 @@ Fill these outputs into the corresponding fields of the `Infrastructure` block i Continue with [initializing your cluster](#the-apply-step). - - + + ## The *apply* step diff --git a/docs/versioned_docs/version-2.13/workflows/recovery.md b/docs/versioned_docs/version-2.13/workflows/recovery.md index f2d5f22c1..955981749 100644 --- a/docs/versioned_docs/version-2.13/workflows/recovery.md +++ b/docs/versioned_docs/version-2.13/workflows/recovery.md @@ -16,8 +16,8 @@ You can check the health status of the nodes via the cloud service provider (CSP Constellation provides logging information on the boot process and status via [cloud logging](troubleshooting.md#cloud-logging). In the following, you'll find detailed descriptions for identifying clusters stuck in recovery for each CSP. - - + + In the Azure portal, find the cluster's resource group. Inside the resource group, open the control plane *Virtual machine scale set* `constellation-scale-set-controlplanes-`. @@ -51,8 +51,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. - - + + First, check that the control plane *Instance Group* has enough members in a *Ready* state. In the GCP Console, go to **Instance Groups** and check the group for the cluster's control plane `-control-plane-`. @@ -87,8 +87,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. - - + + First, open the AWS console to view all Auto Scaling Groups (ASGs) in the region of your cluster. Select the ASG of the control plane `--control-plane` and check that enough members are in a *Running* state. @@ -118,8 +118,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. 
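Once you've identified a stuck cluster as described above, the actual recovery (covered in the next section) is issued from the cluster's workspace. As a rough sketch, assuming the `constellation recover` command of this release and a workspace that still contains the master secret:

```bash
# Run from the cluster's workspace; re-establishes trust with the stuck
# control-plane node and provides the decryption key it needs to rejoin
constellation recover
```

Depending on your Constellation version you may need to pass the cluster's load balancer endpoint explicitly; check `constellation recover --help` for the available flags.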
- - + + ## Recover a cluster diff --git a/docs/versioned_docs/version-2.13/workflows/sbom.md b/docs/versioned_docs/version-2.13/workflows/sbom.md index 6c1702dee..9ef6eb65c 100644 --- a/docs/versioned_docs/version-2.13/workflows/sbom.md +++ b/docs/versioned_docs/version-2.13/workflows/sbom.md @@ -1,6 +1,6 @@ # Consume software bill of materials (SBOMs) - + --- @@ -11,15 +11,13 @@ SBOMs for Constellation are generated using [Syft](https://github.com/anchore/sy :::note The public key for Edgeless Systems' long-term code-signing key is: - ``` -----BEGIN PUBLIC KEY----- MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEf8F1hpmwE+YCFXzjGtaQcrL6XZVT JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== -----END PUBLIC KEY----- ``` - -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). +The public key is also available for download at https://edgeless.systems/es.pub and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). Make sure the key is available in a file named `cosign.pub` to execute the following examples. ::: @@ -40,7 +38,7 @@ cosign verify-blob --key cosign.pub --signature constellation.spdx.sbom.sig cons ### Container Images -SBOMs for container images are [attached to the image using Cosign](https://docs.sigstore.dev/cosign/signing/other_types/#sboms-software-bill-of-materials) and uploaded to the same registry. +SBOMs for container images are [attached to the image using Cosign](https://docs.sigstore.dev/signing/other_types#sboms-software-bill-of-materials) and uploaded to the same registry. As a consumer, use cosign to download and verify the SBOM: diff --git a/docs/versioned_docs/version-2.13/workflows/scale.md b/docs/versioned_docs/version-2.13/workflows/scale.md index 63b727c7d..06898ad0c 100644 --- a/docs/versioned_docs/version-2.13/workflows/scale.md +++ b/docs/versioned_docs/version-2.13/workflows/scale.md @@ -51,30 +51,30 @@ kubectl -n kube-system get nodes Alternatively, you can manually scale your cluster up or down: - - + + 1. Find your Constellation resource group. 2. Select the `scale-set-workers`. 3. Go to **settings** and **scaling**. 4. Set the new **instance count** and **save**. - - + + 1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). 2. **Edit** the **worker** instance group. 3. Set the new **number of instances** and **save**. - - + + 1. Go to Auto Scaling Groups and select the worker ASG to scale up. 2. Click **Edit** 3. Set the new (increased) **Desired capacity** and **Update**. - - + + ## Control-plane node scaling @@ -82,30 +82,30 @@ Control-plane nodes can **only be scaled manually and only scaled up**! To increase the number of control-plane nodes, follow these steps: - + - + 1. Find your Constellation resource group. 2. Select the `scale-set-controlplanes`. 3. Go to **settings** and **scaling**. 4. Set the new (increased) **instance count** and **save**. - - + + 1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). 2. **Edit** the **control-plane** instance group. 3. Set the new (increased) **number of instances** and **save**. - - + + 1. Go to Auto Scaling Groups and select the control-plane ASG to scale up. 2. Click **Edit** 3. Set the new (increased) **Desired capacity** and **Update**. 
- - + + If you scale down the number of control-planes nodes, the removed nodes won't be able to exit the `etcd` cluster correctly. This will endanger the quorum that's required to run a stable Kubernetes control plane. diff --git a/docs/versioned_docs/version-2.13/workflows/storage.md b/docs/versioned_docs/version-2.13/workflows/storage.md index 06fbc4de6..9e3d96346 100644 --- a/docs/versioned_docs/version-2.13/workflows/storage.md +++ b/docs/versioned_docs/version-2.13/workflows/storage.md @@ -21,30 +21,30 @@ For more details see [encrypted persistent storage](../architecture/encrypted-st Constellation supports the following drivers, which offer node-level encryption and optional integrity protection. - - + + **Constellation CSI driver for Azure Disk**: Mount Azure [Disk Storage](https://azure.microsoft.com/en-us/services/storage/disks/#overview) into your Constellation cluster. See the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-azuredisk-csi-driver) for more information. Since Azure Disks are mounted as `ReadWriteOnce`, they're only available to a single pod. - - + + **Constellation CSI driver for GCP Persistent Disk**: Mount [Persistent Disk](https://cloud.google.com/persistent-disk) block storage into your Constellation cluster. Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-gcp-compute-persistent-disk-csi-driver) for more information. - - + + **Constellation CSI driver for AWS Elastic Block Store** Mount [Elastic Block Store](https://aws.amazon.com/ebs/) storage volumes into your Constellation cluster. Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-aws-ebs-csi-driver) for more information. - - + + Note that in case the options above aren't a suitable solution for you, Constellation is compatible with all other CSI-based storage options. For example, you can use [AWS EFS](https://docs.aws.amazon.com/en_en/eks/latest/userguide/efs-csi.html), [Azure Files](https://docs.microsoft.com/en-us/azure/storage/files/storage-files-introduction), or [GCP Filestore](https://cloud.google.com/filestore) with Constellation out of the box. Constellation is just not providing transparent encryption on the node level for these storage types yet. @@ -53,8 +53,8 @@ Note that in case the options above aren't a suitable solution for you, Constell The Constellation CLI automatically installs Constellation's CSI driver for the selected CSP in your cluster. If you don't need a CSI driver or wish to deploy your own, you can disable the automatic installation by setting `deployCSIDriver` to `false` in your Constellation config file. - - + + Azure comes with two storage classes by default. @@ -82,8 +82,8 @@ Note that volume expansion isn't supported for integrity-protected disks. ::: - - + + GCP comes with two storage classes by default. @@ -111,8 +111,8 @@ Note that volume expansion isn't supported for integrity-protected disks. ::: - - + + AWS comes with two storage classes by default. @@ -140,8 +140,8 @@ Note that volume expansion isn't supported for integrity-protected disks. ::: - - + + 1. 
Create a [persistent volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) diff --git a/docs/versioned_docs/version-2.13/workflows/terminate.md b/docs/versioned_docs/version-2.13/workflows/terminate.md index e9599cb2b..062214c1c 100644 --- a/docs/versioned_docs/version-2.13/workflows/terminate.md +++ b/docs/versioned_docs/version-2.13/workflows/terminate.md @@ -4,7 +4,7 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + --- @@ -16,8 +16,8 @@ All ephemeral storage and state of your cluster will be lost. Make sure any data ::: - - + + Terminate the cluster by running: ```bash @@ -40,8 +40,8 @@ resources manually. Just run the `terminate` command again afterward to continue ::: - - + + Terminate the cluster by running: ```bash @@ -56,5 +56,5 @@ rm constellation-state.yaml constellation-admin.conf Only the `constellation-mastersecret.json` and the configuration file remain. - - + + diff --git a/docs/versioned_docs/version-2.13/workflows/terraform-module.md b/docs/versioned_docs/version-2.13/workflows/terraform-module.md index e38a4cc2c..e0534b9f5 100644 --- a/docs/versioned_docs/version-2.13/workflows/terraform-module.md +++ b/docs/versioned_docs/version-2.13/workflows/terraform-module.md @@ -1,15 +1,12 @@ # Use the Terraform module - You can manage a Constellation cluster through Terraform. The module package is available as part of the [GitHub release](https://github.com/edgelesssys/constellation/releases/). It consists of a convenience module for each cloud service provider (`{csp}-constellation`) that combines the IAM (`infrastructure/{csp}/iam`), infrastructure (`infrastructure/{csp}`), and constellation (`constellation-cluster`) modules. ## Prerequisites - - a Linux / Mac operating system - a Terraform installation of version `v1.4.4` or above ## Quick setup - The convenience module allows setting up a Constellation cluster with a single module. It's easiest to consume the module through a remote source, as shown below. This allows to upgrade the cluster to a newer Constellation version by simply updating the module source. @@ -21,7 +18,6 @@ The files are deleted on `terraform destroy`. ::: 1. Create a directory (workspace) for your Constellation cluster. - ```bash mkdir constellation-workspace cd constellation-workspace @@ -29,9 +25,9 @@ The files are deleted on `terraform destroy`. 1. Create a `main.tf` file to call the CSP specific Constellation module. - + - + ``` module "azure-constellation" { @@ -59,9 +55,9 @@ The files are deleted on `terraform destroy`. } ``` - + - + ``` module "aws-constellation" { @@ -90,9 +86,9 @@ The files are deleted on `terraform destroy`. } ``` - + - + ``` module "gcp-constellation" { @@ -100,11 +96,11 @@ The files are deleted on `terraform destroy`. name = "constell" project = "constell-proj" // replace with your project id service_account_id = "constid" - zone = "europe-west3-a" + zone = "europe-west2-a" node_groups = { control_plane_default = { role = "control-plane" - zone = "europe-west3-a" + zone = "europe-west2-a" instance_type = "n2d-standard-4" disk_size = 30 disk_type = "pd-ssd" @@ -112,7 +108,7 @@ The files are deleted on `terraform destroy`. }, worker_default = { role = "worker" - zone = "europe-west3-a" + zone = "europe-west2-a" instance_type = "n2d-standard-4" disk_size = 30 disk_type = "pd-ssd" @@ -122,29 +118,25 @@ The files are deleted on `terraform destroy`. } ``` - - + + 3. Initialize and apply the module. 
- ```bash terraform init terraform apply ``` ## Custom setup - If you need to separate IAM and cluster management or need custom infrastructure, you can also call the submodules individually. Look at the respective convenience module (`{csp}-constellation`) for how you can structure the module calls. The submodules are: - - `constellation-cluster`: manages the Constellation cluster - `fetch-image`: translates the Constellation image version to the image ID of the cloud service provider - `infrastructure/{csp}`: contains the cluster infrastructure resources - `infrastructure/iam/{csp}`: contains the IAM resources used within the cluster ## Cluster upgrades - :::tip For general information on cluster upgrades, see [Upgrade your cluster](./upgrade.md). ::: @@ -153,7 +145,6 @@ Using a [remote address as module source](https://developer.hashicorp.com/terraf 1. Update the `` variable inside the `source` field of the module. 2. Upgrade the Terraform module and provider dependencies and apply the Constellation upgrade. - ```bash terraform init -upgrade terraform apply diff --git a/docs/versioned_docs/version-2.13/workflows/troubleshooting.md b/docs/versioned_docs/version-2.13/workflows/troubleshooting.md index e8220c9a2..05f515ed7 100644 --- a/docs/versioned_docs/version-2.13/workflows/troubleshooting.md +++ b/docs/versioned_docs/version-2.13/workflows/troubleshooting.md @@ -101,8 +101,8 @@ To provide information during early stages of a node's boot process, Constellati You can view this information in the following places: - - + + 1. In your Azure subscription find the Constellation resource group. 2. Inside the resource group find the Application Insights resource called `constellation-insights-*`. @@ -112,8 +112,8 @@ You can view this information in the following places: To **find the disk UUIDs** use the following query: `traces | where message contains "Disk UUID"` - - + + 1. Select the project that hosts Constellation. 2. Go to the `Compute Engine` service. @@ -128,16 +128,16 @@ Constellation uses the default bucket to store logs. Its [default retention peri ::: - - + + 1. Open [AWS CloudWatch](https://console.aws.amazon.com/cloudwatch/home) 2. Select [Log Groups](https://console.aws.amazon.com/cloudwatch/home#logsV2:log-groups) 3. Select the log group that matches the name of your cluster. 4. Select the log stream for control or worker type nodes. - - + + ### Node shell access diff --git a/docs/versioned_docs/version-2.13/workflows/trusted-launch.md b/docs/versioned_docs/version-2.13/workflows/trusted-launch.md index 11d0a096c..13bd63ba6 100644 --- a/docs/versioned_docs/version-2.13/workflows/trusted-launch.md +++ b/docs/versioned_docs/version-2.13/workflows/trusted-launch.md @@ -14,7 +14,7 @@ Constellation supports trusted launch VMs with instance types `Standard_D*_v4` a Azure currently doesn't support [community galleries for trusted launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/share-gallery-community). Thus, you need to manually import the Constellation node image into your cloud subscription. -The latest image is available at `https://cdn.confidential.cloud/constellation/images/azure/trusted-launch/v2.2.0/constellation.img`. Simply adjust the version number to download a newer version. +The latest image is available at . Simply adjust the version number to download a newer version. After you've downloaded the image, create a resource group `constellation-images` in your Azure subscription and import the image. 
You can use a script to do this: @@ -26,7 +26,6 @@ AZURE_IMAGE_VERSION=2.2.0 AZURE_RESOURCE_GROUP_NAME=constellation-images AZURE_I ``` The script creates the following resources: - 1. A new image gallery with the default name `constellation-import` 2. A new image definition with the default name `constellation` 3. The actual image with the provided version. In this case `2.2.0`
diff --git a/docs/versioned_docs/version-2.13/workflows/verify-cli.md b/docs/versioned_docs/version-2.13/workflows/verify-cli.md index e33569d37..1280c51b0 100644 --- a/docs/versioned_docs/version-2.13/workflows/verify-cli.md +++ b/docs/versioned_docs/version-2.13/workflows/verify-cli.md @@ -4,11 +4,11 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + ---
-Edgeless Systems uses [sigstore](https://www.sigstore.dev/) and [SLSA](https://slsa.dev) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/cosign/signing/overview/), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at `https://rekor.sigstore.dev`.
+Edgeless Systems uses [sigstore](https://www.sigstore.dev/) and [SLSA](https://slsa.dev) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/signing/quickstart), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at <https://rekor.sigstore.dev>.
:::note The public key for Edgeless Systems' long-term code-signing key is: @@ -20,7 +20,7 @@ JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== -----END PUBLIC KEY----- ```
-The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems).
+The public key is also available for download at <https://edgeless.systems/es.pub> and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems).
::: The Rekor transparency log is a public append-only ledger that verifies and records signatures and associated metadata. The Rekor transparency log enables everyone to observe the sequence of (software) signatures issued by Edgeless Systems and many other parties. The transparency log allows for the public identification of dubious or malicious signatures. @@ -33,11 +33,7 @@ You don't need to verify the Constellation node images. This is done automatical ## Verify the signature -:::info -This guide assumes Linux on an amd64 processor. The exact steps for other platforms differ slightly. -::: - -First, [install the Cosign CLI](https://docs.sigstore.dev/cosign/system_config/installation/). Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example: +First, [install the Cosign CLI](https://docs.sigstore.dev/system_config/installation).
Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example: ```shell-session $ cosign verify-blob --key https://edgeless.systems/es.pub --signature constellation-linux-amd64.sig constellation-linux-amd64 diff --git a/docs/versioned_docs/version-2.14/architecture/attestation.md b/docs/versioned_docs/version-2.14/architecture/attestation.md index 415b41f47..04b85d8ad 100644 --- a/docs/versioned_docs/version-2.14/architecture/attestation.md +++ b/docs/versioned_docs/version-2.14/architecture/attestation.md @@ -121,8 +121,8 @@ Constellation allows to specify in the config which measurements should be enfor Enforcing non-reproducible measurements controlled by the cloud provider means that changes in these values require manual updates to the cluster's config. By default, Constellation only enforces measurements that are stable values produced by the infrastructure or by Constellation directly. - - + + Constellation uses the [vTPM](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch#vtpm) feature of Azure CVMs for runtime measurements. This vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. @@ -152,8 +152,8 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + Constellation uses the [vTPM](https://cloud.google.com/compute/confidential-vm/docs/about-cvm) feature of CVMs on GCP for runtime measurements. Note that this vTPM doesn't run inside the hardware-protected CVM context, but is emulated by the hypervisor. @@ -185,8 +185,8 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + Constellation uses the [vTPM](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html) (NitroTPM) feature of the [AWS Nitro System](http://aws.amazon.com/ec2/nitro/) on AWS for runtime measurements. @@ -217,16 +217,16 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + ### CVM verification To verify the integrity of the received attestation statement, a chain of trust from the CVM technology to the interface providing the statement has to be established. For verification of the CVM technology, Constellation may expose additional options in its config file. - - + + On Azure, AMD SEV-SNP is used to provide runtime encryption to the VMs. An SEV-SNP attestation report is used to establish trust in the vTPM running inside the VM. @@ -248,13 +248,13 @@ You may customize certain parameters for verification of the attestation stateme More explicitly, it controls the verification of the `IDKeyDigest` value in the SEV-SNP attestation report. You can provide a list of accepted key digests and specify a policy on how this list is compared against the reported `IDKeyDigest`. - - + + There is no additional configuration available for GCP. - - + + On AWS, AMD SEV-SNP is used to provide runtime encryption to the VMs. An SEV-SNP attestation report is used to establish trust in the VM and it's vTPM. @@ -275,8 +275,8 @@ You may customize certain parameters for verification of the attestation stateme This is the intermediate certificate for verifying the SEV-SNP report's signature. 
If it's not specified, the CLI fetches it from the AMD key distribution server. - - + + ## Cluster attestation diff --git a/docs/versioned_docs/version-2.14/architecture/keys.md b/docs/versioned_docs/version-2.14/architecture/keys.md index 553d9d4e2..f2c8c3fba 100644 --- a/docs/versioned_docs/version-2.14/architecture/keys.md +++ b/docs/versioned_docs/version-2.14/architecture/keys.md @@ -105,7 +105,7 @@ Initially, it will support the following KMSs: * [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/#product-overview) * [KMIP-compatible KMS](https://www.oasis-open.org/committees/tc_home.php?wg_abbrev=kmip) -Storing the keys in Cloud KMS of AWS, Azure, or GCP binds the key usage to the particular cloud identity access management (IAM). +Storing the keys in Cloud KMS of AWS, GCP, or Azure binds the key usage to the particular cloud identity access management (IAM). In the future, Constellation will support remote attestation-based access policies for Cloud KMS once available. Note that using a Cloud KMS limits the isolation and protection to the guarantees of the particular offering. diff --git a/docs/versioned_docs/version-2.14/getting-started/first-steps-local.md b/docs/versioned_docs/version-2.14/getting-started/first-steps-local.md index 98f0302de..052d29eae 100644 --- a/docs/versioned_docs/version-2.14/getting-started/first-steps-local.md +++ b/docs/versioned_docs/version-2.14/getting-started/first-steps-local.md @@ -45,8 +45,8 @@ sudo iptables -P FORWARD ACCEPT ## Create a cluster - - + + With the `constellation mini` command, you can deploy and test Constellation locally. This mode is called MiniConstellation. Conceptually, MiniConstellation is similar to [MicroK8s](https://microk8s.io/), [K3s](https://k3s.io/), and [minikube](https://minikube.sigs.k8s.io/docs/). @@ -74,8 +74,8 @@ constellation mini up This will configure your current directory as the [workspace](../architecture/orchestration.md#workspaces) for this cluster. All `constellation` commands concerning this cluster need to be issued from this directory. - - + + With the QEMU provider, you can create a local Constellation cluster as if it were in the cloud. The provider uses [QEMU](https://www.qemu.org/) to create multiple VMs for the cluster nodes, which interact with each other. @@ -145,8 +145,8 @@ attaching persistent storage, or autoscaling aren't available. export KUBECONFIG="$PWD/constellation-admin.conf" ``` - - + + ## Connect to the cluster @@ -199,8 +199,8 @@ worker-0 Ready 32s v1.24.6 ## Terminate your cluster - - + + Once you are done, you can clean up the created resources using the following command: @@ -211,8 +211,8 @@ constellation mini down This will destroy your cluster and clean up your workspace. The VM image and cluster configuration file (`constellation-conf.yaml`) will be kept and may be reused to create new clusters. - - + + Once you are done, you can clean up the created resources using the following command: @@ -240,8 +240,8 @@ Your Constellation cluster was terminated successfully. This will destroy your cluster and clean up your workspace. The VM image and cluster configuration file (`constellation-conf.yaml`) will be kept and may be reused to create new clusters. 
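If you exported `KUBECONFIG` earlier in this guide, note that the admin config it points to may be gone after the cleanup. A small optional tidy-up, not part of the official flow:

```bash
# Drop the stale kubeconfig reference so kubectl doesn't point at a removed file
unset KUBECONFIG

# The kept configuration can be reused for the next local cluster from this directory
ls constellation-conf.yaml
```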
- - + + ## Troubleshooting diff --git a/docs/versioned_docs/version-2.14/getting-started/first-steps.md b/docs/versioned_docs/version-2.14/getting-started/first-steps.md index e4e6c8e48..c58d4a0ae 100644 --- a/docs/versioned_docs/version-2.14/getting-started/first-steps.md +++ b/docs/versioned_docs/version-2.14/getting-started/first-steps.md @@ -15,39 +15,39 @@ If you encounter any problem with the following steps, make sure to use the [lat 1. Create the [configuration file](../workflows/config.md) and state file for your cloud provider. - + - + ```bash constellation config generate azure ``` - + - + ```bash constellation config generate gcp ``` - + - + ```bash constellation config generate aws ``` - + - + 2. Create your [IAM configuration](../workflows/config.md#creating-an-iam-configuration). - + - + ```bash constellation iam create azure --region=westus --resourceGroup=constellTest --servicePrincipal=spTest --update-config @@ -62,21 +62,21 @@ If you encounter any problem with the following steps, make sure to use the [lat * `westeurope` * `southeastasia` - + - + ```bash - constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --serviceAccountID=constell-test --update-config + constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west2-a --serviceAccountID=constell-test --update-config ``` - This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. + This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west2-a` creating a new service account `constell-test`. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `C2D` or `N2D`. - + - + ```bash constellation iam create aws --zone=us-east-2a --prefix=constellTest --update-config @@ -103,8 +103,8 @@ If you encounter any problem with the following steps, make sure to use the [lat You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). - - + + :::tip To learn about all options you have for managing IAM resources and Constellation configuration, see the [Configuration workflow](../workflows/config.md). 
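For the AWS zone mentioned above, you can also query the available zones directly from the shell instead of looking them up in the documentation; a sketch using the AWS CLI with an example region:

```bash
# List availability zones of a region to pick a value for --zone
aws ec2 describe-availability-zones --region us-east-2 \
  --query 'AvailabilityZones[].ZoneName' --output table
```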
diff --git a/docs/versioned_docs/version-2.14/getting-started/install.md b/docs/versioned_docs/version-2.14/getting-started/install.md index 08e2315ef..f2cad8b02 100644 --- a/docs/versioned_docs/version-2.14/getting-started/install.md +++ b/docs/versioned_docs/version-2.14/getting-started/install.md @@ -9,7 +9,7 @@ Make sure the following requirements are met: * Your machine is running Linux or macOS * You have admin rights on your machine * [kubectl](https://kubernetes.io/docs/tasks/tools/) is installed -* Your CSP is Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) +* Your CSP is Microsoft Azure, Google Cloud Platform (GCP), or Amazon Web Services (AWS) ## Install the Constellation CLI @@ -22,8 +22,8 @@ If you prefer to use Terraform, you can alternatively use the [Terraform provide The CLI executable is available at [GitHub](https://github.com/edgelesssys/constellation/releases). Install it with the following commands: - - + + 1. Download the CLI: @@ -39,8 +39,8 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-linux-amd64 /usr/local/bin/constellation ``` - - + + 1. Download the CLI: @@ -56,9 +56,9 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-linux-arm64 /usr/local/bin/constellation ``` - + - + 1. Download the CLI: @@ -74,9 +74,9 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-darwin-arm64 /usr/local/bin/constellation ``` - + - + 1. Download the CLI: @@ -92,8 +92,8 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-darwin-amd64 /usr/local/bin/constellation ``` - - + + :::tip The CLI supports autocompletion for various shells. To set it up, run `constellation completion` and follow the given steps. @@ -109,12 +109,12 @@ If you don't have a cloud subscription, you can also set up a [local Constellati ### Required permissions - - + + The following [resource providers need to be registered](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/resource-providers-and-types#register-resource-provider) in your subscription: -* `Microsoft.Attestation` +* `Microsoft.Attestation` \[2] * `Microsoft.Compute` * `Microsoft.Insights` * `Microsoft.ManagedIdentity` @@ -134,7 +134,7 @@ The built-in `Owner` role is a superset of these permissions. To [create a Constellation cluster](../workflows/create.md), you need the following permissions: -* `Microsoft.Attestation/attestationProviders/*` +* `Microsoft.Attestation/attestationProviders/*` \[2] * `Microsoft.Compute/virtualMachineScaleSets/*` * `Microsoft.Insights/components/*` * `Microsoft.ManagedIdentity/userAssignedIdentities/*` @@ -152,8 +152,10 @@ Follow Microsoft's guide on [understanding](https://learn.microsoft.com/en-us/az 1: You can omit `*/register/Action` if the resource providers mentioned above are already registered and the `ARM_SKIP_PROVIDER_REGISTRATION` environment variable is set to `true` when creating the IAM configuration. - - +2: You can omit `Microsoft.Attestation/attestationProviders/*` and the registration of `Microsoft.Attestation` if `EnforceIDKeyDigest` isn't set to `MAAFallback` in the [config file](../workflows/config.md#configure-your-cluster). + + + Create a new project for Constellation or use an existing one. Enable the [Compute Engine API](https://console.cloud.google.com/apis/library/compute.googleapis.com) on it. 
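If you prefer the gcloud CLI over the console, the API can also be enabled from the shell; a sketch using the placeholder project ID that appears elsewhere in these docs:

```bash
# Enable the Compute Engine API on the project that will host Constellation
gcloud services enable compute.googleapis.com --project=yourproject-12345
```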
@@ -238,8 +240,8 @@ Together, the built-in roles `roles/editor`, `roles/compute.instanceAdmin` and ` Follow Google's guide on [understanding](https://cloud.google.com/iam/docs/understanding-roles) and [assigning roles](https://cloud.google.com/iam/docs/granting-changing-revoking-access). - - + + To set up a Constellation cluster, you need to perform two tasks that require permissions: create the infrastructure and create roles for cluster nodes. Both of these actions can be performed by different users, e.g., an administrator to create roles and a DevOps engineer to create the infrastructure. @@ -289,8 +291,8 @@ The built-in `PowerUserAccess` policy is a superset of these permissions. Follow Amazon's guide on [understanding](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) and [managing policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html). - - + + ### Authentication @@ -300,8 +302,8 @@ You need to authenticate with your CSP. The following lists the required steps f The steps for a *testing* environment are simpler. However, they may expose secrets to the CSP. If in doubt, follow the *production* steps. ::: - - + + **Testing** @@ -317,8 +319,8 @@ az login Other options are described in Azure's [authentication guide](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli). - - + + **Testing** @@ -341,8 +343,8 @@ Use one of the following options on a trusted machine: Follow [Google's guide](https://cloud.google.com/docs/authentication/production#manually) for setting up your credentials. - - + + **Testing** @@ -358,9 +360,9 @@ aws configure Options and first steps are described in the [AWS CLI documentation](https://docs.aws.amazon.com/cli/index.html). - + - + ## Next steps diff --git a/docs/versioned_docs/version-2.14/overview/clouds.md b/docs/versioned_docs/version-2.14/overview/clouds.md index dfc3d5307..3ccbb0d6d 100644 --- a/docs/versioned_docs/version-2.14/overview/clouds.md +++ b/docs/versioned_docs/version-2.14/overview/clouds.md @@ -31,11 +31,11 @@ This firmware is signed by Azure. The signature is reflected in the remote-attestation statements of CVMs. Thus, the Azure closed-source firmware becomes part of Constellation's trusted computing base (TCB). -\* Recently, [Azure announced the open source paravisor OpenHCL](https://techcommunity.microsoft.com/blog/windowsosplatform/openhcl-the-new-open-source-paravisor/4273172). It's the foundation for fully open source and verifiable CVM firmware. Once Azure provides their CVM firmware with reproducible builds based on OpenHCL, (4) switches from *No* to *Yes*. Constellation will support OpenHCL based firmware on Azure in the future. +\* Recently, Azure [announced](https://techcommunity.microsoft.com/t5/azure-confidential-computing/azure-confidential-vms-using-sev-snp-dcasv5-ecasv5-are-now/ba-p/3573747) the *limited preview* of CVMs with customizable firmware. With this CVM type, (4) switches from *No* to *Yes*. Constellation will support customizable firmware on Azure in the future. ## Google Cloud Platform (GCP) -The [CVMs Generally Available in GCP](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview#amd_sev) are based on AMD SEV but don't have SNP features enabled. +The [CVMs Generally Available in GCP](https://cloud.google.com/compute/confidential-vm/docs/create-confidential-vm-instance) are based on AMD SEV but don't have SNP features enabled. 
CVMs with SEV-SNP enabled are currently in [private preview](https://cloud.google.com/blog/products/identity-security/rsa-snp-vm-more-confidential). Regarding (3), with their SEV-SNP offering Google provides direct access to remote-attestation statements. However, regarding (4), the CVMs still include closed-source firmware. diff --git a/docs/versioned_docs/version-2.14/overview/confidential-kubernetes.md b/docs/versioned_docs/version-2.14/overview/confidential-kubernetes.md index bff8c3322..ca20df4de 100644 --- a/docs/versioned_docs/version-2.14/overview/confidential-kubernetes.md +++ b/docs/versioned_docs/version-2.14/overview/confidential-kubernetes.md @@ -23,9 +23,9 @@ With the above, Constellation wraps an entire cluster into one coherent and veri ![Confidential Kubernetes](../_media/concept-constellation.svg) -## Comparison: Managed Kubernetes with CVMs +## Contrast: Managed Kubernetes with CVMs -In comparison, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. Consequently, this approach leaves a large attack surface, as is depicted in the following. +In contrast, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. Consequently, this approach leaves a large attack surface, as is depicted in the following. ![Concept: Managed Kubernetes plus CVMs](../_media/concept-managed.svg) diff --git a/docs/versioned_docs/version-2.14/overview/performance/compute.md b/docs/versioned_docs/version-2.14/overview/performance/compute.md deleted file mode 100644 index 88dd4b1b2..000000000 --- a/docs/versioned_docs/version-2.14/overview/performance/compute.md +++ /dev/null @@ -1,11 +0,0 @@ -# Impact of runtime encryption on compute performance - -All nodes in a Constellation cluster are executed inside Confidential VMs (CVMs). Consequently, the performance of Constellation is inherently linked to the performance of these CVMs. 
- -## AMD and Azure benchmarking - -AMD and Azure have collectively released a [performance benchmark](https://community.amd.com/t5/business/microsoft-azure-confidential-computing-powered-by-3rd-gen-epyc/ba-p/497796) for CVMs that utilize 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. This benchmark, which included a variety of mostly compute-intensive tests such as SPEC CPU 2017 and CoreMark, demonstrated that CVMs experience only minor performance degradation (ranging from 2% to 8%) when compared to standard VMs. Such results are indicative of the performance that can be expected from compute-intensive workloads running with Constellation on Azure. - -## AMD and Google benchmarking - -Similarly, AMD and Google have jointly released a [performance benchmark](https://www.amd.com/system/files/documents/3rd-gen-epyc-gcp-c2d-conf-compute-perf-brief.pdf) for CVMs employing 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. With high-performance computing workloads such as WRF, NAMD, Ansys CFS, and Ansys LS_DYNA, they observed analogous findings, with only minor performance degradation (between 2% and 4%) compared to standard VMs. These outcomes are reflective of the performance that can be expected for compute-intensive workloads running with Constellation on GCP. diff --git a/docs/versioned_docs/version-2.14/overview/performance/io.md b/docs/versioned_docs/version-2.14/overview/performance/io.md index 3ae796f8a..dc7cf3d8b 100644 --- a/docs/versioned_docs/version-2.14/overview/performance/io.md +++ b/docs/versioned_docs/version-2.14/overview/performance/io.md @@ -58,7 +58,7 @@ The following infrastructure configurations was used: This section gives a thorough analysis of the network performance of Constellation, specifically focusing on measuring TCP and UDP bandwidth. The benchmark measured the bandwidth of pod-to-pod and pod-to-service connections between two different nodes using [`iperf`](https://iperf.fr/). -GKE and Constellation on GCP had a maximum network bandwidth of [10 Gbps](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines). +GKE and Constellation on GCP had a maximum network bandwidth of [10 Gbps](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machineshttps://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines). AKS with `Standard_D4as_v5` machines a maximum network bandwidth of [12.5 Gbps](https://learn.microsoft.com/en-us/azure/virtual-machines/dasv5-dadsv5-series#dasv5-series). The Confidential VM equivalent `Standard_DC4as_v5` currently has a network bandwidth of [1.25 Gbps](https://learn.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series#dcasv5-series-products). Therefore, to make the test comparable, both AKS and Constellation on Azure were running with `Standard_DC4as_v5` machines and 1.25 Gbps bandwidth. diff --git a/docs/versioned_docs/version-2.14/overview/performance/performance.md b/docs/versioned_docs/version-2.14/overview/performance/performance.md index 59bf86602..7f22a693e 100644 --- a/docs/versioned_docs/version-2.14/overview/performance/performance.md +++ b/docs/versioned_docs/version-2.14/overview/performance/performance.md @@ -1,10 +1,18 @@ # Performance analysis of Constellation -This section provides a comprehensive examination of the performance characteristics of Constellation. 
+This section provides a comprehensive examination of the performance characteristics of Constellation, encompassing various aspects, including runtime encryption, I/O benchmarks, and real-world applications. -## Runtime encryption +## Impact of runtime encryption on performance -Runtime encryption affects compute performance. [Benchmarks by Azure and Google](compute.md) show that the performance degradation of Confidential VMs (CVMs) is small, ranging from 2% to 8% for compute-intensive workloads. +All nodes in a Constellation cluster are executed inside Confidential VMs (CVMs). Consequently, the performance of Constellation is inherently linked to the performance of these CVMs. + +### AMD and Azure benchmarking + +AMD and Azure have collectively released a [performance benchmark](https://community.amd.com/t5/business/microsoft-azure-confidential-computing-powered-by-3rd-gen-epyc/ba-p/497796) for CVMs that utilize 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. This benchmark, which included a variety of mostly compute-intensive tests such as SPEC CPU 2017 and CoreMark, demonstrated that CVMs experience only minor performance degradation (ranging from 2% to 8%) when compared to standard VMs. Such results are indicative of the performance that can be expected from compute-intensive workloads running with Constellation on Azure. + +### AMD and Google benchmarking + +Similarly, AMD and Google have jointly released a [performance benchmark](https://www.amd.com/system/files/documents/3rd-gen-epyc-gcp-c2d-conf-compute-perf-brief.pdf) for CVMs employing 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. With high-performance computing workloads such as WRF, NAMD, Ansys CFS, and Ansys LS_DYNA, they observed analogous findings, with only minor performance degradation (between 2% and 4%) compared to standard VMs. These outcomes are reflective of the performance that can be expected for compute-intensive workloads running with Constellation on GCP. ## I/O performance benchmarks diff --git a/docs/versioned_docs/version-2.14/overview/product.md b/docs/versioned_docs/version-2.14/overview/product.md index 02e12e2f3..e31a4658f 100644 --- a/docs/versioned_docs/version-2.14/overview/product.md +++ b/docs/versioned_docs/version-2.14/overview/product.md @@ -6,7 +6,7 @@ From a security perspective, Constellation implements the [Confidential Kubernet From an operational perspective, Constellation provides the following key features: -* **Native support for different clouds**: Constellation works on Amazon Web Services (AWS), Microsoft Azure, and Google Cloud Platform (GCP). Support for OpenStack-based environments is coming with a future release. Constellation securely interfaces with the cloud infrastructure to provide [cluster autoscaling](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), [dynamic persistent volumes](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/), and [service load balancing](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). +* **Native support for different clouds**: Constellation works on Microsoft Azure, Google Cloud Platform (GCP), and Amazon Web Services (AWS). Support for OpenStack-based environments is coming with a future release. 
Constellation securely interfaces with the cloud infrastructure to provide [cluster autoscaling](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), [dynamic persistent volumes](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/), and [service load balancing](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). * **High availability**: Constellation uses a [multi-master architecture](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/) with a [stacked etcd topology](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/ha-topology/#stacked-etcd-topology) to ensure high availability. * **Integrated Day-2 operations**: Constellation lets you securely [upgrade](../workflows/upgrade.md) your cluster to a new release. It also lets you securely [recover](../workflows/recovery.md) a failed cluster. Both with a single command. * **Support for Terraform**: Constellation includes a [Terraform provider](../workflows/terraform-provider.md) that lets you manage the full lifecycle of your cluster via Terraform. diff --git a/docs/versioned_docs/version-2.14/workflows/config.md b/docs/versioned_docs/version-2.14/workflows/config.md index 5e938c29c..165100b81 100644 --- a/docs/versioned_docs/version-2.14/workflows/config.md +++ b/docs/versioned_docs/version-2.14/workflows/config.md @@ -4,7 +4,7 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + --- @@ -14,49 +14,49 @@ Before you can create your cluster, you need to configure the identity and acces You can generate a configuration file for your CSP by using the following CLI command: - - + + ```bash constellation config generate azure ``` - - + + ```bash constellation config generate gcp ``` - - + + ```bash constellation config generate aws ``` - - + + This creates the file `constellation-conf.yaml` in the current directory. ## Choosing a VM type Constellation supports the following VM types: - - + + By default, Constellation uses `Standard_DC4as_v5` CVMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. For CVMs, any VM type with a minimum of 4 vCPUs from the [DCasv5 & DCadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series) or [ECasv5 & ECadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/ecasv5-ecadsv5-series) families is supported. You can also run `constellation config instance-types` to get the list of all supported options. - - + + By default, Constellation uses `n2d-standard-4` VMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. Supported are all machines with a minimum of 4 vCPUs from the [C2D](https://cloud.google.com/compute/docs/compute-optimized-machines#c2d_machine_types) or [N2D](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines) family. You can run `constellation config instance-types` to get the list of all supported options. - - + + By default, Constellation uses `m6a.xlarge` VMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. 
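As noted above, the CLI can print the machine types it supports; a quick way to check before editing **instanceType**. The `sed` line is only an illustration of a manual edit, not an official workflow:

```bash
# Print all instance types supported for the configured CSP
constellation config instance-types

# Illustrative only: switch the GCP default to a larger machine
sed -i 's/n2d-standard-4/n2d-standard-8/g' constellation-conf.yaml
```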
@@ -67,8 +67,8 @@ If you are using the attestation variant `awsNitroTPM`, you can choose any of th The Constellation CLI can also print the supported instance types with: `constellation config instance-types`. - - + + Fill the desired VM type into the **instanceType** fields in the `constellation-conf.yml` file. @@ -125,8 +125,8 @@ See also Constellation's [Kubernetes support policy](../architecture/versions.md You can create an IAM configuration for your cluster automatically using the `constellation iam create` command. If you already have a Constellation configuration file, you can add the `--update-config` flag to the command. This writes the needed IAM fields into your configuration. Furthermore, the flag updates the zone/region of the configuration if it hasn't been set yet. - - + + You must be authenticated with the [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). @@ -146,23 +146,23 @@ Note that CVMs are currently only supported in a few regions, check [Azure's pro Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - + + You must be authenticated with the [GCP CLI](https://cloud.google.com/sdk/gcloud) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). ```bash -constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --serviceAccountID=constell-test +constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west2-a --serviceAccountID=constell-test ``` -This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. +This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west2-a` creating a new service account `constell-test`. Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `N2D`. Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - + + You must be authenticated with the [AWS CLI](https://aws.amazon.com/en/cli/) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). @@ -186,16 +186,16 @@ You can find a list of all [regions in AWS's documentation](https://docs.aws.ama Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - + +
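Each `constellation iam create` variant above expects an authenticated cloud CLI session. One way to confirm this beforehand, using whichever command matches your CSP:

```bash
az account show                  # Azure: shows the active subscription
gcloud auth list                 # GCP: shows the active account
aws sts get-caller-identity      # AWS: shows the active IAM identity
```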
Alternatively, you can manually create the IAM configuration on your CSP. The following describes the configuration fields and how you obtain the required information or create the required resources. - - + + * **subscription**: The UUID of your Azure subscription, e.g., `8b8bd01f-efd9-4113-9bd1-c82137c32da7`. @@ -224,19 +224,19 @@ The following describes the configuration fields and how you obtain the required The user-assigned identity is used by instances of the cluster to access other cloud resources. For more information about managed identities refer to [Azure's documentation](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/how-manage-user-assigned-managed-identities). - + - + * **project**: The ID of your GCP project, e.g., `constellation-129857`. You can find it on the [welcome screen of your GCP project](https://console.cloud.google.com/welcome). For more information refer to [Google's documentation](https://support.google.com/googleapi/answer/7014113). -* **region**: The GCP region you want to deploy your cluster in, e.g., `us-central1`. +* **region**: The GCP region you want to deploy your cluster in, e.g., `us-west1`. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). -* **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-central1-a`. +* **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-west1-a`. You can find a [list of all zones in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). @@ -250,9 +250,9 @@ The following describes the configuration fields and how you obtain the required Afterward, create and download a new JSON key for this service account. Place the downloaded file in your Constellation workspace, and set the config parameter to the filename, e.g., `constellation-129857-15343dba46cb.json`. - + - + * **region**: The name of your chosen AWS data center region, e.g., `us-east-2`. @@ -283,9 +283,9 @@ The following describes the configuration fields and how you obtain the required Alternatively, you can create the AWS profile with a tool of your choice. Use the JSON policy in [main.tf](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam/main.tf) in the resource `aws_iam_policy.worker_node_policy`. - + - +
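For the GCP fields above, a minimal sketch of creating the service account and downloading its JSON key with the gcloud CLI. The names and project ID are placeholders, and the roles listed above still need to be granted separately:

```bash
# Create a service account for Constellation (name and project ID are placeholders)
gcloud iam service-accounts create constell-test --project=constellation-129857

# Download a JSON key and place it in the Constellation workspace
gcloud iam service-accounts keys create constellation-129857-key.json \
  --iam-account=constell-test@constellation-129857.iam.gserviceaccount.com
```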
Now that you've configured your CSP, you can [create your cluster](./create.md). diff --git a/docs/versioned_docs/version-2.14/workflows/create.md b/docs/versioned_docs/version-2.14/workflows/create.md index 6074ebb16..54bc9dcbc 100644 --- a/docs/versioned_docs/version-2.14/workflows/create.md +++ b/docs/versioned_docs/version-2.14/workflows/create.md @@ -4,7 +4,7 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + --- @@ -27,8 +27,8 @@ If you don't have a cloud subscription, you can also set up a [local Constellati Before you create the cluster, make sure to have a [valid configuration file](./config.md). - - + + ```bash constellation apply @@ -36,8 +36,8 @@ constellation apply `apply` stores the state of your cluster's cloud resources in a [`constellation-terraform`](../architecture/orchestration.md#cluster-creation-process) directory in your workspace. - - + + Self-managed infrastructure allows for more flexibility in the setup, by separating the infrastructure setup from the Constellation cluster management. This provides flexibility in DevOps and can meet potential regulatory requirements. @@ -56,7 +56,7 @@ management tooling of your choice. You need to keep the essential functionality :::info - On Azure, a manual update to the MAA provider's policy is necessary. + On Azure, if the enforcement policy is set to `MAAFallback` in `constellation-config.yaml`, a manual update to the MAA provider's policy is necessary. You can apply the update with the following command after creating the infrastructure, with `` being the URL of the MAA provider (i.e., `$(terraform output attestation_url | jq -r)`, when using the minimal Terraform configuration). ```bash @@ -77,8 +77,8 @@ With the required cloud resources set up, continue with initializing your cluste constellation apply --skip-phases=infrastructure ``` - - + + Finally, configure `kubectl` for your cluster: diff --git a/docs/versioned_docs/version-2.14/workflows/recovery.md b/docs/versioned_docs/version-2.14/workflows/recovery.md index f2d5f22c1..955981749 100644 --- a/docs/versioned_docs/version-2.14/workflows/recovery.md +++ b/docs/versioned_docs/version-2.14/workflows/recovery.md @@ -16,8 +16,8 @@ You can check the health status of the nodes via the cloud service provider (CSP Constellation provides logging information on the boot process and status via [cloud logging](troubleshooting.md#cloud-logging). In the following, you'll find detailed descriptions for identifying clusters stuck in recovery for each CSP. - - + + In the Azure portal, find the cluster's resource group. Inside the resource group, open the control plane *Virtual machine scale set* `constellation-scale-set-controlplanes-`. @@ -51,8 +51,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. - - + + First, check that the control plane *Instance Group* has enough members in a *Ready* state. In the GCP Console, go to **Instance Groups** and check the group for the cluster's control plane `-control-plane-`. @@ -87,8 +87,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. - - + + First, open the AWS console to view all Auto Scaling Groups (ASGs) in the region of your cluster. Select the ASG of the control plane `--control-plane` and check that enough members are in a *Running* state. 
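The same check can be scripted with the AWS CLI; a sketch that assumes the control-plane ASG name contains `control-plane`, so adjust the filter to your cluster's naming:

```bash
# List the control-plane Auto Scaling Group instances and their states
aws autoscaling describe-auto-scaling-groups \
  --query "AutoScalingGroups[?contains(AutoScalingGroupName, 'control-plane')].Instances[].[InstanceId, LifecycleState, HealthStatus]" \
  --output table
```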
@@ -118,8 +118,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. - - + + ## Recover a cluster diff --git a/docs/versioned_docs/version-2.14/workflows/sbom.md b/docs/versioned_docs/version-2.14/workflows/sbom.md index 6c1702dee..9ef6eb65c 100644 --- a/docs/versioned_docs/version-2.14/workflows/sbom.md +++ b/docs/versioned_docs/version-2.14/workflows/sbom.md @@ -1,6 +1,6 @@ # Consume software bill of materials (SBOMs) - + --- @@ -11,15 +11,13 @@ SBOMs for Constellation are generated using [Syft](https://github.com/anchore/sy :::note The public key for Edgeless Systems' long-term code-signing key is: - ``` -----BEGIN PUBLIC KEY----- MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEf8F1hpmwE+YCFXzjGtaQcrL6XZVT JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== -----END PUBLIC KEY----- ``` - -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). +The public key is also available for download at https://edgeless.systems/es.pub and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). Make sure the key is available in a file named `cosign.pub` to execute the following examples. ::: @@ -40,7 +38,7 @@ cosign verify-blob --key cosign.pub --signature constellation.spdx.sbom.sig cons ### Container Images -SBOMs for container images are [attached to the image using Cosign](https://docs.sigstore.dev/cosign/signing/other_types/#sboms-software-bill-of-materials) and uploaded to the same registry. +SBOMs for container images are [attached to the image using Cosign](https://docs.sigstore.dev/signing/other_types#sboms-software-bill-of-materials) and uploaded to the same registry. As a consumer, use cosign to download and verify the SBOM: diff --git a/docs/versioned_docs/version-2.14/workflows/scale.md b/docs/versioned_docs/version-2.14/workflows/scale.md index 63b727c7d..06898ad0c 100644 --- a/docs/versioned_docs/version-2.14/workflows/scale.md +++ b/docs/versioned_docs/version-2.14/workflows/scale.md @@ -51,30 +51,30 @@ kubectl -n kube-system get nodes Alternatively, you can manually scale your cluster up or down: - - + + 1. Find your Constellation resource group. 2. Select the `scale-set-workers`. 3. Go to **settings** and **scaling**. 4. Set the new **instance count** and **save**. - - + + 1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). 2. **Edit** the **worker** instance group. 3. Set the new **number of instances** and **save**. - - + + 1. Go to Auto Scaling Groups and select the worker ASG to scale up. 2. Click **Edit** 3. Set the new (increased) **Desired capacity** and **Update**. - - + + ## Control-plane node scaling @@ -82,30 +82,30 @@ Control-plane nodes can **only be scaled manually and only scaled up**! To increase the number of control-plane nodes, follow these steps: - + - + 1. Find your Constellation resource group. 2. Select the `scale-set-controlplanes`. 3. Go to **settings** and **scaling**. 4. Set the new (increased) **instance count** and **save**. - - + + 1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). 2. **Edit** the **control-plane** instance group. 3. Set the new (increased) **number of instances** and **save**. - - + + 1. Go to Auto Scaling Groups and select the control-plane ASG to scale up. 2. Click **Edit** 3. 
Set the new (increased) **Desired capacity** and **Update**. - - + + If you scale down the number of control-planes nodes, the removed nodes won't be able to exit the `etcd` cluster correctly. This will endanger the quorum that's required to run a stable Kubernetes control plane. diff --git a/docs/versioned_docs/version-2.14/workflows/storage.md b/docs/versioned_docs/version-2.14/workflows/storage.md index 06fbc4de6..9e3d96346 100644 --- a/docs/versioned_docs/version-2.14/workflows/storage.md +++ b/docs/versioned_docs/version-2.14/workflows/storage.md @@ -21,30 +21,30 @@ For more details see [encrypted persistent storage](../architecture/encrypted-st Constellation supports the following drivers, which offer node-level encryption and optional integrity protection. - - + + **Constellation CSI driver for Azure Disk**: Mount Azure [Disk Storage](https://azure.microsoft.com/en-us/services/storage/disks/#overview) into your Constellation cluster. See the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-azuredisk-csi-driver) for more information. Since Azure Disks are mounted as `ReadWriteOnce`, they're only available to a single pod. - - + + **Constellation CSI driver for GCP Persistent Disk**: Mount [Persistent Disk](https://cloud.google.com/persistent-disk) block storage into your Constellation cluster. Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-gcp-compute-persistent-disk-csi-driver) for more information. - - + + **Constellation CSI driver for AWS Elastic Block Store** Mount [Elastic Block Store](https://aws.amazon.com/ebs/) storage volumes into your Constellation cluster. Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-aws-ebs-csi-driver) for more information. - - + + Note that in case the options above aren't a suitable solution for you, Constellation is compatible with all other CSI-based storage options. For example, you can use [AWS EFS](https://docs.aws.amazon.com/en_en/eks/latest/userguide/efs-csi.html), [Azure Files](https://docs.microsoft.com/en-us/azure/storage/files/storage-files-introduction), or [GCP Filestore](https://cloud.google.com/filestore) with Constellation out of the box. Constellation is just not providing transparent encryption on the node level for these storage types yet. @@ -53,8 +53,8 @@ Note that in case the options above aren't a suitable solution for you, Constell The Constellation CLI automatically installs Constellation's CSI driver for the selected CSP in your cluster. If you don't need a CSI driver or wish to deploy your own, you can disable the automatic installation by setting `deployCSIDriver` to `false` in your Constellation config file. - - + + Azure comes with two storage classes by default. @@ -82,8 +82,8 @@ Note that volume expansion isn't supported for integrity-protected disks. ::: - - + + GCP comes with two storage classes by default. @@ -111,8 +111,8 @@ Note that volume expansion isn't supported for integrity-protected disks. ::: - - + + AWS comes with two storage classes by default. @@ -140,8 +140,8 @@ Note that volume expansion isn't supported for integrity-protected disks. ::: - - + + 1. 
Create a [persistent volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) diff --git a/docs/versioned_docs/version-2.14/workflows/terminate.md b/docs/versioned_docs/version-2.14/workflows/terminate.md index 2c45bebe3..58c274bdd 100644 --- a/docs/versioned_docs/version-2.14/workflows/terminate.md +++ b/docs/versioned_docs/version-2.14/workflows/terminate.md @@ -4,7 +4,7 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + --- @@ -16,8 +16,8 @@ All ephemeral storage and state of your cluster will be lost. Make sure any data ::: - - + + Terminate the cluster by running: ```bash @@ -40,8 +40,8 @@ resources manually. Just run the `terminate` command again afterward to continue ::: - - + + Terminate the cluster by running: ```bash @@ -56,5 +56,5 @@ rm constellation-state.yaml constellation-admin.conf Only the `constellation-mastersecret.json` and the configuration file remain. - - + + diff --git a/docs/versioned_docs/version-2.14/workflows/terraform-provider.md b/docs/versioned_docs/version-2.14/workflows/terraform-provider.md index e831ccc9e..7de44a530 100644 --- a/docs/versioned_docs/version-2.14/workflows/terraform-provider.md +++ b/docs/versioned_docs/version-2.14/workflows/terraform-provider.md @@ -21,9 +21,9 @@ This example shows how to set up a Constellation cluster with the reference IAM 2. Use one of the [example configurations for using the Constellation Terraform provider](https://github.com/edgelesssys/constellation/tree/main/terraform-provider-constellation/examples/full) or create a `main.tf` file and fill it with the resources you want to create. The [Constellation Terraform provider documentation](https://registry.terraform.io/providers/edgelesssys/constellation/latest) offers thorough documentation on the resources and their attributes. 3. Initialize and apply the Terraform configuration. - + - + When creating a cluster on Azure, you need to manually patch the policy of the MAA provider before creating the Constellation cluster, as this feature isn't available in Azure's Terraform provider yet. The Constellation CLI provides a utility for patching, but you can also do it manually. @@ -57,8 +57,8 @@ This example shows how to set up a Constellation cluster with the reference IAM }; ``` - - + + Initialize the providers and apply the configuration. ```bash @@ -67,8 +67,8 @@ This example shows how to set up a Constellation cluster with the reference IAM ``` Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. - - + + Initialize the providers and apply the configuration. ```bash @@ -77,8 +77,8 @@ This example shows how to set up a Constellation cluster with the reference IAM ``` Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. - - + + 4. Connect to the cluster. 
```bash diff --git a/docs/versioned_docs/version-2.14/workflows/troubleshooting.md b/docs/versioned_docs/version-2.14/workflows/troubleshooting.md index 64d7d3355..633053e0b 100644 --- a/docs/versioned_docs/version-2.14/workflows/troubleshooting.md +++ b/docs/versioned_docs/version-2.14/workflows/troubleshooting.md @@ -101,8 +101,8 @@ To provide information during early stages of a node's boot process, Constellati You can view this information in the following places: - - + + 1. In your Azure subscription find the Constellation resource group. 2. Inside the resource group find the Application Insights resource called `constellation-insights-*`. @@ -112,8 +112,8 @@ You can view this information in the following places: To **find the disk UUIDs** use the following query: `traces | where message contains "Disk UUID"` - - + + 1. Select the project that hosts Constellation. 2. Go to the `Compute Engine` service. @@ -128,16 +128,16 @@ Constellation uses the default bucket to store logs. Its [default retention peri ::: - - + + 1. Open [AWS CloudWatch](https://console.aws.amazon.com/cloudwatch/home) 2. Select [Log Groups](https://console.aws.amazon.com/cloudwatch/home#logsV2:log-groups) 3. Select the log group that matches the name of your cluster. 4. Select the log stream for control or worker type nodes. - - + + ### Node shell access diff --git a/docs/versioned_docs/version-2.14/workflows/trusted-launch.md b/docs/versioned_docs/version-2.14/workflows/trusted-launch.md index d6d01d8eb..9bc7e785f 100644 --- a/docs/versioned_docs/version-2.14/workflows/trusted-launch.md +++ b/docs/versioned_docs/version-2.14/workflows/trusted-launch.md @@ -14,7 +14,7 @@ Constellation supports trusted launch VMs with instance types `Standard_D*_v4` a Azure currently doesn't support [community galleries for trusted launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/share-gallery-community). Thus, you need to manually import the Constellation node image into your cloud subscription. -The latest image is available at `https://cdn.confidential.cloud/constellation/images/azure/trusted-launch/v2.2.0/constellation.img`. Simply adjust the version number to download a newer version. +The latest image is available at . Simply adjust the version number to download a newer version. After you've downloaded the image, create a resource group `constellation-images` in your Azure subscription and import the image. You can use a script to do this: diff --git a/docs/versioned_docs/version-2.14/workflows/verify-cli.md b/docs/versioned_docs/version-2.14/workflows/verify-cli.md index e33569d37..1280c51b0 100644 --- a/docs/versioned_docs/version-2.14/workflows/verify-cli.md +++ b/docs/versioned_docs/version-2.14/workflows/verify-cli.md @@ -4,11 +4,11 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + --- -Edgeless Systems uses [sigstore](https://www.sigstore.dev/) and [SLSA](https://slsa.dev) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/cosign/signing/overview/), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at `https://rekor.sigstore.dev`. 
+Edgeless Systems uses [sigstore](https://www.sigstore.dev/) and [SLSA](https://slsa.dev) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/signing/quickstart), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at . :::note The public key for Edgeless Systems' long-term code-signing key is: @@ -20,7 +20,7 @@ JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== -----END PUBLIC KEY----- ``` -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). +The public key is also available for download at and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). ::: The Rekor transparency log is a public append-only ledger that verifies and records signatures and associated metadata. The Rekor transparency log enables everyone to observe the sequence of (software) signatures issued by Edgeless Systems and many other parties. The transparency log allows for the public identification of dubious or malicious signatures. @@ -33,11 +33,7 @@ You don't need to verify the Constellation node images. This is done automatical ## Verify the signature -:::info -This guide assumes Linux on an amd64 processor. The exact steps for other platforms differ slightly. -::: - -First, [install the Cosign CLI](https://docs.sigstore.dev/cosign/system_config/installation/). Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example: +First, [install the Cosign CLI](https://docs.sigstore.dev/system_config/installation). Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example: ```shell-session $ cosign verify-blob --key https://edgeless.systems/es.pub --signature constellation-linux-amd64.sig constellation-linux-amd64 diff --git a/docs/versioned_docs/version-2.15/architecture/attestation.md b/docs/versioned_docs/version-2.15/architecture/attestation.md index 286b2466d..04b85d8ad 100644 --- a/docs/versioned_docs/version-2.15/architecture/attestation.md +++ b/docs/versioned_docs/version-2.15/architecture/attestation.md @@ -121,8 +121,8 @@ Constellation allows to specify in the config which measurements should be enfor Enforcing non-reproducible measurements controlled by the cloud provider means that changes in these values require manual updates to the cluster's config. By default, Constellation only enforces measurements that are stable values produced by the infrastructure or by Constellation directly. - - + + Constellation uses the [vTPM](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch#vtpm) feature of Azure CVMs for runtime measurements. This vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. @@ -152,8 +152,8 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + Constellation uses the [vTPM](https://cloud.google.com/compute/confidential-vm/docs/about-cvm) feature of CVMs on GCP for runtime measurements. 
Note that this vTPM doesn't run inside the hardware-protected CVM context, but is emulated by the hypervisor. @@ -185,8 +185,8 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + Constellation uses the [vTPM](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html) (NitroTPM) feature of the [AWS Nitro System](http://aws.amazon.com/ec2/nitro/) on AWS for runtime measurements. @@ -217,16 +217,16 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + ### CVM verification To verify the integrity of the received attestation statement, a chain of trust from the CVM technology to the interface providing the statement has to be established. For verification of the CVM technology, Constellation may expose additional options in its config file. - - + + On Azure, AMD SEV-SNP is used to provide runtime encryption to the VMs. An SEV-SNP attestation report is used to establish trust in the vTPM running inside the VM. @@ -248,13 +248,13 @@ You may customize certain parameters for verification of the attestation stateme More explicitly, it controls the verification of the `IDKeyDigest` value in the SEV-SNP attestation report. You can provide a list of accepted key digests and specify a policy on how this list is compared against the reported `IDKeyDigest`. - - + + There is no additional configuration available for GCP. - - + + On AWS, AMD SEV-SNP is used to provide runtime encryption to the VMs. An SEV-SNP attestation report is used to establish trust in the VM and it's vTPM. @@ -275,8 +275,8 @@ You may customize certain parameters for verification of the attestation stateme This is the intermediate certificate for verifying the SEV-SNP report's signature. If it's not specified, the CLI fetches it from the AMD key distribution server. - - + + ## Cluster attestation @@ -305,7 +305,7 @@ A user can [verify](../workflows/verify-cluster.md) this statement and compare t So far, this page described how an entire Constellation cluster can be verified using hardware attestation capabilities and runtime measurements. The last missing link is how the ground truth in the form of runtime measurements can be securely distributed to the verifying party. -The build process of Constellation images also creates the ground truth runtime measurements. The builds of Constellation images are reproducible and the measurements of an image can be recalculated and verified by everyone. +The build process of Constellation images also creates the ground truth runtime measurements. With every release, Edgeless Systems publishes signed runtime measurements. The CLI executable is also signed by Edgeless Systems. diff --git a/docs/versioned_docs/version-2.15/architecture/keys.md b/docs/versioned_docs/version-2.15/architecture/keys.md index 553d9d4e2..f2c8c3fba 100644 --- a/docs/versioned_docs/version-2.15/architecture/keys.md +++ b/docs/versioned_docs/version-2.15/architecture/keys.md @@ -105,7 +105,7 @@ Initially, it will support the following KMSs: * [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/#product-overview) * [KMIP-compatible KMS](https://www.oasis-open.org/committees/tc_home.php?wg_abbrev=kmip) -Storing the keys in Cloud KMS of AWS, Azure, or GCP binds the key usage to the particular cloud identity access management (IAM). 
+Storing the keys in Cloud KMS of AWS, GCP, or Azure binds the key usage to the particular cloud identity access management (IAM). In the future, Constellation will support remote attestation-based access policies for Cloud KMS once available. Note that using a Cloud KMS limits the isolation and protection to the guarantees of the particular offering. diff --git a/docs/versioned_docs/version-2.15/getting-started/first-steps-local.md b/docs/versioned_docs/version-2.15/getting-started/first-steps-local.md index 98f0302de..052d29eae 100644 --- a/docs/versioned_docs/version-2.15/getting-started/first-steps-local.md +++ b/docs/versioned_docs/version-2.15/getting-started/first-steps-local.md @@ -45,8 +45,8 @@ sudo iptables -P FORWARD ACCEPT ## Create a cluster - - + + With the `constellation mini` command, you can deploy and test Constellation locally. This mode is called MiniConstellation. Conceptually, MiniConstellation is similar to [MicroK8s](https://microk8s.io/), [K3s](https://k3s.io/), and [minikube](https://minikube.sigs.k8s.io/docs/). @@ -74,8 +74,8 @@ constellation mini up This will configure your current directory as the [workspace](../architecture/orchestration.md#workspaces) for this cluster. All `constellation` commands concerning this cluster need to be issued from this directory. - - + + With the QEMU provider, you can create a local Constellation cluster as if it were in the cloud. The provider uses [QEMU](https://www.qemu.org/) to create multiple VMs for the cluster nodes, which interact with each other. @@ -145,8 +145,8 @@ attaching persistent storage, or autoscaling aren't available. export KUBECONFIG="$PWD/constellation-admin.conf" ``` - - + + ## Connect to the cluster @@ -199,8 +199,8 @@ worker-0 Ready 32s v1.24.6 ## Terminate your cluster - - + + Once you are done, you can clean up the created resources using the following command: @@ -211,8 +211,8 @@ constellation mini down This will destroy your cluster and clean up your workspace. The VM image and cluster configuration file (`constellation-conf.yaml`) will be kept and may be reused to create new clusters. - - + + Once you are done, you can clean up the created resources using the following command: @@ -240,8 +240,8 @@ Your Constellation cluster was terminated successfully. This will destroy your cluster and clean up your workspace. The VM image and cluster configuration file (`constellation-conf.yaml`) will be kept and may be reused to create new clusters. - - + + ## Troubleshooting diff --git a/docs/versioned_docs/version-2.15/getting-started/first-steps.md b/docs/versioned_docs/version-2.15/getting-started/first-steps.md index 8618fb843..dc867bf77 100644 --- a/docs/versioned_docs/version-2.15/getting-started/first-steps.md +++ b/docs/versioned_docs/version-2.15/getting-started/first-steps.md @@ -15,39 +15,39 @@ If you encounter any problem with the following steps, make sure to use the [lat 1. Create the [configuration file](../workflows/config.md) and state file for your cloud provider. - + - + ```bash constellation config generate azure ``` - + - + ```bash constellation config generate gcp ``` - + - + ```bash constellation config generate aws ``` - + - + 2. Create your [IAM configuration](../workflows/config.md#creating-an-iam-configuration). 
- + - + ```bash constellation iam create azure --region=westus --resourceGroup=constellTest --servicePrincipal=spTest --update-config @@ -68,21 +68,21 @@ If you encounter any problem with the following steps, make sure to use the [lat You can find a list of all [regions in Azure's documentation](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=virtual-machines®ions=all). - + - + ```bash - constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --serviceAccountID=constell-test --update-config + constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west2-a --serviceAccountID=constell-test --update-config ``` - This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. + This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west2-a` creating a new service account `constell-test`. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `C2D` or `N2D`. - + - + ```bash constellation iam create aws --zone=us-east-2a --prefix=constellTest --update-config @@ -109,8 +109,8 @@ If you encounter any problem with the following steps, make sure to use the [lat You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). - - + + :::tip To learn about all options you have for managing IAM resources and Constellation configuration, see the [Configuration workflow](../workflows/config.md). diff --git a/docs/versioned_docs/version-2.15/getting-started/install.md b/docs/versioned_docs/version-2.15/getting-started/install.md index 08e2315ef..f2cad8b02 100644 --- a/docs/versioned_docs/version-2.15/getting-started/install.md +++ b/docs/versioned_docs/version-2.15/getting-started/install.md @@ -9,7 +9,7 @@ Make sure the following requirements are met: * Your machine is running Linux or macOS * You have admin rights on your machine * [kubectl](https://kubernetes.io/docs/tasks/tools/) is installed -* Your CSP is Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) +* Your CSP is Microsoft Azure, Google Cloud Platform (GCP), or Amazon Web Services (AWS) ## Install the Constellation CLI @@ -22,8 +22,8 @@ If you prefer to use Terraform, you can alternatively use the [Terraform provide The CLI executable is available at [GitHub](https://github.com/edgelesssys/constellation/releases). Install it with the following commands: - - + + 1. Download the CLI: @@ -39,8 +39,8 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-linux-amd64 /usr/local/bin/constellation ``` - - + + 1. Download the CLI: @@ -56,9 +56,9 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-linux-arm64 /usr/local/bin/constellation ``` - + - + 1. 
Download the CLI: @@ -74,9 +74,9 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-darwin-arm64 /usr/local/bin/constellation ``` - + - + 1. Download the CLI: @@ -92,8 +92,8 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-darwin-amd64 /usr/local/bin/constellation ``` - - + + :::tip The CLI supports autocompletion for various shells. To set it up, run `constellation completion` and follow the given steps. @@ -109,12 +109,12 @@ If you don't have a cloud subscription, you can also set up a [local Constellati ### Required permissions - - + + The following [resource providers need to be registered](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/resource-providers-and-types#register-resource-provider) in your subscription: -* `Microsoft.Attestation` +* `Microsoft.Attestation` \[2] * `Microsoft.Compute` * `Microsoft.Insights` * `Microsoft.ManagedIdentity` @@ -134,7 +134,7 @@ The built-in `Owner` role is a superset of these permissions. To [create a Constellation cluster](../workflows/create.md), you need the following permissions: -* `Microsoft.Attestation/attestationProviders/*` +* `Microsoft.Attestation/attestationProviders/*` \[2] * `Microsoft.Compute/virtualMachineScaleSets/*` * `Microsoft.Insights/components/*` * `Microsoft.ManagedIdentity/userAssignedIdentities/*` @@ -152,8 +152,10 @@ Follow Microsoft's guide on [understanding](https://learn.microsoft.com/en-us/az 1: You can omit `*/register/Action` if the resource providers mentioned above are already registered and the `ARM_SKIP_PROVIDER_REGISTRATION` environment variable is set to `true` when creating the IAM configuration. - - +2: You can omit `Microsoft.Attestation/attestationProviders/*` and the registration of `Microsoft.Attestation` if `EnforceIDKeyDigest` isn't set to `MAAFallback` in the [config file](../workflows/config.md#configure-your-cluster). + + + Create a new project for Constellation or use an existing one. Enable the [Compute Engine API](https://console.cloud.google.com/apis/library/compute.googleapis.com) on it. @@ -238,8 +240,8 @@ Together, the built-in roles `roles/editor`, `roles/compute.instanceAdmin` and ` Follow Google's guide on [understanding](https://cloud.google.com/iam/docs/understanding-roles) and [assigning roles](https://cloud.google.com/iam/docs/granting-changing-revoking-access). - - + + To set up a Constellation cluster, you need to perform two tasks that require permissions: create the infrastructure and create roles for cluster nodes. Both of these actions can be performed by different users, e.g., an administrator to create roles and a DevOps engineer to create the infrastructure. @@ -289,8 +291,8 @@ The built-in `PowerUserAccess` policy is a superset of these permissions. Follow Amazon's guide on [understanding](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) and [managing policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html). - - + + ### Authentication @@ -300,8 +302,8 @@ You need to authenticate with your CSP. The following lists the required steps f The steps for a *testing* environment are simpler. However, they may expose secrets to the CSP. If in doubt, follow the *production* steps. ::: - - + + **Testing** @@ -317,8 +319,8 @@ az login Other options are described in Azure's [authentication guide](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli). 
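For example, after authenticating you can double-check which subscription the CLI is pointed at before creating any IAM resources (a usage sketch; the subscription name is a placeholder):

```bash
az login
# Show the subscription the Azure CLI will use
az account show --query "{name:name, id:id}" --output table
# Switch to a different subscription if needed
az account set --subscription "my-constellation-subscription"
```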
- - + + **Testing** @@ -341,8 +343,8 @@ Use one of the following options on a trusted machine: Follow [Google's guide](https://cloud.google.com/docs/authentication/production#manually) for setting up your credentials. - - + + **Testing** @@ -358,9 +360,9 @@ aws configure Options and first steps are described in the [AWS CLI documentation](https://docs.aws.amazon.com/cli/index.html). - + - + ## Next steps diff --git a/docs/versioned_docs/version-2.15/overview/clouds.md b/docs/versioned_docs/version-2.15/overview/clouds.md index 934f6c710..8cc42a990 100644 --- a/docs/versioned_docs/version-2.15/overview/clouds.md +++ b/docs/versioned_docs/version-2.15/overview/clouds.md @@ -19,7 +19,7 @@ The following table summarizes the state of features for different infrastructur | **1. Custom images** | Yes | Yes | Yes | Yes | | **2. SEV-SNP or TDX** | Yes | Yes | Yes | Depends on kernel/HV | | **3. Raw guest attestation** | Yes | Yes | Yes | Depends on kernel/HV | -| **4. Reviewable firmware** | No* | No | Yes | Depends on kernel/HV | +| **4. Reviewable firmware** | No | No | Yes | Depends on kernel/HV | | **5. Confidential measured boot** | Yes | No | No | Depends on kernel/HV | ## Microsoft Azure @@ -32,16 +32,15 @@ This firmware is signed by Azure. The signature is reflected in the remote-attestation statements of CVMs. Thus, the Azure closed-source firmware becomes part of Constellation's trusted computing base (TCB). -\* Recently, [Azure announced the open source paravisor OpenHCL](https://techcommunity.microsoft.com/blog/windowsosplatform/openhcl-the-new-open-source-paravisor/4273172). It's the foundation for fully open source and verifiable CVM firmware. Once Azure provides their CVM firmware with reproducible builds based on OpenHCL, (4) switches from *No* to *Yes*. Constellation will support OpenHCL based firmware on Azure in the future. - ## Google Cloud Platform (GCP) -The [CVMs Generally Available in GCP](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview#amd_sev) are based on AMD SEV but don't have SNP features enabled. -CVMs with [SEV-SNP enabled are in public preview](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview#amd_sev-snp). Regarding (3), with their SEV-SNP offering Google provides direct access to remote-attestation statements. +The [CVMs Generally Available in GCP](https://cloud.google.com/compute/confidential-vm/docs/create-confidential-vm-instance) are based on AMD SEV but don't have SNP features enabled. +CVMs with SEV-SNP enabled are currently in [public preview](https://cloud.google.com/blog/products/identity-security/rsa-snp-vm-more-confidential). Regarding (3), with their SEV-SNP offering Google provides direct access to remote-attestation statements. However, regarding (5), attestation is partially based on the [Shielded VM vTPM](https://cloud.google.com/compute/shielded-vm/docs/shielded-vm#vtpm) for [measured boot](../architecture/attestation.md#measured-boot), which is a vTPM managed by Google's hypervisor. Hence, the hypervisor is currently part of Constellation's TCB. Regarding (4), the CVMs still include closed-source firmware. + In the past, Intel and Google have [collaborated](https://cloud.google.com/blog/products/identity-security/rsa-google-intel-confidential-computing-more-secure) to enhance the security of TDX. 
Recently, Google has announced a [private preview for TDX](https://cloud.google.com/blog/products/identity-security/confidential-vms-on-intel-cpus-your-datas-new-intelligent-defense?hl=en). With TDX on Google, Constellation has a similar TCB and attestation flow as with the current SEV-SNP offering. diff --git a/docs/versioned_docs/version-2.15/overview/confidential-kubernetes.md b/docs/versioned_docs/version-2.15/overview/confidential-kubernetes.md index bff8c3322..ca20df4de 100644 --- a/docs/versioned_docs/version-2.15/overview/confidential-kubernetes.md +++ b/docs/versioned_docs/version-2.15/overview/confidential-kubernetes.md @@ -23,9 +23,9 @@ With the above, Constellation wraps an entire cluster into one coherent and veri ![Confidential Kubernetes](../_media/concept-constellation.svg) -## Comparison: Managed Kubernetes with CVMs +## Contrast: Managed Kubernetes with CVMs -In comparison, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. Consequently, this approach leaves a large attack surface, as is depicted in the following. +In contrast, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. Consequently, this approach leaves a large attack surface, as is depicted in the following. ![Concept: Managed Kubernetes plus CVMs](../_media/concept-managed.svg) diff --git a/docs/versioned_docs/version-2.15/overview/performance/compute.md b/docs/versioned_docs/version-2.15/overview/performance/compute.md deleted file mode 100644 index 88dd4b1b2..000000000 --- a/docs/versioned_docs/version-2.15/overview/performance/compute.md +++ /dev/null @@ -1,11 +0,0 @@ -# Impact of runtime encryption on compute performance - -All nodes in a Constellation cluster are executed inside Confidential VMs (CVMs). Consequently, the performance of Constellation is inherently linked to the performance of these CVMs. 
- -## AMD and Azure benchmarking - -AMD and Azure have collectively released a [performance benchmark](https://community.amd.com/t5/business/microsoft-azure-confidential-computing-powered-by-3rd-gen-epyc/ba-p/497796) for CVMs that utilize 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. This benchmark, which included a variety of mostly compute-intensive tests such as SPEC CPU 2017 and CoreMark, demonstrated that CVMs experience only minor performance degradation (ranging from 2% to 8%) when compared to standard VMs. Such results are indicative of the performance that can be expected from compute-intensive workloads running with Constellation on Azure. - -## AMD and Google benchmarking - -Similarly, AMD and Google have jointly released a [performance benchmark](https://www.amd.com/system/files/documents/3rd-gen-epyc-gcp-c2d-conf-compute-perf-brief.pdf) for CVMs employing 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. With high-performance computing workloads such as WRF, NAMD, Ansys CFS, and Ansys LS_DYNA, they observed analogous findings, with only minor performance degradation (between 2% and 4%) compared to standard VMs. These outcomes are reflective of the performance that can be expected for compute-intensive workloads running with Constellation on GCP. diff --git a/docs/versioned_docs/version-2.15/overview/performance/performance.md b/docs/versioned_docs/version-2.15/overview/performance/performance.md index 59bf86602..7f22a693e 100644 --- a/docs/versioned_docs/version-2.15/overview/performance/performance.md +++ b/docs/versioned_docs/version-2.15/overview/performance/performance.md @@ -1,10 +1,18 @@ # Performance analysis of Constellation -This section provides a comprehensive examination of the performance characteristics of Constellation. +This section provides a comprehensive examination of the performance characteristics of Constellation, encompassing various aspects, including runtime encryption, I/O benchmarks, and real-world applications. -## Runtime encryption +## Impact of runtime encryption on performance -Runtime encryption affects compute performance. [Benchmarks by Azure and Google](compute.md) show that the performance degradation of Confidential VMs (CVMs) is small, ranging from 2% to 8% for compute-intensive workloads. +All nodes in a Constellation cluster are executed inside Confidential VMs (CVMs). Consequently, the performance of Constellation is inherently linked to the performance of these CVMs. + +### AMD and Azure benchmarking + +AMD and Azure have collectively released a [performance benchmark](https://community.amd.com/t5/business/microsoft-azure-confidential-computing-powered-by-3rd-gen-epyc/ba-p/497796) for CVMs that utilize 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. This benchmark, which included a variety of mostly compute-intensive tests such as SPEC CPU 2017 and CoreMark, demonstrated that CVMs experience only minor performance degradation (ranging from 2% to 8%) when compared to standard VMs. Such results are indicative of the performance that can be expected from compute-intensive workloads running with Constellation on Azure. + +### AMD and Google benchmarking + +Similarly, AMD and Google have jointly released a [performance benchmark](https://www.amd.com/system/files/documents/3rd-gen-epyc-gcp-c2d-conf-compute-perf-brief.pdf) for CVMs employing 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. 
With high-performance computing workloads such as WRF, NAMD, Ansys CFS, and Ansys LS_DYNA, they observed analogous findings, with only minor performance degradation (between 2% and 4%) compared to standard VMs. These outcomes are reflective of the performance that can be expected for compute-intensive workloads running with Constellation on GCP. ## I/O performance benchmarks diff --git a/docs/versioned_docs/version-2.15/overview/product.md b/docs/versioned_docs/version-2.15/overview/product.md index 02e12e2f3..e31a4658f 100644 --- a/docs/versioned_docs/version-2.15/overview/product.md +++ b/docs/versioned_docs/version-2.15/overview/product.md @@ -6,7 +6,7 @@ From a security perspective, Constellation implements the [Confidential Kubernet From an operational perspective, Constellation provides the following key features: -* **Native support for different clouds**: Constellation works on Amazon Web Services (AWS), Microsoft Azure, and Google Cloud Platform (GCP). Support for OpenStack-based environments is coming with a future release. Constellation securely interfaces with the cloud infrastructure to provide [cluster autoscaling](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), [dynamic persistent volumes](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/), and [service load balancing](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). +* **Native support for different clouds**: Constellation works on Microsoft Azure, Google Cloud Platform (GCP), and Amazon Web Services (AWS). Support for OpenStack-based environments is coming with a future release. Constellation securely interfaces with the cloud infrastructure to provide [cluster autoscaling](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), [dynamic persistent volumes](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/), and [service load balancing](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). * **High availability**: Constellation uses a [multi-master architecture](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/) with a [stacked etcd topology](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/ha-topology/#stacked-etcd-topology) to ensure high availability. * **Integrated Day-2 operations**: Constellation lets you securely [upgrade](../workflows/upgrade.md) your cluster to a new release. It also lets you securely [recover](../workflows/recovery.md) a failed cluster. Both with a single command. * **Support for Terraform**: Constellation includes a [Terraform provider](../workflows/terraform-provider.md) that lets you manage the full lifecycle of your cluster via Terraform. diff --git a/docs/versioned_docs/version-2.15/workflows/config.md b/docs/versioned_docs/version-2.15/workflows/config.md index be9cc6a98..a5216510d 100644 --- a/docs/versioned_docs/version-2.15/workflows/config.md +++ b/docs/versioned_docs/version-2.15/workflows/config.md @@ -4,7 +4,7 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. 
::: - + --- @@ -14,49 +14,49 @@ Before you can create your cluster, you need to configure the identity and acces You can generate a configuration file for your CSP by using the following CLI command: - - + + ```bash constellation config generate azure ``` - - + + ```bash constellation config generate gcp ``` - - + + ```bash constellation config generate aws ``` - - + + This creates the file `constellation-conf.yaml` in the current directory. ## Choosing a VM type Constellation supports the following VM types: - - + + By default, Constellation uses `Standard_DC4as_v5` CVMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file. For CVMs, any VM type with a minimum of 4 vCPUs from the [DCasv5 & DCadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series) or [ECasv5 & ECadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/ecasv5-ecadsv5-series) families is supported. You can also run `constellation config instance-types` to get the list of all supported options. - - + + By default, Constellation uses `n2d-standard-4` VMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file. Supported are all machines with a minimum of 4 vCPUs from the [C2D](https://cloud.google.com/compute/docs/compute-optimized-machines#c2d_machine_types) or [N2D](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines) family. You can run `constellation config instance-types` to get the list of all supported options. - - + + By default, Constellation uses `m6a.xlarge` VMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file. @@ -67,8 +67,8 @@ If you are using the attestation variant `awsNitroTPM`, you can choose any of th The Constellation CLI can also print the supported instance types with: `constellation config instance-types`. - - + + Fill the desired VM type into the `instanceType` fields in the `constellation-conf.yml` file. @@ -125,8 +125,8 @@ See also Constellation's [Kubernetes support policy](../architecture/versions.md You can create an IAM configuration for your cluster automatically using the `constellation iam create` command. If you already have a Constellation configuration file, you can add the `--update-config` flag to the command. This writes the needed IAM fields into your configuration. Furthermore, the flag updates the zone/region of the configuration if it hasn't been set yet. - - + + You must be authenticated with the [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). @@ -151,23 +151,23 @@ You can find a list of all [regions in Azure's documentation](https://azure.micr Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - + + You must be authenticated with the [GCP CLI](https://cloud.google.com/sdk/gcloud) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). 
```bash -constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --serviceAccountID=constell-test +constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west2-a --serviceAccountID=constell-test ``` -This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. +This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west2-a` creating a new service account `constell-test`. Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `N2D`. Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - + + You must be authenticated with the [AWS CLI](https://aws.amazon.com/en/cli/) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). @@ -191,16 +191,16 @@ You can find a list of all [regions in AWS's documentation](https://docs.aws.ama Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - + +
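Putting the pieces together, a typical automated flow generates the configuration, creates the IAM resources with `--update-config`, and then creates the cluster (a sketch using the AWS example values from above):

```bash
constellation config generate aws
constellation iam create aws --zone=us-east-2a --prefix=constellTest --update-config
constellation apply
```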
Alternatively, you can manually create the IAM configuration on your CSP. The following describes the configuration fields and how you obtain the required information or create the required resources. - - + + * **subscription**: The UUID of your Azure subscription, e.g., `8b8bd01f-efd9-4113-9bd1-c82137c32da7`. @@ -236,19 +236,19 @@ The following describes the configuration fields and how you obtain the required The user-assigned identity is used by instances of the cluster to access other cloud resources. For more information about managed identities refer to [Azure's documentation](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/how-manage-user-assigned-managed-identities). - + - + * **project**: The ID of your GCP project, e.g., `constellation-129857`. You can find it on the [welcome screen of your GCP project](https://console.cloud.google.com/welcome). For more information refer to [Google's documentation](https://support.google.com/googleapi/answer/7014113). -* **region**: The GCP region you want to deploy your cluster in, e.g., `us-central1`. +* **region**: The GCP region you want to deploy your cluster in, e.g., `us-west1`. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). -* **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-central1-a`. +* **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-west1-a`. You can find a [list of all zones in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). @@ -262,9 +262,9 @@ The following describes the configuration fields and how you obtain the required Afterward, create and download a new JSON key for this service account. Place the downloaded file in your Constellation workspace, and set the config parameter to the filename, e.g., `constellation-129857-15343dba46cb.json`. - + - + * **region**: The name of your chosen AWS data center region, e.g., `us-east-2`. @@ -295,9 +295,9 @@ The following describes the configuration fields and how you obtain the required Alternatively, you can create the AWS profile with a tool of your choice. Use the JSON policy in [main.tf](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam/main.tf) in the resource `aws_iam_policy.worker_node_policy`. - + - +
Now that you've configured your CSP, you can [create your cluster](./create.md). diff --git a/docs/versioned_docs/version-2.15/workflows/create.md b/docs/versioned_docs/version-2.15/workflows/create.md index 6074ebb16..54bc9dcbc 100644 --- a/docs/versioned_docs/version-2.15/workflows/create.md +++ b/docs/versioned_docs/version-2.15/workflows/create.md @@ -4,7 +4,7 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + --- @@ -27,8 +27,8 @@ If you don't have a cloud subscription, you can also set up a [local Constellati Before you create the cluster, make sure to have a [valid configuration file](./config.md). - - + + ```bash constellation apply @@ -36,8 +36,8 @@ constellation apply `apply` stores the state of your cluster's cloud resources in a [`constellation-terraform`](../architecture/orchestration.md#cluster-creation-process) directory in your workspace. - - + + Self-managed infrastructure allows for more flexibility in the setup, by separating the infrastructure setup from the Constellation cluster management. This provides flexibility in DevOps and can meet potential regulatory requirements. @@ -56,7 +56,7 @@ management tooling of your choice. You need to keep the essential functionality :::info - On Azure, a manual update to the MAA provider's policy is necessary. + On Azure, if the enforcement policy is set to `MAAFallback` in `constellation-config.yaml`, a manual update to the MAA provider's policy is necessary. You can apply the update with the following command after creating the infrastructure, with `` being the URL of the MAA provider (i.e., `$(terraform output attestation_url | jq -r)`, when using the minimal Terraform configuration). ```bash @@ -77,8 +77,8 @@ With the required cloud resources set up, continue with initializing your cluste constellation apply --skip-phases=infrastructure ``` - - + + Finally, configure `kubectl` for your cluster: diff --git a/docs/versioned_docs/version-2.15/workflows/recovery.md b/docs/versioned_docs/version-2.15/workflows/recovery.md index 592ab74af..9396bf8f2 100644 --- a/docs/versioned_docs/version-2.15/workflows/recovery.md +++ b/docs/versioned_docs/version-2.15/workflows/recovery.md @@ -16,8 +16,8 @@ You can check the health status of the nodes via the cloud service provider (CSP Constellation provides logging information on the boot process and status via serial console output. In the following, you'll find detailed descriptions for identifying clusters stuck in recovery for each CSP. - - + + In the Azure portal, find the cluster's resource group. Inside the resource group, open the control plane *Virtual machine scale set* `constellation-scale-set-controlplanes-`. @@ -51,8 +51,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. - - + + First, check that the control plane *Instance Group* has enough members in a *Ready* state. In the GCP Console, go to **Instance Groups** and check the group for the cluster's control plane `-control-plane-`. @@ -87,8 +87,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. - - + + First, open the AWS console to view all Auto Scaling Groups (ASGs) in the region of your cluster. Select the ASG of the control plane `--control-plane` and check that enough members are in a *Running* state. 
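The same check can also be done from the AWS CLI (a sketch; the group name is a placeholder for your cluster's control-plane ASG):

```bash
aws autoscaling describe-auto-scaling-groups \
  --auto-scaling-group-names "constell-test-control-plane" \
  --query "AutoScalingGroups[0].Instances[].{Id:InstanceId,State:LifecycleState,Health:HealthStatus}" \
  --output table
```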
@@ -118,8 +118,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. - - + + ## Recover a cluster diff --git a/docs/versioned_docs/version-2.15/workflows/sbom.md b/docs/versioned_docs/version-2.15/workflows/sbom.md index 6c1702dee..9ef6eb65c 100644 --- a/docs/versioned_docs/version-2.15/workflows/sbom.md +++ b/docs/versioned_docs/version-2.15/workflows/sbom.md @@ -1,6 +1,6 @@ # Consume software bill of materials (SBOMs) - + --- @@ -11,15 +11,13 @@ SBOMs for Constellation are generated using [Syft](https://github.com/anchore/sy :::note The public key for Edgeless Systems' long-term code-signing key is: - ``` -----BEGIN PUBLIC KEY----- MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEf8F1hpmwE+YCFXzjGtaQcrL6XZVT JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== -----END PUBLIC KEY----- ``` - -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). +The public key is also available for download at https://edgeless.systems/es.pub and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). Make sure the key is available in a file named `cosign.pub` to execute the following examples. ::: @@ -40,7 +38,7 @@ cosign verify-blob --key cosign.pub --signature constellation.spdx.sbom.sig cons ### Container Images -SBOMs for container images are [attached to the image using Cosign](https://docs.sigstore.dev/cosign/signing/other_types/#sboms-software-bill-of-materials) and uploaded to the same registry. +SBOMs for container images are [attached to the image using Cosign](https://docs.sigstore.dev/signing/other_types#sboms-software-bill-of-materials) and uploaded to the same registry. As a consumer, use cosign to download and verify the SBOM: diff --git a/docs/versioned_docs/version-2.15/workflows/scale.md b/docs/versioned_docs/version-2.15/workflows/scale.md index 63b727c7d..06898ad0c 100644 --- a/docs/versioned_docs/version-2.15/workflows/scale.md +++ b/docs/versioned_docs/version-2.15/workflows/scale.md @@ -51,30 +51,30 @@ kubectl -n kube-system get nodes Alternatively, you can manually scale your cluster up or down: - - + + 1. Find your Constellation resource group. 2. Select the `scale-set-workers`. 3. Go to **settings** and **scaling**. 4. Set the new **instance count** and **save**. - - + + 1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). 2. **Edit** the **worker** instance group. 3. Set the new **number of instances** and **save**. - - + + 1. Go to Auto Scaling Groups and select the worker ASG to scale up. 2. Click **Edit** 3. Set the new (increased) **Desired capacity** and **Update**. - - + + ## Control-plane node scaling @@ -82,30 +82,30 @@ Control-plane nodes can **only be scaled manually and only scaled up**! To increase the number of control-plane nodes, follow these steps: - + - + 1. Find your Constellation resource group. 2. Select the `scale-set-controlplanes`. 3. Go to **settings** and **scaling**. 4. Set the new (increased) **instance count** and **save**. - - + + 1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). 2. **Edit** the **control-plane** instance group. 3. Set the new (increased) **number of instances** and **save**. - - + + 1. Go to Auto Scaling Groups and select the control-plane ASG to scale up. 2. Click **Edit** 3. 
Set the new (increased) **Desired capacity** and **Update**. - - + + If you scale down the number of control-planes nodes, the removed nodes won't be able to exit the `etcd` cluster correctly. This will endanger the quorum that's required to run a stable Kubernetes control plane. diff --git a/docs/versioned_docs/version-2.15/workflows/storage.md b/docs/versioned_docs/version-2.15/workflows/storage.md index 06fbc4de6..9e3d96346 100644 --- a/docs/versioned_docs/version-2.15/workflows/storage.md +++ b/docs/versioned_docs/version-2.15/workflows/storage.md @@ -21,30 +21,30 @@ For more details see [encrypted persistent storage](../architecture/encrypted-st Constellation supports the following drivers, which offer node-level encryption and optional integrity protection. - - + + **Constellation CSI driver for Azure Disk**: Mount Azure [Disk Storage](https://azure.microsoft.com/en-us/services/storage/disks/#overview) into your Constellation cluster. See the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-azuredisk-csi-driver) for more information. Since Azure Disks are mounted as `ReadWriteOnce`, they're only available to a single pod. - - + + **Constellation CSI driver for GCP Persistent Disk**: Mount [Persistent Disk](https://cloud.google.com/persistent-disk) block storage into your Constellation cluster. Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-gcp-compute-persistent-disk-csi-driver) for more information. - - + + **Constellation CSI driver for AWS Elastic Block Store** Mount [Elastic Block Store](https://aws.amazon.com/ebs/) storage volumes into your Constellation cluster. Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-aws-ebs-csi-driver) for more information. - - + + Note that in case the options above aren't a suitable solution for you, Constellation is compatible with all other CSI-based storage options. For example, you can use [AWS EFS](https://docs.aws.amazon.com/en_en/eks/latest/userguide/efs-csi.html), [Azure Files](https://docs.microsoft.com/en-us/azure/storage/files/storage-files-introduction), or [GCP Filestore](https://cloud.google.com/filestore) with Constellation out of the box. Constellation is just not providing transparent encryption on the node level for these storage types yet. @@ -53,8 +53,8 @@ Note that in case the options above aren't a suitable solution for you, Constell The Constellation CLI automatically installs Constellation's CSI driver for the selected CSP in your cluster. If you don't need a CSI driver or wish to deploy your own, you can disable the automatic installation by setting `deployCSIDriver` to `false` in your Constellation config file. - - + + Azure comes with two storage classes by default. @@ -82,8 +82,8 @@ Note that volume expansion isn't supported for integrity-protected disks. ::: - - + + GCP comes with two storage classes by default. @@ -111,8 +111,8 @@ Note that volume expansion isn't supported for integrity-protected disks. ::: - - + + AWS comes with two storage classes by default. @@ -140,8 +140,8 @@ Note that volume expansion isn't supported for integrity-protected disks. ::: - - + + 1. 
Create a [persistent volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) diff --git a/docs/versioned_docs/version-2.15/workflows/terminate.md b/docs/versioned_docs/version-2.15/workflows/terminate.md index 2c45bebe3..58c274bdd 100644 --- a/docs/versioned_docs/version-2.15/workflows/terminate.md +++ b/docs/versioned_docs/version-2.15/workflows/terminate.md @@ -4,7 +4,7 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + --- @@ -16,8 +16,8 @@ All ephemeral storage and state of your cluster will be lost. Make sure any data ::: - - + + Terminate the cluster by running: ```bash @@ -40,8 +40,8 @@ resources manually. Just run the `terminate` command again afterward to continue ::: - - + + Terminate the cluster by running: ```bash @@ -56,5 +56,5 @@ rm constellation-state.yaml constellation-admin.conf Only the `constellation-mastersecret.json` and the configuration file remain. - - + + diff --git a/docs/versioned_docs/version-2.15/workflows/terraform-provider.md b/docs/versioned_docs/version-2.15/workflows/terraform-provider.md index e831ccc9e..7de44a530 100644 --- a/docs/versioned_docs/version-2.15/workflows/terraform-provider.md +++ b/docs/versioned_docs/version-2.15/workflows/terraform-provider.md @@ -21,9 +21,9 @@ This example shows how to set up a Constellation cluster with the reference IAM 2. Use one of the [example configurations for using the Constellation Terraform provider](https://github.com/edgelesssys/constellation/tree/main/terraform-provider-constellation/examples/full) or create a `main.tf` file and fill it with the resources you want to create. The [Constellation Terraform provider documentation](https://registry.terraform.io/providers/edgelesssys/constellation/latest) offers thorough documentation on the resources and their attributes. 3. Initialize and apply the Terraform configuration. - + - + When creating a cluster on Azure, you need to manually patch the policy of the MAA provider before creating the Constellation cluster, as this feature isn't available in Azure's Terraform provider yet. The Constellation CLI provides a utility for patching, but you can also do it manually. @@ -57,8 +57,8 @@ This example shows how to set up a Constellation cluster with the reference IAM }; ``` - - + + Initialize the providers and apply the configuration. ```bash @@ -67,8 +67,8 @@ This example shows how to set up a Constellation cluster with the reference IAM ``` Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. - - + + Initialize the providers and apply the configuration. ```bash @@ -77,8 +77,8 @@ This example shows how to set up a Constellation cluster with the reference IAM ``` Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. - - + + 4. Connect to the cluster. 
```bash diff --git a/docs/versioned_docs/version-2.15/workflows/trusted-launch.md b/docs/versioned_docs/version-2.15/workflows/trusted-launch.md index d6d01d8eb..9bc7e785f 100644 --- a/docs/versioned_docs/version-2.15/workflows/trusted-launch.md +++ b/docs/versioned_docs/version-2.15/workflows/trusted-launch.md @@ -14,7 +14,7 @@ Constellation supports trusted launch VMs with instance types `Standard_D*_v4` a Azure currently doesn't support [community galleries for trusted launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/share-gallery-community). Thus, you need to manually import the Constellation node image into your cloud subscription. -The latest image is available at `https://cdn.confidential.cloud/constellation/images/azure/trusted-launch/v2.2.0/constellation.img`. Simply adjust the version number to download a newer version. +The latest image is available at . Simply adjust the version number to download a newer version. After you've downloaded the image, create a resource group `constellation-images` in your Azure subscription and import the image. You can use a script to do this: diff --git a/docs/versioned_docs/version-2.15/workflows/verify-cli.md b/docs/versioned_docs/version-2.15/workflows/verify-cli.md index e33569d37..1280c51b0 100644 --- a/docs/versioned_docs/version-2.15/workflows/verify-cli.md +++ b/docs/versioned_docs/version-2.15/workflows/verify-cli.md @@ -4,11 +4,11 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + --- -Edgeless Systems uses [sigstore](https://www.sigstore.dev/) and [SLSA](https://slsa.dev) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/cosign/signing/overview/), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at `https://rekor.sigstore.dev`. +Edgeless Systems uses [sigstore](https://www.sigstore.dev/) and [SLSA](https://slsa.dev) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/signing/quickstart), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at . :::note The public key for Edgeless Systems' long-term code-signing key is: @@ -20,7 +20,7 @@ JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== -----END PUBLIC KEY----- ``` -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). +The public key is also available for download at and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). ::: The Rekor transparency log is a public append-only ledger that verifies and records signatures and associated metadata. The Rekor transparency log enables everyone to observe the sequence of (software) signatures issued by Edgeless Systems and many other parties. The transparency log allows for the public identification of dubious or malicious signatures. @@ -33,11 +33,7 @@ You don't need to verify the Constellation node images. 
This is done automatical ## Verify the signature -:::info -This guide assumes Linux on an amd64 processor. The exact steps for other platforms differ slightly. -::: - -First, [install the Cosign CLI](https://docs.sigstore.dev/cosign/system_config/installation/). Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example: +First, [install the Cosign CLI](https://docs.sigstore.dev/system_config/installation). Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example: ```shell-session $ cosign verify-blob --key https://edgeless.systems/es.pub --signature constellation-linux-amd64.sig constellation-linux-amd64 diff --git a/docs/versioned_docs/version-2.15/workflows/verify-cluster.md b/docs/versioned_docs/version-2.15/workflows/verify-cluster.md index b6595ebf2..20d416790 100644 --- a/docs/versioned_docs/version-2.15/workflows/verify-cluster.md +++ b/docs/versioned_docs/version-2.15/workflows/verify-cluster.md @@ -88,7 +88,6 @@ The `verify` command also allows you to verify any Constellation deployment that * The IP address of a running Constellation cluster's [VerificationService](../architecture/microservices.md#verificationservice). The `VerificationService` is exposed via a `NodePort` service using the external IP address of your cluster. Run `kubectl get nodes -o wide` and look for `EXTERNAL-IP`. * The cluster's *clusterID*. See [cluster identity](../architecture/keys.md#cluster-identity) for more details. -* A `constellation-conf.yaml` file with the expected measurements of the cluster in your working directory. For example: diff --git a/docs/versioned_docs/version-2.16/_media/SLSA-Badge-full-level3.svg b/docs/versioned_docs/version-2.16/_media/SLSA-Badge-full-level3.svg deleted file mode 100644 index 7154d4a13..000000000 --- a/docs/versioned_docs/version-2.16/_media/SLSA-Badge-full-level3.svg +++ /dev/null @@ -1,47 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/docs/versioned_docs/version-2.16/_media/benchmark_fio_azure_bw.png b/docs/versioned_docs/version-2.16/_media/benchmark_fio_azure_bw.png deleted file mode 100644 index a82ebe2d0..000000000 Binary files a/docs/versioned_docs/version-2.16/_media/benchmark_fio_azure_bw.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.16/_media/benchmark_fio_azure_iops.png b/docs/versioned_docs/version-2.16/_media/benchmark_fio_azure_iops.png deleted file mode 100644 index 1723257a8..000000000 Binary files a/docs/versioned_docs/version-2.16/_media/benchmark_fio_azure_iops.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.16/_media/benchmark_fio_gcp_bw.png b/docs/versioned_docs/version-2.16/_media/benchmark_fio_gcp_bw.png deleted file mode 100644 index 4f0ecc94b..000000000 Binary files a/docs/versioned_docs/version-2.16/_media/benchmark_fio_gcp_bw.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.16/_media/benchmark_fio_gcp_iops.png b/docs/versioned_docs/version-2.16/_media/benchmark_fio_gcp_iops.png deleted file mode 100644 index 571086da2..000000000 Binary files a/docs/versioned_docs/version-2.16/_media/benchmark_fio_gcp_iops.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.16/_media/benchmark_net_p2p_azure.png b/docs/versioned_docs/version-2.16/_media/benchmark_net_p2p_azure.png deleted file mode 100644 index 9130349c7..000000000 Binary 
files a/docs/versioned_docs/version-2.16/_media/benchmark_net_p2p_azure.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.16/_media/benchmark_net_p2p_gcp.png b/docs/versioned_docs/version-2.16/_media/benchmark_net_p2p_gcp.png deleted file mode 100644 index a41557e96..000000000 Binary files a/docs/versioned_docs/version-2.16/_media/benchmark_net_p2p_gcp.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.16/_media/benchmark_net_p2svc_azure.png b/docs/versioned_docs/version-2.16/_media/benchmark_net_p2svc_azure.png deleted file mode 100644 index d83e17f5a..000000000 Binary files a/docs/versioned_docs/version-2.16/_media/benchmark_net_p2svc_azure.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.16/_media/benchmark_net_p2svc_gcp.png b/docs/versioned_docs/version-2.16/_media/benchmark_net_p2svc_gcp.png deleted file mode 100644 index 55916a1de..000000000 Binary files a/docs/versioned_docs/version-2.16/_media/benchmark_net_p2svc_gcp.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.16/_media/benchmark_vault/5replicas/max_latency.png b/docs/versioned_docs/version-2.16/_media/benchmark_vault/5replicas/max_latency.png deleted file mode 100644 index 696250181..000000000 Binary files a/docs/versioned_docs/version-2.16/_media/benchmark_vault/5replicas/max_latency.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.16/_media/benchmark_vault/5replicas/mean_latency.png b/docs/versioned_docs/version-2.16/_media/benchmark_vault/5replicas/mean_latency.png deleted file mode 100644 index 3b43298ac..000000000 Binary files a/docs/versioned_docs/version-2.16/_media/benchmark_vault/5replicas/mean_latency.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.16/_media/benchmark_vault/5replicas/min_latency.png b/docs/versioned_docs/version-2.16/_media/benchmark_vault/5replicas/min_latency.png deleted file mode 100644 index 1046df67e..000000000 Binary files a/docs/versioned_docs/version-2.16/_media/benchmark_vault/5replicas/min_latency.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.16/_media/benchmark_vault/5replicas/p99_latency.png b/docs/versioned_docs/version-2.16/_media/benchmark_vault/5replicas/p99_latency.png deleted file mode 100644 index 0190118b2..000000000 Binary files a/docs/versioned_docs/version-2.16/_media/benchmark_vault/5replicas/p99_latency.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.16/_media/concept-constellation.svg b/docs/versioned_docs/version-2.16/_media/concept-constellation.svg deleted file mode 100644 index 30d32bf6d..000000000 --- a/docs/versioned_docs/version-2.16/_media/concept-constellation.svg +++ /dev/null @@ -1,460 +0,0 @@ - - diff --git a/docs/versioned_docs/version-2.16/_media/concept-managed.svg b/docs/versioned_docs/version-2.16/_media/concept-managed.svg deleted file mode 100644 index 5645a608f..000000000 --- a/docs/versioned_docs/version-2.16/_media/concept-managed.svg +++ /dev/null @@ -1,591 +0,0 @@ - - diff --git a/docs/versioned_docs/version-2.16/_media/constellation_oneline.svg b/docs/versioned_docs/version-2.16/_media/constellation_oneline.svg deleted file mode 100644 index 4e354958a..000000000 --- a/docs/versioned_docs/version-2.16/_media/constellation_oneline.svg +++ /dev/null @@ -1,52 +0,0 @@ - - - - - - - - diff --git a/docs/versioned_docs/version-2.16/_media/example-emojivoto.jpg b/docs/versioned_docs/version-2.16/_media/example-emojivoto.jpg deleted file mode 100644 index 4be0d5b26..000000000 Binary files 
a/docs/versioned_docs/version-2.16/_media/example-emojivoto.jpg and /dev/null differ diff --git a/docs/versioned_docs/version-2.16/_media/example-online-boutique.jpg b/docs/versioned_docs/version-2.16/_media/example-online-boutique.jpg deleted file mode 100644 index 026f0d865..000000000 Binary files a/docs/versioned_docs/version-2.16/_media/example-online-boutique.jpg and /dev/null differ diff --git a/docs/versioned_docs/version-2.16/_media/recovery-gcp-serial-console-link.png b/docs/versioned_docs/version-2.16/_media/recovery-gcp-serial-console-link.png deleted file mode 100644 index eb67f0e99..000000000 Binary files a/docs/versioned_docs/version-2.16/_media/recovery-gcp-serial-console-link.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.16/_media/tcb.svg b/docs/versioned_docs/version-2.16/_media/tcb.svg deleted file mode 100644 index e5bcb5b95..000000000 --- a/docs/versioned_docs/version-2.16/_media/tcb.svg +++ /dev/null @@ -1,535 +0,0 @@ - - diff --git a/docs/versioned_docs/version-2.16/architecture/attestation.md b/docs/versioned_docs/version-2.16/architecture/attestation.md deleted file mode 100644 index a07b35e5a..000000000 --- a/docs/versioned_docs/version-2.16/architecture/attestation.md +++ /dev/null @@ -1,394 +0,0 @@ -# Attestation - -This page explains Constellation's attestation process and highlights the cornerstones of its trust model. - -## Terms - -The following lists terms and concepts that help to understand the attestation concept of Constellation. - -### Trusted Platform Module (TPM) - -A TPM chip is a dedicated tamper-resistant crypto-processor. -It can securely store artifacts such as passwords, certificates, encryption keys, or *runtime measurements* (more on this below). -When a TPM is implemented in software, it's typically called a *virtual* TPM (vTPM). - -### Runtime measurement - -A runtime measurement is a cryptographic hash of the memory pages of a so called *runtime component*. Runtime components of interest typically include a system's bootloader or OS kernel. - -### Platform Configuration Register (PCR) - -A Platform Configuration Register (PCR) is a memory location in the TPM that has some unique properties. -To store a new value in a PCR, the existing value is extended with a new value as follows: - -``` -PCR[N] = HASHalg( PCR[N] || ArgumentOfExtend ) -``` - -The PCRs are typically used to store runtime measurements. -The new value of a PCR is always an extension of the existing value. -Thus, storing the measurements of multiple components into the same PCR irreversibly links them together. - -### Measured boot - -Measured boot builds on the concept of chained runtime measurements. -Each component in the boot chain loads and measures the next component into the PCR before executing it. -By comparing the resulting PCR values against trusted reference values, the integrity of the entire boot chain and thereby the running system can be ensured. - -### Remote attestation (RA) - -Remote attestation is the process of verifying certain properties of an application or platform, such as integrity and confidentiality, from a remote location. -In the case of a measured boot, the goal is to obtain a signed attestation statement on the PCR values of the boot measurements. -The statement can then be verified and compared to a set of trusted reference values. -This way, the integrity of the platform can be ensured before sharing secrets with it. 
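The extend operation shown above can be reproduced offline with standard tools. The following is a minimal sketch, assuming a SHA-256 PCR bank, the all-zero reset value, and a hypothetical component digest, of how one extend step chains a new measurement onto the previous PCR value:

```shell-session
$ # Previous PCR value (the all-zero reset value of a SHA-256 PCR)
$ pcr="0000000000000000000000000000000000000000000000000000000000000000"
$ # Digest of the component being measured (hypothetical input)
$ event="$(printf 'bootloader' | sha256sum | cut -d' ' -f1)"
$ # PCR[N] = SHA256( PCR[N] || event digest )
$ printf '%s%s' "$pcr" "$event" | xxd -r -p | sha256sum | cut -d' ' -f1
```

Because each new value depends on the previous one, replaying the extends in a different order or with different inputs yields a different final PCR value.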
- -### Confidential virtual machine (CVM) - -Confidential computing (CC) is the protection of data in-use with hardware-based trusted execution environments (TEEs). -With CVMs, TEEs encapsulate entire virtual machines and isolate them against the hypervisor, other VMs, and direct memory access. -After loading the initial VM image into encrypted memory, the hypervisor calls for a secure processor to measure these initial memory pages. -The secure processor locks these pages and generates an attestation report on the initial page measurements. -CVM memory pages are encrypted with a key that resides inside the secure processor, which makes sure only the guest VM can access them. -The attestation report is signed by the secure processor and can be verified using remote attestation via the certificate authority of the hardware vendor. -Such an attestation statement guarantees the confidentiality and integrity of a CVM. - -### Attested TLS (aTLS) - -In a CC environment, attested TLS (aTLS) can be used to establish secure connections between two parties using the remote attestation features of the CC components. - -aTLS modifies the TLS handshake by embedding an attestation statement into the TLS certificate. -Instead of relying on a certificate authority, aTLS uses this attestation statement to establish trust in the certificate. - -The protocol can be used by clients to verify a server certificate, by a server to verify a client certificate, or for mutual verification (mutual aTLS). - -## Overview - -The challenge for Constellation is to lift a CVM's attestation statement to the Kubernetes software layer and make it end-to-end verifiable. -From there, Constellation needs to expand the attestation from a single CVM to the entire cluster. - -The [*JoinService*](microservices.md#joinservice) and [*VerificationService*](microservices.md#verificationservice) are where all runs together. -Internally, the *JoinService* uses remote attestation to securely join CVM nodes to the cluster. -Externally, the *VerificationService* provides an attestation statement for the cluster's CVMs and configuration. - -The following explains the details of both steps. - -## Node attestation - -The idea is that Constellation nodes should have verifiable integrity from the CVM hardware measurement up to the Kubernetes software layer. -The solution is a verifiable boot chain and an integrity-protected runtime environment. - -Constellation uses measured boot within CVMs, measuring each component in the boot process before executing it. -Outside of CC, this is usually implemented via TPMs. -CVM technologies differ in how they implement runtime measurements, but the general concepts are similar to those of a TPM. -For simplicity, TPM terminology like *PCR* is used in the following. - -When a Constellation node image boots inside a CVM, measured boot is used for all stages and components of the boot chain. -This process goes up to the root filesystem. -The root filesystem is mounted read-only with integrity protection. -For the details on the image and boot stages see the [image architecture](../architecture/images.md) documentation. -Any changes to the image will inevitably also change the corresponding PCR values. -To create a node attestation statement, the Constellation image obtains a CVM attestation statement from the hardware. -This includes the runtime measurements and thereby binds the measured boot results to the CVM hardware measurement. 
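For illustration, on a machine with a (v)TPM and the `tpm2-tools` package installed (which the minimal Constellation node image doesn't ship by default), the PCR bank holding these runtime measurements could be inspected directly:

```shell-session
$ # Read selected SHA-256 PCRs; PCR 15 holds the clusterID measurement after initialization
$ tpm2_pcrread sha256:4,9,11,12,13,15
```

The attestation statement obtained from the hardware is, in essence, a signed quote over registers like these.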
- -In addition to the image measurements, Constellation extends a PCR during the [initialization phase](../workflows/create.md) that irrevocably marks the node as initialized. -The measurement is created using the [*clusterID*](../architecture/keys.md#cluster-identity), tying all future attestation statements to this ID. -Thereby, an attestation statement is unique for every cluster and a node can be identified unambiguously as being initialized. - -To verify an attestation, the hardware's signature and a statement are verified first to establish trust in the contained runtime measurements. -If successful, the measurements are verified against the trusted values of the particular Constellation release version. -Finally, the measurement of the *clusterID* can be compared by calculating it with the [master secret](keys.md#master-secret). - -### Runtime measurements - -Constellation uses runtime measurements to implement the measured boot approach. -As stated above, the underlying hardware technology and guest firmware differ in their implementations of runtime measurements. -The following gives a detailed description of the available measurements in the different cloud environments. - -The runtime measurements consist of two types of values: - -* **Measurements produced by the cloud infrastructure and firmware of the CVM**: -These are measurements of closed-source firmware and other values controlled by the cloud provider. -While not being reproducible for the user, some of them can be compared against previously observed values. -Others may change frequently and aren't suitable for verification. -The [signed image measurements](#chain-of-trust) include measurements that are known, previously observed values. - -* **Measurements produced by the Constellation bootloader and boot chain**: -The Constellation Bootloader takes over from the CVM firmware and [measures the rest of the boot chain](images.md). -The Constellation [Bootstrapper](microservices.md#bootstrapper) is the first user mode component that runs in a Constellation image. -It extends PCR registers with the [IDs](keys.md#cluster-identity) of the cluster marking a node as initialized. - -Constellation allows to specify in the config which measurements should be enforced during the attestation process. -Enforcing non-reproducible measurements controlled by the cloud provider means that changes in these values require manual updates to the cluster's config. -By default, Constellation only enforces measurements that are stable values produced by the infrastructure or by Constellation directly. - - - - -Constellation uses the [vTPM](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html) (NitroTPM) feature of the [AWS Nitro System](http://aws.amazon.com/ec2/nitro/) on AWS for runtime measurements. - -The vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. -The VMs are attested by obtaining signed PCR values over the VM's boot configuration from the TPM and comparing them to a known, good state (measured boot). - -The following table lists all PCR values of the vTPM and the measured components. -It also lists what components of the boot chain did the measurements and if the value is reproducible and verifiable. -The latter means that the value can be generated offline and compared to the one in the vTPM. 
- -| PCR | Components | Measured by | Reproducible and verifiable | -| ----------- | ---------------------------------------------------------------- | -------------------------------------- | --------------------------- | -| 0 | Firmware | AWS | No | -| 1 | Firmware | AWS | No | -| 2 | Firmware | AWS | No | -| 3 | Firmware | AWS | No | -| 4 | Constellation Bootloader, Kernel, initramfs, Kernel command line | AWS, Constellation Bootloader | Yes | -| 5 | Firmware | AWS | No | -| 6 | Firmware | AWS | No | -| 7 | Secure Boot Policy | AWS, Constellation Bootloader | No | -| 8 | - | - | - | -| 9 | initramfs, Kernel command line | Linux Kernel | Yes | -| 10 | User space | Linux IMA | No[^1] | -| 11 | Unified Kernel Image components | Constellation Bootloader | Yes | -| 12 | Reserved | (User space, Constellation Bootloader) | Yes | -| 13 | Reserved | (Constellation Bootloader) | Yes | -| 14 | Secure Boot State | Constellation Bootloader | No | -| 15 | ClusterID | Constellation Bootstrapper | Yes | -| 16–23 | Unused | - | - | - - - - -Constellation uses the [vTPM](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch#vtpm) feature of Azure CVMs for runtime measurements. -This vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. -It provides a [measured boot](https://docs.microsoft.com/en-us/azure/security/fundamentals/measured-boot-host-attestation#measured-boot) verification that's based on the trusted launch feature of [Trusted Launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch). - -The following table lists all PCR values of the vTPM and the measured components. -It also lists what components of the boot chain did the measurements and if the value is reproducible and verifiable. -The latter means that the value can be generated offline and compared to the one in the vTPM. - -| PCR | Components | Measured by | Reproducible and verifiable | -| ----------- | ---------------------------------------------------------------- | -------------------------------------- | --------------------------- | -| 0 | Firmware | Azure | No | -| 1 | Firmware | Azure | No | -| 2 | Firmware | Azure | No | -| 3 | Firmware | Azure | No | -| 4 | Constellation Bootloader, Kernel, initramfs, Kernel command line | Azure, Constellation Bootloader | Yes | -| 5 | Reserved | Azure | No | -| 6 | VM Unique ID | Azure | No | -| 7 | Secure Boot State | Azure, Constellation Bootloader | No | -| 8 | - | - | - | -| 9 | initramfs, Kernel command line | Linux Kernel | Yes | -| 10 | User space | Linux IMA | No[^1] | -| 11 | Unified Kernel Image components | Constellation Bootloader | Yes | -| 12 | Reserved | (User space, Constellation Bootloader) | Yes | -| 13 | Reserved | (Constellation Bootloader) | Yes | -| 14 | Secure Boot State | Constellation Bootloader | No | -| 15 | ClusterID | Constellation Bootstrapper | Yes | -| 16–23 | Unused | - | - | - - - - -Constellation uses the [vTPM](https://cloud.google.com/compute/confidential-vm/docs/about-cvm) feature of CVMs on GCP for runtime measurements. -Note that this vTPM doesn't run inside the hardware-protected CVM context, but is emulated by the hypervisor. - -The vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. 
-It provides a [launch attestation report](https://cloud.google.com/compute/confidential-vm/docs/monitoring#about_launch_attestation_report_events) that's based on the measured boot feature of [Shielded VMs](https://cloud.google.com/compute/shielded-vm/docs/shielded-vm#measured-boot). - -The following table lists all PCR values of the vTPM and the measured components. -It also lists what components of the boot chain did the measurements and if the value is reproducible and verifiable. -The latter means that the value can be generated offline and compared to the one in the vTPM. - -| PCR | Components | Measured by | Reproducible and verifiable | -| ----------- | ---------------------------------------------------------------- | -------------------------------------- | --------------------------- | -| 0 | CVM version and technology | GCP | No | -| 1 | Firmware | GCP | No | -| 2 | Firmware | GCP | No | -| 3 | Firmware | GCP | No | -| 4 | Constellation Bootloader, Kernel, initramfs, Kernel command line | GCP, Constellation Bootloader | Yes | -| 5 | Disk GUID partition table | GCP | No | -| 6 | Disk GUID partition table | GCP | No | -| 7 | GCP Secure Boot Policy | GCP, Constellation Bootloader | No | -| 8 | - | - | - | -| 9 | initramfs, Kernel command line | Linux Kernel | Yes | -| 10 | User space | Linux IMA | No[^1] | -| 11 | Unified Kernel Image components | Constellation Bootloader | Yes | -| 12 | Reserved | (User space, Constellation Bootloader) | Yes | -| 13 | Reserved | (Constellation Bootloader) | Yes | -| 14 | Secure Boot State | Constellation Bootloader | No | -| 15 | ClusterID | Constellation Bootstrapper | Yes | -| 16–23 | Unused | - | - | - - - - -Constellation uses a hypervisor-based vTPM for runtime measurements. - -The vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. -The VMs are attested by obtaining signed PCR values over the VM's boot configuration from the TPM and comparing them to a known, good state (measured boot). - -The following table lists all PCR values of the vTPM and the measured components. -It also lists what components of the boot chain did the measurements and if the value is reproducible and verifiable. -The latter means that the value can be generated offline and compared to the one in the vTPM. 
- -| PCR | Components | Measured by | Reproducible and verifiable | -| ----------- | ---------------------------------------------------------------- | -------------------------------------- | --------------------------- | -| 0 | Firmware | STACKIT | No | -| 1 | Firmware | STACKIT | No | -| 2 | Firmware | STACKIT | No | -| 3 | Firmware | STACKIT | No | -| 4 | Constellation Bootloader, Kernel, initramfs, Kernel command line | STACKIT, Constellation Bootloader | Yes | -| 5 | Firmware | STACKIT | No | -| 6 | Firmware | STACKIT | No | -| 7 | Secure Boot Policy | STACKIT, Constellation Bootloader | No | -| 8 | - | - | - | -| 9 | initramfs, Kernel command line | Linux Kernel | Yes | -| 10 | User space | Linux IMA | No[^1] | -| 11 | Unified Kernel Image components | Constellation Bootloader | Yes | -| 12 | Reserved | (User space, Constellation Bootloader) | Yes | -| 13 | Reserved | (Constellation Bootloader) | Yes | -| 14 | Secure Boot State | Constellation Bootloader | No | -| 15 | ClusterID | Constellation Bootstrapper | Yes | -| 16–23 | Unused | - | - | - - - - -### CVM verification - -To verify the integrity of the received attestation statement, a chain of trust from the CVM technology to the interface providing the statement has to be established. -For verification of the CVM technology, Constellation may expose additional options in its config file. - - - - -On AWS, AMD SEV-SNP is used to provide runtime encryption to the VMs. -An SEV-SNP attestation report is used to establish trust in the VM. -You may customize certain parameters for verification of the attestation statement using the Constellation config file. - -* TCB versions - - You can set the minimum version numbers of components in the SEV-SNP TCB. - Use the latest versions to enforce that only machines with the most recent firmware updates are allowed to join the cluster. - Alternatively, you can set a lower minimum version to allow slightly out-of-date machines to still be able to join the cluster. - -* AMD Root Key Certificate - - This certificate is the root of trust for verifying the SEV-SNP certificate chain. - -* AMD Signing Key Certificate - - This is the intermediate certificate for verifying the SEV-SNP report's signature. - If it's not specified, the CLI fetches it from the AMD key distribution server. - - - - -On Azure, AMD SEV-SNP is used to provide runtime encryption to the VMs. -An SEV-SNP attestation report is used to establish trust in the vTPM running inside the VM. -You may customize certain parameters for verification of the attestation statement using the Constellation config file. - -* TCB versions - - You can set the minimum version numbers of components in the SEV-SNP TCB. - Use the latest versions to enforce that only machines with the most recent firmware updates are allowed to join the cluster. - Alternatively, you can set a lower minimum version to allow slightly out-of-date machines to still be able to join the cluster. - -* AMD Root Key Certificate - - This certificate is the root of trust for verifying the SEV-SNP certificate chain. - -* Firmware Signer - - This config option allows you to specify how the firmware signer should be verified. - More explicitly, it controls the verification of the `IDKeyDigest` value in the SEV-SNP attestation report. - You can provide a list of accepted key digests and specify a policy on how this list is compared against the reported `IDKeyDigest`. - - - - -On GCP, AMD SEV-ES is used to provide runtime encryption to the VMs. 
-The hypervisor-based vTPM is used to establish trust in the VM via [runtime measurements](#runtime-measurements). -There is no additional configuration available for GCP. - - - - -On STACKIT, AMD SEV-ES is used to provide runtime encryption to the VMs. -The hypervisor-based vTPM is used to establish trust in the VM via [runtime measurements](#runtime-measurements). -There is no additional configuration available for STACKIT. - - - - -## Cluster attestation - -Cluster-facing, Constellation's [*JoinService*](microservices.md#joinservice) verifies each node joining the cluster given the configured ground truth runtime measurements. -User-facing, the [*VerificationService*](microservices.md#verificationservice) provides an interface to verify a node using remote attestation. -By verifying the first node during the [initialization](microservices.md#bootstrapper) and configuring the ground truth measurements that are subsequently enforced by the *JoinService*, the whole cluster is verified in a transitive way. - -### Cluster-facing attestation - -The *JoinService* is provided with the runtime measurements of the whitelisted Constellation image version as the ground truth. -During the initialization and the cluster bootstrapping, each node connects to the *JoinService* using [aTLS](#attested-tls-atls). -During the handshake, the node transmits an attestation statement including its runtime measurements. -The *JoinService* verifies that statement and compares the measurements against the ground truth. -For details of the initialization process check the [microservice descriptions](microservices.md). - -After the initialization, every node updates its runtime measurements with the *clusterID* value, marking it irreversibly as initialized. -When an initialized node tries to join another cluster, its measurements inevitably mismatch the measurements of an uninitialized node and it will be declined. - -### User-facing attestation - -The [*VerificationService*](microservices.md#verificationservice) provides an endpoint for obtaining its hardware-based remote attestation statement, which includes the runtime measurements. -A user can [verify](../workflows/verify-cluster.md) this statement and compare the measurements against the configured ground truth and, thus, verify the identity and integrity of all Constellation components and the cluster configuration. Subsequently, the user knows that the entire cluster is in the expected state and is trustworthy. - -## Putting it all together - -This section puts the aforementioned concepts together and illustrate how trust into a Constellation cluster is established and maintained. - -### CLI and node images - -It all starts with the CLI executable. The CLI is signed by Edgeless Systems. To ensure non-repudiability for CLI releases, Edgeless Systems publishes corresponding signatures to the public ledger of the [sigstore project](https://www.sigstore.dev/). There's a [step-by-step guide](../workflows/verify-cli.md) on how to verify CLI signatures based on sigstore. - -The CLI contains the latest runtime measurements of the Constellation node image for all supported cloud platforms. In case a different version of the node image is to be used, the corresponding runtime measurements can be fetched using the CLI's [fetch-measurements command](../reference/cli.md#constellation-config-fetch-measurements). This command downloads the runtime measurements and the corresponding signature from cdn.confidential.cloud. 
See for example the following files corresponding to node image v2.16.3: - -* [Measurements](https://cdn.confidential.cloud/constellation/v2/ref/-/stream/stable/v2.16.3/image/measurements.json) -* [Signature](https://cdn.confidential.cloud/constellation/v2/ref/-/stream/stable/v2.16.3/image/measurements.json.sig) - -The CLI contains the long-term public key of Edgeless Systems to verify the signature of downloaded runtime measurements. - -### Cluster creation - -When a cluster is [created](../workflows/create.md), the CLI automatically verifies the runtime measurements of the *first node* using remote attestation. Based on this, the CLI and the first node set up a temporary TLS connection. This [aTLS](#attested-tls-atls) connection is used for two things: - -1. The CLI sends the [master secret](../architecture/keys.md#master-secret) of the to-be-created cluster to the CLI. The master secret is generated by the first node. -2. The first node sends a [kubeconfig file](https://www.redhat.com/sysadmin/kubeconfig) with Kubernetes credentials to the CLI. - -After this, the aTLS connection is closed and the first node bootstraps the Kubernetes cluster. All subsequent interactions between the CLI and the cluster go via the [Kubernetes API](https://kubernetes.io/docs/concepts/overview/kubernetes-api/) server running inside the cluster. The CLI (and other tools like kubectl) use the credentials referenced by the kubeconfig file to authenticate themselves towards the Kubernetes API server and to establish a mTLS connection. - -The CLI connects to the Kubernetes API to write the runtime measurements for the applicable node image to etcd. The JoinService uses these runtime measurements to verify all nodes that join the cluster subsequently. - -### Chain of trust - -In summary, there's a chain of trust based on cryptographic signatures that goes from the user to the cluster via the CLI. This is illustrated in the following diagram. - -```mermaid -flowchart LR - A[User]-- "verifies" -->B[CLI] - B[CLI]-- "verifies" -->C([Runtime measurements]) - D[Edgeless Systems]-- "signs" -->B[CLI] - D[Edgeless Systems]-- "signs" -->C([Runtime measurements]) - B[CLI]-- "verifies (remote attestation)" -->E[First node] - E[First node]-- "verifies (remote attestation)" -->F[Other nodes] - C([Runtime measurements]) -.-> E[First node] - C([Runtime measurements]) -.-> F[Other nodes] -``` - -### Upgrades - -Whenever a cluster is [upgraded](../workflows/upgrade.md) to a new version of the node image, the CLI sends the corresponding runtime measurements via the Kubernetes API server. The new runtime measurements are stored in etcd within the cluster and replace any previous runtime measurements. The new runtime measurements are then used automatically by the JoinService for the verification of new nodes. - -## References - -[^1]: Linux IMA produces runtime measurements of user-space binaries. -However, these measurements aren't deterministic and thus, PCR\[10] can't be compared to a constant value. -Instead, a policy engine must be used to verify the TPM event log against a policy. diff --git a/docs/versioned_docs/version-2.16/architecture/encrypted-storage.md b/docs/versioned_docs/version-2.16/architecture/encrypted-storage.md deleted file mode 100644 index f047fa4a9..000000000 --- a/docs/versioned_docs/version-2.16/architecture/encrypted-storage.md +++ /dev/null @@ -1,62 +0,0 @@ -# Encrypted persistent storage - -Confidential VMs provide runtime memory encryption to protect data in use. 
-In the context of Kubernetes, this is sufficient for the confidentiality and integrity of stateless services. -Consider a front-end web server, for example, that keeps all connection information cached in main memory. -No sensitive data is ever written to an insecure medium. -However, many real-world applications need some form of state or data-lake service that's connected to a persistent storage device and requires encryption at rest. -As described in [Use persistent storage](../workflows/storage.md), cloud service providers (CSPs) use the container storage interface (CSI) to make their storage solutions available to Kubernetes workloads. -These CSI storage solutions often support some sort of encryption. -For example, Google Cloud [encrypts data at rest by default](https://cloud.google.com/security/encryption/default-encryption), without any action required by the customer. - -## Cloud provider-managed encryption - -CSP-managed storage solutions encrypt the data in the cloud backend before writing it physically to disk. -In the context of confidential computing and Constellation, the CSP and its managed services aren't trusted. -Hence, cloud provider-managed encryption protects your data from offline hardware access to physical storage devices. -It doesn't protect it from anyone with infrastructure-level access to the storage backend or a malicious insider in the cloud platform. -Even with "bring your own key" or similar concepts, the CSP performs the encryption process with access to the keys and plaintext data. - -In the security model of Constellation, securing persistent storage and thereby data at rest requires that all cryptographic operations are performed inside a trusted execution environment. -Consequently, using CSP-managed encryption of persistent storage usually isn't an option. - -## Constellation-managed encryption - -Constellation provides CSI drivers for storage solutions in all major clouds with built-in encryption support. -Block storage provisioned by the CSP is [mapped](https://guix.gnu.org/manual/en/html_node/Mapped-Devices.html) using the [dm-crypt](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/dm-crypt.html), and optionally the [dm-integrity](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/dm-integrity.html), kernel modules, before it's formatted and accessed by the Kubernetes workloads. -All cryptographic operations happen inside the trusted environment of the confidential Constellation node. - -Note that for integrity-protected disks, [volume expansion](https://kubernetes.io/blog/2018/07/12/resizing-persistent-volumes-using-kubernetes/) isn't supported. - -By default the driver uses data encryption keys (DEKs) issued by the Constellation [*KeyService*](microservices.md#keyservice). -The DEKs are in turn derived from the Constellation's key encryption key (KEK), which is directly derived from the [master secret](keys.md#master-secret). -This is the recommended mode of operation, and also requires the least amount of setup by the cluster administrator. - -Alternatively, the driver can be configured to use a key management system to store and access KEKs and DEKs. - -Refer to [keys and cryptography](keys.md) for more details on key management in Constellation. - -Once deployed and configured, the CSI driver ensures transparent encryption and integrity of all persistent volumes provisioned via its storage class. -Data at rest is secured without any additional actions required by the developer. 
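As an illustration, requesting an encrypted volume only requires referencing one of the driver's storage classes in a PersistentVolumeClaim. The class name `encrypted-rwo` below is an assumption; run `kubectl get storageclass` to see which classes are actually installed in your cluster:

```shell-session
$ kubectl get storageclass
$ cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: encrypted-data
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: encrypted-rwo   # assumed class name provided by the Constellation CSI driver
  resources:
    requests:
      storage: 10Gi
EOF
```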
- -## Cryptographic algorithms - -This section gives an overview of the libraries, cryptographic algorithms, and their configurations, used in Constellation's CSI drivers. - -### dm-crypt - -To interact with the dm-crypt kernel module, Constellation uses [libcryptsetup](https://gitlab.com/cryptsetup/cryptsetup/). -New devices are formatted as [LUKS2](https://gitlab.com/cryptsetup/LUKS2-docs/-/tree/master) partitions with a sector size of 4096 bytes. -The used key derivation function is [Argon2id](https://datatracker.ietf.org/doc/html/rfc9106) with the [recommended parameters for memory-constrained environments](https://datatracker.ietf.org/doc/html/rfc9106#section-7.4) of 3 iterations and 64 MiB of memory, utilizing 4 parallel threads. -For encryption Constellation uses AES in XTS-Plain64. The key size is 512 bit. - -### dm-integrity - -To interact with the dm-integrity kernel module, Constellation uses [libcryptsetup](https://gitlab.com/cryptsetup/cryptsetup/). -When enabled, the used data integrity algorithm is [HMAC](https://datatracker.ietf.org/doc/html/rfc2104) with SHA256 as the hash function. -The tag size is 32 Bytes. - -## Encrypted S3 object storage - -Constellation comes with a service that you can use to transparently retrofit client-side encryption to existing applications that use S3 (AWS or compatible) for storage. -To learn more, check out the [s3proxy documentation](../workflows/s3proxy.md). diff --git a/docs/versioned_docs/version-2.16/architecture/images.md b/docs/versioned_docs/version-2.16/architecture/images.md deleted file mode 100644 index 8a9c51d36..000000000 --- a/docs/versioned_docs/version-2.16/architecture/images.md +++ /dev/null @@ -1,49 +0,0 @@ -# Constellation images - -Constellation uses a minimal version of Fedora as the operating system running inside confidential VMs. This Linux distribution is optimized for containers and designed to be stateless. -The Constellation images provide measured boot and an immutable filesystem. - -## Measured boot - -```mermaid -flowchart LR - Firmware --> Bootloader - Bootloader --> uki - subgraph uki[Unified Kernel Image] - Kernel[Kernel] - initramfs[Initramfs] - cmdline[Kernel Command Line] - end - uki --> rootfs[Root Filesystem] -``` - -Measured boot uses a Trusted Platform Module (TPM) to measure every part of the boot process. This allows for verification of the integrity of a running system at any point in time. To ensure correct measurements of every stage, each stage is responsible to measure the next stage before transitioning. - -### Firmware - -With confidential VMs, the firmware is the root of trust and is measured automatically at boot. After initialization, the firmware will load and measure the bootloader before executing it. - -### Bootloader - -The bootloader is the first modifiable part of the boot chain. The bootloader is tasked with loading the kernel, initramfs and setting the kernel command line. The Constellation bootloader measures these components before starting the kernel. - -### initramfs - -The initramfs is a small filesystem loaded to prepare the actual root filesystem. The Constellation initramfs maps the block device containing the root filesystem with [dm-verity](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/verity.html). The initramfs then mounts the root filesystem from the mapped block device. - -dm-verity provides integrity checking using a cryptographic hash tree. When a block is read, its integrity is checked by verifying the tree against a trusted root hash. 
The initramfs reads this root hash from the previously measured kernel command line. Thus, if any block of the root filesystem's device is modified on disk, trying to read the modified block will result in a kernel panic at runtime. - -After mounting the root filesystem, the initramfs will switch over and start the `init` process of the integrity-protected root filesystem. - -## State disk - -In addition to the read-only root filesystem, each Constellation node has a disk for storing state data. -This disk is mounted readable and writable by the initramfs and contains data that should persist across reboots. -Such data can contain sensitive information and, therefore, must be stored securely. -To that end, the state disk is protected by authenticated encryption. -See the section on [keys and encryption](keys.md#storage-encryption) for more information on the cryptographic primitives in use. - -## Kubernetes components - -During initialization, the [*Bootstrapper*](microservices.md#bootstrapper) downloads and verifies the [Kubernetes components](https://kubernetes.io/docs/concepts/overview/components/) as configured by the user. -They're stored on the state partition and can be updated once new releases need to be installed. diff --git a/docs/versioned_docs/version-2.16/architecture/keys.md b/docs/versioned_docs/version-2.16/architecture/keys.md deleted file mode 100644 index 553d9d4e2..000000000 --- a/docs/versioned_docs/version-2.16/architecture/keys.md +++ /dev/null @@ -1,131 +0,0 @@ -# Key management and cryptographic primitives - -Constellation protects and isolates your cluster and workloads. -To that end, cryptography is the foundation that ensures the confidentiality and integrity of all components. -Evaluating the security and compliance of Constellation requires a precise understanding of the cryptographic primitives and keys used. -The following gives an overview of the architecture and explains the technical details. - -## Confidential VMs - -Confidential VM (CVM) technology comes with hardware and software components for memory encryption, isolation, and remote attestation. -For details on the implementations and cryptographic soundness, refer to the hardware vendors' documentation and advisories. - -## Master secret - -The master secret is the cryptographic material used for deriving the [*clusterID*](#cluster-identity) and the *key encryption key (KEK)* for [storage encryption](#storage-encryption). -It's generated during the bootstrapping of a Constellation cluster. -It can either be managed by [Constellation](#constellation-managed-key-management) or an [external key management system](#user-managed-key-management). -In case of [recovery](#recovery-and-migration), the master secret allows to decrypt the state and recover a Constellation cluster. - -## Cluster identity - -The identity of a Constellation cluster is represented by cryptographic [measurements](attestation.md#runtime-measurements): - -The **base measurements** represent the identity of a valid, uninitialized Constellation node. -They depend on the node image, but are otherwise the same for every Constellation cluster. -On node boot, they're determined using the CVM's attestation mechanism and [measured boot up to the read-only root filesystem](images.md). - -The **clusterID** represents the identity of a single initialized Constellation cluster. -It's derived from the master secret and a cryptographically random salt and unique for every Constellation cluster. 
-The [Bootstrapper](microservices.md#bootstrapper) measures the *clusterID* into its own PCR before executing any code not measured as part of the *base measurements*. -See [Node attestation](attestation.md#node-attestation) for details. - -The remote attestation statement of a Constellation cluster combines the *base measurements* and the *clusterID* for a verifiable, unspoofable, unique identity. - -## Network encryption - -Constellation encrypts all cluster network communication using the [container network interface (CNI)](https://github.com/containernetworking/cni). -See [network encryption](networking.md) for more details. - -The Cilium agent running on each node establishes a secure [WireGuard](https://www.wireguard.com/) tunnel between it and all other known nodes in the cluster. -Each node creates its own [Curve25519](http://cr.yp.to/ecdh.html) encryption key pair and distributes its public key via Kubernetes. -A node uses another node's public key to decrypt and encrypt traffic from and to Cilium-managed endpoints running on that node. -Connections are always encrypted peer-to-peer using [ChaCha20](http://cr.yp.to/chacha.html) with [Poly1305](http://cr.yp.to/mac.html). -WireGuard implements [forward secrecy with key rotation every 2 minutes](https://lists.zx2c4.com/pipermail/wireguard/2017-December/002141.html). -Cilium supports [key rotation](https://docs.cilium.io/en/stable/security/network/encryption-ipsec/#key-rotation) for the long-term node keys via Kubernetes secrets. - -## Storage encryption - -Constellation supports transparent encryption of persistent storage. -The Linux kernel's device mapper-based encryption features are used to encrypt the data on the block storage level. -Currently, the following primitives are used for block storage encryption: - -* [dm-crypt](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/dm-crypt.html) -* [dm-integrity](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/dm-integrity.html) - -Adding primitives for integrity protection in the CVM attacker model are under active development and will be available in a future version of Constellation. -See [encrypted storage](encrypted-storage.md) for more details. - -As a cluster administrator, when creating a cluster, you can use the Constellation [installation program](orchestration.md) to select one of the following methods for key management: - -* Constellation-managed key management -* User-managed key management - -### Constellation-managed key management - -#### Key material and key derivation - -During the creation of a Constellation cluster, the cluster's master secret is used to derive a KEK. -This means creating two clusters with the same master secret will yield the same KEK. -Any data encryption key (DEK) is derived from the KEK via HKDF. -Note that the master secret is recommended to be unique for every cluster and shouldn't be reused (except in case of [recovering](../workflows/recovery.md) a cluster). - -#### State and storage - -The KEK is derived from the master secret during the initialization. -Subsequently, all other key material is derived from the KEK. -Given the same KEK, any DEK can be derived deterministically from a given identifier. -Hence, there is no need to store DEKs. They can be derived on demand. -After the KEK was derived, it's stored in memory only and never leaves the CVM context. - -#### Availability - -Constellation-managed key management has the same availability as the underlying Kubernetes cluster. 
-Therefore, the KEK is stored in the [distributed Kubernetes etcd storage](https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/) to allow for unexpected but non-fatal (control-plane) node failure. -The etcd storage is backed by the encrypted and integrity protected [state disk](images.md#state-disk) of the nodes. - -#### Recovery - -Constellation clusters can be recovered in the event of a disaster, even when all node machines have been stopped and need to be rebooted. -For details on the process see the [recovery workflow](../workflows/recovery.md). - -### User-managed key management - -User-managed key management is under active development and will be available soon. -In scenarios where constellation-managed key management isn't an option, this mode allows you to keep full control of your keys. -For example, compliance requirements may force you to keep your KEKs in an on-prem key management system (KMS). - -During the creation of a Constellation cluster, you specify a KEK present in a remote KMS. -This follows the common scheme of "bring your own key" (BYOK). -Constellation will support several KMSs for managing the storage and access of your KEK. -Initially, it will support the following KMSs: - -* [AWS KMS](https://aws.amazon.com/kms/) -* [GCP KMS](https://cloud.google.com/security-key-management) -* [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/#product-overview) -* [KMIP-compatible KMS](https://www.oasis-open.org/committees/tc_home.php?wg_abbrev=kmip) - -Storing the keys in Cloud KMS of AWS, Azure, or GCP binds the key usage to the particular cloud identity access management (IAM). -In the future, Constellation will support remote attestation-based access policies for Cloud KMS once available. -Note that using a Cloud KMS limits the isolation and protection to the guarantees of the particular offering. - -KMIP support allows you to use your KMIP-compatible on-prem KMS and keep full control over your keys. -This follows the common scheme of "hold your own key" (HYOK). - -The KEK is used to encrypt per-data "data encryption keys" (DEKs). -DEKs are generated to encrypt your data before storing it on persistent storage. -After being encrypted by the KEK, the DEKs are stored on dedicated cloud storage for persistence. -Currently, Constellation supports the following cloud storage options: - -* [AWS S3](https://aws.amazon.com/s3/) -* [GCP Cloud Storage](https://cloud.google.com/storage) -* [Azure Blob Storage](https://azure.microsoft.com/en-us/services/storage/blobs/#overview) - -The DEKs are only present in plaintext form in the encrypted main memory of the CVMs. -Similarly, the cryptographic operations for encrypting data before writing it to persistent storage are performed in the context of the CVMs. - -#### Recovery and migration - -In the case of a disaster, the KEK can be used to decrypt the DEKs locally and subsequently use them to decrypt and retrieve the data. -In case of migration, configuring the same KEK will provide seamless migration of data. -Thus, only the DEK storage needs to be transferred to the new cluster alongside the encrypted data for seamless migration. 
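The deterministic DEK derivation that makes this re-derivation possible can be sketched with standard tooling. The following uses OpenSSL 3's `kdf` subcommand to derive a 32-byte DEK from a KEK via HKDF; the key, salt, and volume identifier are hypothetical placeholders and don't mirror Constellation's exact derivation inputs, and option names may differ slightly between OpenSSL versions (see `openssl kdf -help`):

```shell-session
$ # DEK = HKDF(KEK, info = volume identifier); the same KEK and identifier always yield the same DEK
$ openssl kdf -keylen 32 \
    -kdfopt digest:SHA2-256 \
    -kdfopt key:"example-kek-material" \
    -kdfopt salt:"example-salt" \
    -kdfopt info:"pvc-1234" \
    HKDF
```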
diff --git a/docs/versioned_docs/version-2.16/architecture/microservices.md b/docs/versioned_docs/version-2.16/architecture/microservices.md deleted file mode 100644 index 90bae783b..000000000 --- a/docs/versioned_docs/version-2.16/architecture/microservices.md +++ /dev/null @@ -1,73 +0,0 @@ -# Microservices - -Constellation takes care of bootstrapping and initializing a Confidential Kubernetes cluster. -During the lifetime of the cluster, it handles day 2 operations such as key management, remote attestation, and updates. -These features are provided by several microservices: - -* The [Bootstrapper](microservices.md#bootstrapper) initializes a Constellation node and bootstraps the cluster -* The [JoinService](microservices.md#joinservice) joins new nodes to an existing cluster -* The [VerificationService](microservices.md#verificationservice) provides remote attestation functionality -* The [KeyService](microservices.md#keyservice) manages Constellation-internal keys - -The relations between microservices are shown in the following diagram: - -```mermaid -flowchart LR - subgraph admin [Admin's machine] - A[Constellation CLI] - end - subgraph img [Constellation OS image] - B[Constellation OS] - C[Bootstrapper] - end - subgraph Kubernetes - D[JoinService] - E[KeyService] - F[VerificationService] - end - A -- deploys --> - B -- starts --> C - C -- deploys --> D - C -- deploys --> E - C -- deploys --> F -``` - -## Bootstrapper - -The *Bootstrapper* is the first microservice launched after booting a Constellation node image. -It sets up that machine as a Kubernetes node and integrates that node into the Kubernetes cluster. -To this end, the *Bootstrapper* first downloads and verifies the [Kubernetes components](https://kubernetes.io/docs/concepts/overview/components/) at the configured versions. -The *Bootstrapper* tries to find an existing cluster and if successful, communicates with the [JoinService](microservices.md#joinservice) to join the node. -Otherwise, it waits for an initialization request to create a new Kubernetes cluster. - -## JoinService - -The *JoinService* runs as [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) on each control-plane node. -New nodes (at cluster start, or later through autoscaling) send a request to the service over [attested TLS (aTLS)](attestation.md#attested-tls-atls). -The *JoinService* verifies the new node's certificate and attestation statement. -If attestation is successful, the new node is supplied with an encryption key from the [*KeyService*](microservices.md#keyservice) for its state disk, and a Kubernetes bootstrap token. - - -```mermaid -sequenceDiagram - participant New node - participant JoinService - New node->>JoinService: aTLS handshake (server side verification) - JoinService-->>New node: # - New node->>+JoinService: IssueJoinTicket(DiskUUID, NodeName, IsControlPlane) - JoinService->>+KeyService: GetDataKey(DiskUUID) - KeyService-->>-JoinService: DiskEncryptionKey - JoinService-->>-New node: DiskEncryptionKey, KubernetesJoinToken, ... -``` - -## VerificationService - -The *VerificationService* runs as DaemonSet on each node. -It provides user-facing functionality for remote attestation during the cluster's lifetime via an endpoint for [verifying the cluster](attestation.md#cluster-attestation). -Read more about the hardware-based [attestation feature](attestation.md) of Constellation and how to [verify](../workflows/verify-cluster.md) a cluster on the client side. 
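On the client side, this verification is typically driven by the CLI. A sketch, assuming the *VerificationService* is reachable on its NodePort (commonly 30081) and flag names as used by recent CLI versions (see `constellation verify --help`):

```shell-session
$ # Node IP from `kubectl get nodes -o wide`, clusterID as returned after initialization
$ constellation verify --node-endpoint 203.0.113.10:30081 --cluster-id "$CLUSTER_ID"
```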
- -## KeyService - -The *KeyService* runs as DaemonSet on each control-plane node. -It implements the key management for the [storage encryption keys](keys.md#storage-encryption) in Constellation. These keys are used for the [state disk](images.md#state-disk) of each node and the [transparently encrypted storage](encrypted-storage.md) for Kubernetes. -Depending on wether the [constellation-managed](keys.md#constellation-managed-key-management) or [user-managed](keys.md#user-managed-key-management) mode is used, the *KeyService* holds the key encryption key (KEK) directly or calls an external key management service (KMS) for key derivation respectively. diff --git a/docs/versioned_docs/version-2.16/architecture/networking.md b/docs/versioned_docs/version-2.16/architecture/networking.md deleted file mode 100644 index e9cbdf029..000000000 --- a/docs/versioned_docs/version-2.16/architecture/networking.md +++ /dev/null @@ -1,22 +0,0 @@ -# Network encryption - -Constellation encrypts all pod communication using the [container network interface (CNI)](https://github.com/containernetworking/cni). -To that end, Constellation deploys, configures, and operates the [Cilium](https://cilium.io/) CNI plugin. -Cilium provides [transparent encryption](https://docs.cilium.io/en/stable/security/network/encryption) for all cluster traffic using either IPSec or [WireGuard](https://www.wireguard.com/). -Currently, Constellation only supports WireGuard as the encryption engine. -You can read more about the cryptographic soundness of WireGuard [in their white paper](https://www.wireguard.com/papers/wireguard.pdf). - -Cilium is actively working on implementing a feature called [`host-to-host`](https://github.com/cilium/cilium/pull/19401) encryption mode for WireGuard. -With `host-to-host`, all traffic between nodes will be tunneled via WireGuard (host-to-host, host-to-pod, pod-to-host, pod-to-pod). -Until the `host-to-host` feature is released, Constellation enables `pod-to-pod` encryption. -This mode encrypts all traffic between Kubernetes pods using WireGuard tunnels. - -When using Cilium in the default setup but with encryption enabled, there is a [known issue](https://docs.cilium.io/en/v1.12/gettingstarted/encryption/#egress-traffic-to-not-yet-discovered-remote-endpoints-may-be-unencrypted) -that can cause pod-to-pod traffic to be unencrypted. -To mitigate this issue, Constellation adds a *strict* mode to Cilium's `pod-to-pod` encryption. -This mode changes the default behavior of traffic that's destined for an unknown endpoint to not be send out in plaintext, but instead being dropped. -The strict mode distinguishes between traffic that's send to a pod from traffic that's destined for a cluster-external endpoint by considering the pod's CIDR range. - -Traffic originating from hosts isn't encrypted yet. -This mainly includes health checks from Kubernetes API server. -Also, traffic proxied over the API server via e.g. `kubectl port-forward` isn't encrypted. diff --git a/docs/versioned_docs/version-2.16/architecture/observability.md b/docs/versioned_docs/version-2.16/architecture/observability.md deleted file mode 100644 index 0f4daffd4..000000000 --- a/docs/versioned_docs/version-2.16/architecture/observability.md +++ /dev/null @@ -1,74 +0,0 @@ -# Observability - -In Kubernetes, observability is the ability to gain insight into the behavior and performance of applications. 
-It helps identify and resolve issues more effectively, ensuring stability and performance of Kubernetes workloads, reducing downtime and outages, and improving efficiency. -The "three pillars of observability" are logs, metrics, and traces. - -In the context of Confidential Computing, observability is a delicate subject and needs to be applied such that it doesn't leak any sensitive information. -The following gives an overview of where and how you can apply standard observability tools in Constellation. - -## Cloud resource monitoring - -While inaccessible, Constellation's nodes are still visible as black box VMs to the hypervisor. -Resource consumption, such as memory and CPU utilization, can be monitored from the outside and observed via the cloud platforms directly. -Similarly, other resources, such as storage and network and their respective metrics, are visible via the cloud platform. - -## Metrics - -Metrics are numeric representations of data measured over intervals of time. They're essential for understanding system health and gaining insights using telemetry signals. - -By default, Constellation exposes the [metrics for Kubernetes system components](https://kubernetes.io/docs/concepts/cluster-administration/system-metrics/) inside the cluster. -Similarly, the [etcd metrics](https://etcd.io/docs/v3.5/metrics/) endpoints are exposed inside the cluster. -These [metrics endpoints can be disabled](https://kubernetes.io/docs/concepts/cluster-administration/system-metrics/#disabling-metrics). - -You can collect these cluster-internal metrics via tools such as [Prometheus](https://prometheus.io/) or the [Elastic Stack](https://www.elastic.co/de/elastic-stack/). - -Constellation's CNI Cilium also supports [metrics via Prometheus endpoints](https://docs.cilium.io/en/latest/observability/metrics/). -However, in Constellation, they're disabled by default and must be enabled first. - -## Logs - -Logs represent discrete events that usually describe what's happening with your service. -The payload is an actual message emitted from your system along with a metadata section containing a timestamp, labels, and tracking identifiers. - -### System logs - -Detailed system-level logs are accessible via `/var/log` and [journald](https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html) on the nodes directly. -They can be collected from there, for example, via [Filebeat and Logstash](https://www.elastic.co/guide/en/beats/filebeat/current/logstash-output.html), which are tools of the [Elastic Stack](https://www.elastic.co/de/elastic-stack/). - -In case of an error during the initialization, the CLI automatically collects the [Bootstrapper](./microservices.md#bootstrapper) logs and returns these as a file for [troubleshooting](../workflows/troubleshooting.md). Here is an example of such an event: - -```shell-session -Cluster initialization failed. This error is not recoverable. -Terminate your cluster and try again. -Fetched bootstrapper logs are stored in "constellation-cluster.log" -``` - -### Kubernetes logs - -Constellation supports the [Kubernetes logging architecture](https://kubernetes.io/docs/concepts/cluster-administration/logging/). -By default, logs are written to the nodes' encrypted state disks. -These include the Pod and container logs and the [system component logs](https://kubernetes.io/docs/concepts/cluster-administration/logging/#system-component-logs). 
- -[Constellation services](microservices.md) run as Pods inside the `kube-system` namespace and use the standard container logging mechanism. -The same applies for the [Cilium Pods](https://docs.cilium.io/en/latest/operations/troubleshooting/#logs). - -You can collect logs from within the cluster via tools such as [Fluentd](https://github.com/fluent/fluentd), [Loki](https://github.com/grafana/loki), or the [Elastic Stack](https://www.elastic.co/de/elastic-stack/). - -## Traces - -Modern systems are implemented as interconnected complex and distributed microservices. Understanding request flows and system communications is challenging, mainly because all systems in a chain need to be modified to propagate tracing information. Distributed tracing is a new approach to increasing observability and understanding performance bottlenecks. A trace represents consecutive events that reflect an end-to-end request path in a distributed system. - -Constellation supports [traces for Kubernetes system components](https://kubernetes.io/docs/concepts/cluster-administration/system-traces/). -By default, they're disabled and need to be enabled first. - -Similarly, Cilium can be enabled to [export traces](https://cilium.io/use-cases/metrics-export/). - -You can collect these traces via tools such as [Jaeger](https://www.jaegertracing.io/) or [Zipkin](https://zipkin.io/). - -## Integrations - -Platforms and SaaS solutions such as Datadog, logz.io, Dynatrace, or New Relic facilitate the observability challenge for Kubernetes and provide all-in-one SaaS solutions. -They install agents into the cluster that collect metrics, logs, and tracing information and upload them into the data lake of the platform. -Technically, the agent-based approach is compatible with Constellation, and attaching these platforms is straightforward. -However, you need to evaluate if the exported data might violate Constellation's compliance and privacy guarantees by uploading them to a third-party platform. diff --git a/docs/versioned_docs/version-2.16/architecture/orchestration.md b/docs/versioned_docs/version-2.16/architecture/orchestration.md deleted file mode 100644 index 3c8d529e7..000000000 --- a/docs/versioned_docs/version-2.16/architecture/orchestration.md +++ /dev/null @@ -1,83 +0,0 @@ -# Orchestrating Constellation clusters - -You can use the CLI to create a cluster on the supported cloud platforms. -The CLI provisions the resources in your cloud environment and initiates the initialization of your cluster. -It uses a set of parameters and an optional configuration file to manage your cluster installation. -The CLI is also used for updating your cluster. - -## Workspaces - -Each Constellation cluster has an associated *workspace*. -The workspace is where data such as the Constellation state and config files are stored. -Each workspace is associated with a single cluster and configuration. -The CLI stores state in the local filesystem making the current directory the active workspace. -Multiple clusters require multiple workspaces, hence, multiple directories. -Note that every operation on a cluster always has to be performed from the directory associated with its workspace. - -You may copy files from the workspace to other locations, -but you shouldn't move or delete them while the cluster is still being used. -The Constellation CLI takes care of managing the workspace. -Only when a cluster was terminated, and you are sure the files aren't needed anymore, should you remove a workspace. 
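In practice, this means one directory per cluster. A minimal sketch of starting a fresh workspace (the provider argument is only an example, and IAM and provider credentials are assumed to be set up already):

```shell-session
$ mkdir constellation-prod && cd constellation-prod
$ constellation config generate gcp   # writes the configuration file into this workspace
$ constellation apply                 # all later operations on this cluster run from this directory
```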
- -## Cluster creation process - -To allow for fine-grained configuration of your cluster and cloud environment, Constellation supports an extensive configuration file with strong defaults. [Generating the configuration file](../workflows/config.md) is typically the first thing you do in the workspace. - -Altogether, the following files are generated during the creation of a Constellation cluster and stored in the current workspace: - -* a configuration file -* a state file -* a Base64-encoded master secret -* [Terraform artifacts](../reference/terraform.md), stored in subdirectories -* a Kubernetes `kubeconfig` file. - -After the initialization of your cluster, the CLI will provide you with a Kubernetes `kubeconfig` file. -This file grants you access to your Kubernetes cluster and configures the [kubectl](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) tool. -In addition, the cluster's [identifier](orchestration.md#post-installation-configuration) is returned and stored in the state file. - -### Creation process details - -1. The CLI `apply` command first creates the confidential VM (CVM) resources in your cloud environment and configures the network -2. Each CVM boots the Constellation node image and measures every component in the boot chain -3. The first microservice launched in each node is the [*Bootstrapper*](microservices.md#bootstrapper) -4. The *Bootstrapper* waits until it either receives an initialization request or discovers an initialized cluster -5. The CLI then connects to the *Bootstrapper* of a selected node, sends the configuration, and initiates the initialization of the cluster -6. The *Bootstrapper* of **that** node [initializes the Kubernetes cluster](microservices.md#bootstrapper) and deploys the other Constellation [microservices](microservices.md) including the [*JoinService*](microservices.md#joinservice) -7. Subsequently, the *Bootstrappers* of the other nodes discover the initialized cluster and send join requests to the *JoinService* -8. As part of the join request each node includes an attestation statement of its boot measurements as authentication -9. The *JoinService* verifies the attestation statements and joins the nodes to the Kubernetes cluster -10. This process is repeated for every node joining the cluster later (e.g., through autoscaling) - -## Post-installation configuration - -Post-installation the CLI provides a configuration for [accessing the cluster using the Kubernetes API](https://kubernetes.io/docs/tasks/administer-cluster/access-cluster-api/). -The `kubeconfig` file provides the credentials and configuration for connecting and authenticating to the API server. -Once configured, orchestrate the Kubernetes cluster via `kubectl`. - -After the initialization, the CLI will present you with a couple of tokens: - -* The [*master secret*](keys.md#master-secret) (stored in the `constellation-mastersecret.json` file by default) -* The [*clusterID*](keys.md#cluster-identity) of your cluster in Base64 encoding - -You can read more about these values and their meaning in the guide on [cluster identity](keys.md#cluster-identity). - -The *master secret* must be kept secret and can be used to [recover your cluster](../workflows/recovery.md). -Instead of managing this secret manually, you can [use your key management solution of choice](keys.md#user-managed-key-management) with Constellation. - -The *clusterID* uniquely identifies a cluster and can be used to [verify your cluster](../workflows/verify-cluster.md). 
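A minimal sketch of these post-installation steps, using the files from the workspace (see the linked verification workflow for the exact invocation of the verification command):

```bash
# Use the generated kubeconfig to access the cluster
export KUBECONFIG="$PWD/constellation-admin.conf"
kubectl get nodes

# Verify the cluster's identity and attestation (additional flags may be required,
# see the verification workflow linked above)
constellation verify
```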
- -## Upgrades - -Constellation images and microservices may need to be upgraded to new versions during the lifetime of a cluster. -Constellation implements a rolling update mechanism ensuring no downtime of the control or data plane. -You can upgrade a Constellation cluster with a single operation by using the CLI. -For step-by-step instructions on how to do this, refer to [Upgrade your cluster](../workflows/upgrade.md). - -### Attestation of upgrades - -With every new image, corresponding measurements are released. -During an update procedure, the CLI provides new measurements to the [JoinService](microservices.md#joinservice) securely. -New measurements for an updated image are automatically pulled and verified by the CLI following the [supply chain security concept](attestation.md#chain-of-trust) of Constellation. -The [attestation section](attestation.md#cluster-facing-attestation) describes in detail how these measurements are then used by the JoinService for the attestation of nodes. - - diff --git a/docs/versioned_docs/version-2.16/architecture/overview.md b/docs/versioned_docs/version-2.16/architecture/overview.md deleted file mode 100644 index 386f93b2f..000000000 --- a/docs/versioned_docs/version-2.16/architecture/overview.md +++ /dev/null @@ -1,30 +0,0 @@ -# Overview - -Constellation is a cloud-based confidential orchestration platform. -The foundation of Constellation is Kubernetes and therefore shares the same technology stack and architecture principles. -To learn more about Constellation and Kubernetes, see [product overview](../overview/product.md). - -## About orchestration and updates - -As a cluster administrator, you can use the [Constellation CLI](orchestration.md) to install and deploy a cluster. -Updates are provided in accordance with the [support policy](versions.md). - -## About microservices and attestation - -Constellation manages the nodes and network in your cluster. All nodes are bootstrapped by the [*Bootstrapper*](microservices.md#bootstrapper). They're verified and authenticated by the [*JoinService*](microservices.md#joinservice) before being added to the cluster and the network. Finally, the entire cluster can be verified via the [*VerificationService*](microservices.md#verificationservice) using [remote attestation](attestation.md). - -## About node images and verified boot - -Constellation comes with operating system images for Kubernetes control-plane and worker nodes. -They're highly optimized for running containerized workloads and specifically prepared for running inside confidential VMs. -You can learn more about [the images](images.md) and how verified boot ensures their integrity during boot and beyond. - -## About key management and cryptographic primitives - -Encryption of data at-rest, in-transit, and in-use is the fundamental building block for confidential computing and Constellation. Learn more about the [keys and cryptographic primitives](keys.md) used in Constellation, [encrypted persistent storage](encrypted-storage.md), and [network encryption](networking.md). - -## About observability - -Observability in Kubernetes refers to the capability to troubleshoot issues using telemetry signals such as logs, metrics, and traces. -In the realm of Confidential Computing, it's crucial that observability aligns with confidentiality, necessitating careful implementation. -Learn more about the [observability capabilities in Constellation](./observability.md). 
diff --git a/docs/versioned_docs/version-2.16/architecture/versions.md b/docs/versioned_docs/version-2.16/architecture/versions.md deleted file mode 100644 index fbdda5a57..000000000 --- a/docs/versioned_docs/version-2.16/architecture/versions.md +++ /dev/null @@ -1,21 +0,0 @@ -# Versions and support policy - -All components of Constellation use a three-digit version number of the form `v..`. -The components are released in lock step, usually on the first Tuesday of every month. This release primarily introduces new features, but may also include security or performance improvements. The `MINOR` version will be incremented as part of this release. - -Additional `PATCH` releases may be created on demand, to fix security issues or bugs before the next `MINOR` release window. - -New releases are published on [GitHub](https://github.com/edgelesssys/constellation/releases). - -## Kubernetes support policy - -Constellation is aligned to the [version support policy of Kubernetes](https://kubernetes.io/releases/version-skew-policy/#supported-versions), and therefore usually supports the most recent three minor versions. -When a new minor version of Kubernetes is released, support is added to the next Constellation release, and that version then supports four Kubernetes versions. -Subsequent Constellation releases drop support for the oldest (and deprecated) Kubernetes version. - -The following Kubernetes versions are currently supported: - - -* v1.27.9 -* v1.28.5 -* v1.29.0 diff --git a/docs/versioned_docs/version-2.16/getting-started/examples.md b/docs/versioned_docs/version-2.16/getting-started/examples.md deleted file mode 100644 index fded84980..000000000 --- a/docs/versioned_docs/version-2.16/getting-started/examples.md +++ /dev/null @@ -1,6 +0,0 @@ -# Examples - -After you [installed the CLI](install.md) and [created your first cluster](first-steps.md), you're ready to deploy applications. Why not start with one of the following examples? -* [Emojivoto](examples/emojivoto.md): a simple but fun web application -* [Online Boutique](examples/online-boutique.md): an e-commerce demo application by Google consisting of 11 separate microservices -* [Horizontal Pod Autoscaling](examples/horizontal-scaling.md): an example demonstrating Constellation's autoscaling capabilities diff --git a/docs/versioned_docs/version-2.16/getting-started/examples/emojivoto.md b/docs/versioned_docs/version-2.16/getting-started/examples/emojivoto.md deleted file mode 100644 index 2bbe27917..000000000 --- a/docs/versioned_docs/version-2.16/getting-started/examples/emojivoto.md +++ /dev/null @@ -1,22 +0,0 @@ -# Emojivoto -[Emojivoto](https://github.com/BuoyantIO/emojivoto) is a simple and fun application that's well suited to test the basic functionality of your cluster. - - - -emojivoto - Web UI - - - -1. Deploy the application: - ```bash - kubectl apply -k github.com/BuoyantIO/emojivoto/kustomize/deployment - ``` -2. Wait until it becomes available: - ```bash - kubectl wait --for=condition=available --timeout=60s -n emojivoto --all deployments - ``` -3. Forward the web service to your machine: - ```bash - kubectl -n emojivoto port-forward svc/web-svc 8080:80 - ``` -4. 
Visit [http://localhost:8080](http://localhost:8080) diff --git a/docs/versioned_docs/version-2.16/getting-started/examples/filestash-s3proxy.md b/docs/versioned_docs/version-2.16/getting-started/examples/filestash-s3proxy.md deleted file mode 100644 index b9a394256..000000000 --- a/docs/versioned_docs/version-2.16/getting-started/examples/filestash-s3proxy.md +++ /dev/null @@ -1,107 +0,0 @@ - -# Deploying Filestash - -Filestash is a web frontend for different storage backends, including S3. -It's a useful application to showcase s3proxy in action. - -1. Deploy s3proxy as described in [Deployment](../../workflows/s3proxy.md#deployment). -2. Create a deployment file for Filestash with one pod: - -```sh -cat << EOF > "deployment-filestash.yaml" -apiVersion: apps/v1 -kind: Deployment -metadata: - name: filestash -spec: - replicas: 1 - selector: - matchLabels: - app: filestash - template: - metadata: - labels: - app: filestash - spec: - hostAliases: - - ip: $(kubectl get svc s3proxy-service -o=jsonpath='{.spec.clusterIP}') - hostnames: - - "s3.us-east-1.amazonaws.com" - - "s3.us-east-2.amazonaws.com" - - "s3.us-west-1.amazonaws.com" - - "s3.us-west-2.amazonaws.com" - - "s3.eu-north-1.amazonaws.com" - - "s3.eu-south-1.amazonaws.com" - - "s3.eu-south-2.amazonaws.com" - - "s3.eu-west-1.amazonaws.com" - - "s3.eu-west-2.amazonaws.com" - - "s3.eu-west-3.amazonaws.com" - - "s3.eu-central-1.amazonaws.com" - - "s3.eu-central-2.amazonaws.com" - - "s3.ap-northeast-1.amazonaws.com" - - "s3.ap-northeast-2.amazonaws.com" - - "s3.ap-northeast-3.amazonaws.com" - - "s3.ap-east-1.amazonaws.com" - - "s3.ap-southeast-1.amazonaws.com" - - "s3.ap-southeast-2.amazonaws.com" - - "s3.ap-southeast-3.amazonaws.com" - - "s3.ap-southeast-4.amazonaws.com" - - "s3.ap-south-1.amazonaws.com" - - "s3.ap-south-2.amazonaws.com" - - "s3.me-south-1.amazonaws.com" - - "s3.me-central-1.amazonaws.com" - - "s3.il-central-1.amazonaws.com" - - "s3.af-south-1.amazonaws.com" - - "s3.ca-central-1.amazonaws.com" - - "s3.sa-east-1.amazonaws.com" - containers: - - name: filestash - image: machines/filestash:latest - ports: - - containerPort: 8334 - volumeMounts: - - name: ca-cert - mountPath: /etc/ssl/certs/kube-ca.crt - subPath: kube-ca.crt - volumes: - - name: ca-cert - secret: - secretName: s3proxy-tls - items: - - key: ca.crt - path: kube-ca.crt -EOF -``` - -The pod spec includes the `hostAliases` key, which adds an entry to the pod's `/etc/hosts`. -The entry forwards all requests for any of the currently defined AWS regions to the Kubernetes service `s3proxy-service`. -If you followed the s3proxy [Deployment](../../workflows/s3proxy.md#deployment) guide, this service points to a s3proxy pod. - -The deployment specifies all regions explicitly to prevent accidental data leaks. -If one of your buckets were located in a region that's not part of the `hostAliases` key, traffic towards those buckets would not be redirected to s3proxy. -Similarly, if you want to exclude data for specific regions from going through s3proxy you can remove those regions from the deployment. - -The spec also includes a volume mount for the TLS certificate and adds it to the pod's certificate trust store. -The volume is called `ca-cert`. -The key `ca.crt` of that volume is mounted to `/etc/ssl/certs/kube-ca.crt`, which is the default certificate trust store location for that container's OpenSSL library. -Not adding the CA certificate will result in TLS authentication errors. - -3. 
Apply the file: `kubectl apply -f deployment-filestash.yaml` - -Afterward, you can use a port forward to access the Filestash pod: -`kubectl port-forward pod/$(kubectl get pod --selector='app=filestash' -o=jsonpath='{.items[*].metadata.name}') 8334:8334` - -4. After browsing to `localhost:8443`, Filestash will ask you to set an administrator password. -After setting it, you can directly leave the admin area by clicking the blue cloud symbol in the top left corner. -Subsequently, you can select S3 as storage backend and enter your credentials. -This will bring you to an overview of your buckets. -If you want to deploy Filestash in production, take a look at its [documentation](https://www.filestash.app/docs/). - -5. To see the logs of s3proxy intercepting requests made to S3, run: `kubectl logs -f pod/$(kubectl get pod --selector='app=s3proxy' -o=jsonpath='{.items[*].metadata.name}')` -Look out for log messages labeled `intercepting`. -There is one such log message for each message that's encrypted, decrypted, or blocked. - -6. Once you have uploaded a file with Filestash, you should be able to view the file in Filestash. -However, if you go to the AWS S3 [Web UI](https://s3.console.aws.amazon.com/s3/home) and download the file you just uploaded in Filestash, you won't be able to read it. -Another way to spot encrypted files without downloading them is to click on a file, scroll to the Metadata section, and look for the header named `x-amz-meta-constellation-encryption`. -This header holds the encrypted data encryption key of the object and is only present on objects that are encrypted by s3proxy. diff --git a/docs/versioned_docs/version-2.16/getting-started/examples/horizontal-scaling.md b/docs/versioned_docs/version-2.16/getting-started/examples/horizontal-scaling.md deleted file mode 100644 index dfaf9e742..000000000 --- a/docs/versioned_docs/version-2.16/getting-started/examples/horizontal-scaling.md +++ /dev/null @@ -1,98 +0,0 @@ -# Horizontal Pod Autoscaling -This example demonstrates Constellation's autoscaling capabilities. It's based on the Kubernetes [HorizontalPodAutoscaler Walkthrough](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/). During the following steps, Constellation will spawn new VMs on demand, verify them, add them to the cluster, and delete them again when the load has settled down. - -## Requirements -The cluster needs to be initialized with Kubernetes 1.23 or later. In addition, [autoscaling must be enabled](../../workflows/scale.md) to enable Constellation to assign new nodes dynamically. - -Just for this example specifically, the cluster should have as few worker nodes in the beginning as possible. Start with a small cluster with only *one* low-powered node for the control-plane node and *one* low-powered worker node. - -:::info -We tested the example using instances of types `Standard_DC4as_v5` on Azure and `n2d-standard-4` on GCP. -::: - -## Setup - -1. Install the Kubernetes Metrics Server: - ```bash - kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml - ``` - -2. Deploy the HPA example server that's supposed to be scaled under load. - - This manifest is similar to the one from the Kubernetes HPA walkthrough, but with increased CPU limits and requests to facilitate the triggering of node scaling events. - ```bash - cat < - -Online Boutique - Web UI - - - -1. Create a namespace: - ```bash - kubectl create ns boutique - ``` -2. 
Deploy the application: - ```bash - kubectl apply -n boutique -f https://github.com/GoogleCloudPlatform/microservices-demo/raw/main/release/kubernetes-manifests.yaml - ``` -3. Wait for all services to become available: - ```bash - kubectl wait --for=condition=available --timeout=300s -n boutique --all deployments - ``` -4. Get the frontend's external IP address: - ```shell-session - $ kubectl get service frontend-external -n boutique | awk '{print $4}' - EXTERNAL-IP - - ``` - (`` is a placeholder for the IP assigned by your CSP.) -5. Enter the IP from the result in your browser to browse the online shop. diff --git a/docs/versioned_docs/version-2.16/getting-started/first-steps-local.md b/docs/versioned_docs/version-2.16/getting-started/first-steps-local.md deleted file mode 100644 index 98f0302de..000000000 --- a/docs/versioned_docs/version-2.16/getting-started/first-steps-local.md +++ /dev/null @@ -1,277 +0,0 @@ -# First steps with a local cluster - -A local cluster lets you deploy and test Constellation without a cloud subscription. -You have two options: - -* Use MiniConstellation to automatically deploy a two-node cluster. -* For more fine-grained control, create the cluster using the QEMU provider. - -Both options use virtualization to create a local cluster with control-plane nodes and worker nodes. They **don't** require hardware with Confidential VM (CVM) support. For attestation, they currently use a software-based vTPM provided by KVM/QEMU. - -You need an x64 machine with a Linux OS. -You can use a VM, but it needs nested virtualization. - -## Prerequisites - -* Machine requirements: - * An x86-64 CPU with at least 4 cores (6 cores are recommended) - * At least 4 GB RAM (6 GB are recommended) - * 20 GB of free disk space - * Hardware virtualization enabled in the BIOS/UEFI (often referred to as Intel VT-x or AMD-V/SVM) / nested-virtualization support when using a VM -* Software requirements: - * Linux OS with [KVM kernel module](https://www.linux-kvm.org/page/Main_Page) - * Recommended: Ubuntu 22.04 LTS - * [Docker](https://docs.docker.com/engine/install/) - * [xsltproc](https://gitlab.gnome.org/GNOME/libxslt/-/wikis/home) - * (Optional) [virsh](https://www.libvirt.org/manpages/virsh.html) to observe and access your nodes - -### Software installation on Ubuntu - -```bash -# install Docker -curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg -echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null -sudo apt update -sudo apt install docker-ce -# install other dependencies -sudo apt install xsltproc -sudo snap install kubectl --classic -# install Constellation CLI -curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-linux-amd64 -sudo install constellation-linux-amd64 /usr/local/bin/constellation -# do not drop forwarded packages -sudo iptables -P FORWARD ACCEPT -``` - -## Create a cluster - - - - - -With the `constellation mini` command, you can deploy and test Constellation locally. This mode is called MiniConstellation. Conceptually, MiniConstellation is similar to [MicroK8s](https://microk8s.io/), [K3s](https://k3s.io/), and [minikube](https://minikube.sigs.k8s.io/docs/). - - -:::caution - -MiniConstellation has specific soft- and hardware requirements such as a Linux OS running on an x86-64 CPU. 
Pay attention to all [prerequisites](#prerequisites) when setting up. - -::: - -:::note - -Since MiniConstellation runs on your local system, cloud features such as load balancing, -attaching persistent storage, or autoscaling aren't available. - -::: - -The following creates your MiniConstellation cluster (may take up to 10 minutes to complete): - -```bash -constellation mini up -``` - -This will configure your current directory as the [workspace](../architecture/orchestration.md#workspaces) for this cluster. -All `constellation` commands concerning this cluster need to be issued from this directory. - - - - -With the QEMU provider, you can create a local Constellation cluster as if it were in the cloud. The provider uses [QEMU](https://www.qemu.org/) to create multiple VMs for the cluster nodes, which interact with each other. - -:::caution - -Constellation on QEMU has specific soft- and hardware requirements such as a Linux OS running on an x86-64 CPU. Pay attention to all [prerequisites](#prerequisites) when setting up. - -::: - -:::note - -Since Constellation on QEMU runs on your local system, cloud features such as load balancing, -attaching persistent storage, or autoscaling aren't available. - -::: - -1. To set up your local cluster, you need to create a configuration file for Constellation first. - - ```bash - constellation config generate qemu - ``` - - This creates a [configuration file](../workflows/config.md) for QEMU called `constellation-conf.yaml`. After that, your current folder also becomes your [workspace](../architecture/orchestration.md#workspaces). All `constellation` commands for your cluster need to be executed from this directory. - -2. Now you can create your cluster and its nodes. `constellation apply` uses the options set in `constellation-conf.yaml`. - - ```bash - constellation apply -y - ``` - - The Output should look like the following: - - ```shell-session - $ constellation apply -y - Checking for infrastructure changes - The following Constellation cluster will be created: - 3 control-plane nodes of type 2-vCPUs will be created. - 1 worker node of type 2-vCPUs will be created. - Creating - Cloud infrastructure created successfully. - Your Constellation master secret was successfully written to ./constellation-mastersecret.json - Connecting - Initializing cluster - Installing Kubernetes components - Your Constellation cluster was successfully initialized. - - Constellation cluster identifier g6iMP5wRU1b7mpOz2WEISlIYSfdAhB0oNaOg6XEwKFY= - Kubernetes configuration constellation-admin.conf - - You can now connect to your cluster by executing: - export KUBECONFIG="$PWD/constellation-admin.conf" - ``` - - The cluster's identifier will be different in your output. - Keep `constellation-mastersecret.json` somewhere safe. - This will allow you to [recover your cluster](../workflows/recovery.md) in case of a disaster. - - :::info - - Depending on your setup, `constellation apply` may take 10+ minutes to complete. - - ::: - -3. Configure kubectl - - ```bash - export KUBECONFIG="$PWD/constellation-admin.conf" - ``` - - - - -## Connect to the cluster - -Your cluster initially consists of a single control-plane node: - -```shell-session -$ kubectl get nodes -NAME STATUS ROLES AGE VERSION -control-plane-0 Ready control-plane 66s v1.24.6 -``` - -Additional nodes will request to join the cluster shortly. 
Before each additional node is allowed to join the cluster, its state is verified using remote attestation by the [JoinService](../architecture/microservices.md#joinservice). -If verification passes successfully, the new node receives keys and certificates to join the cluster. - -You can follow this process by viewing the logs of the JoinService: - -```shell-session -$ kubectl logs -n kube-system daemonsets/join-service -f -{"level":"INFO","ts":"2022-10-14T09:32:20Z","caller":"cmd/main.go:48","msg":"Constellation Node Join Service","version":"2.1.0","cloudProvider":"qemu"} -{"level":"INFO","ts":"2022-10-14T09:32:20Z","logger":"validator","caller":"watcher/validator.go:96","msg":"Updating expected measurements"} -... -``` - -Once all nodes have joined your cluster, it may take a couple of minutes for all resources to become available. -You can check on the state of your cluster by running the following: - -```shell-session -$ kubectl get nodes -NAME STATUS ROLES AGE VERSION -control-plane-0 Ready control-plane 2m59s v1.24.6 -worker-0 Ready 32s v1.24.6 -``` - -## Deploy a sample application - -1. Deploy the [emojivoto app](https://github.com/BuoyantIO/emojivoto) - - ```bash - kubectl apply -k github.com/BuoyantIO/emojivoto/kustomize/deployment - ``` - -2. Expose the frontend service locally - - ```bash - kubectl wait --for=condition=available --timeout=60s -n emojivoto --all deployments - kubectl -n emojivoto port-forward svc/web-svc 8080:80 & - curl http://localhost:8080 - kill %1 - ``` - -## Terminate your cluster - - - - -Once you are done, you can clean up the created resources using the following command: - -```bash -constellation mini down -``` - -This will destroy your cluster and clean up your workspace. -The VM image and cluster configuration file (`constellation-conf.yaml`) will be kept and may be reused to create new clusters. - - - - -Once you are done, you can clean up the created resources using the following command: - -```bash -constellation terminate -``` - -This should give the following output: - -```shell-session -$ constellation terminate -You are about to terminate a Constellation cluster. -All of its associated resources will be DESTROYED. -This action is irreversible and ALL DATA WILL BE LOST. -Do you want to continue? [y/n]: -``` - -Confirm with `y` to terminate the cluster: - -```shell-session -Terminating ... -Your Constellation cluster was terminated successfully. -``` - -This will destroy your cluster and clean up your workspace. -The VM image and cluster configuration file (`constellation-conf.yaml`) will be kept and may be reused to create new clusters. - - - - -## Troubleshooting - -Make sure to use the [latest release](https://github.com/edgelesssys/constellation/releases/latest) and check out the [known issues](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22). - -### VMs have no internet access / CLI remains in "Initializing cluster" state - -`iptables` rules may prevent your VMs from accessing the internet. -Make sure your rules aren't dropping forwarded packages. 
- -List your rules: - -```bash -sudo iptables -S -``` - -The output may look similar to the following: - -```shell-session --P INPUT ACCEPT --P FORWARD DROP --P OUTPUT ACCEPT --N DOCKER --N DOCKER-ISOLATION-STAGE-1 --N DOCKER-ISOLATION-STAGE-2 --N DOCKER-USER -``` - -If your `FORWARD` chain is set to `DROP`, you need to update your rules: - -```bash -sudo iptables -P FORWARD ACCEPT -``` diff --git a/docs/versioned_docs/version-2.16/getting-started/first-steps.md b/docs/versioned_docs/version-2.16/getting-started/first-steps.md deleted file mode 100644 index 738868551..000000000 --- a/docs/versioned_docs/version-2.16/getting-started/first-steps.md +++ /dev/null @@ -1,229 +0,0 @@ -# First steps with Constellation - -The following steps guide you through the process of creating a cluster and deploying a sample app. This example assumes that you have successfully [installed and set up Constellation](install.md), -and have access to a cloud subscription. - -:::tip -If you don't have a cloud subscription, you can also set up a [local Constellation cluster using virtualization](../getting-started/first-steps-local.md) for testing. -::: - -:::note -If you encounter any problem with the following steps, make sure to use the [latest release](https://github.com/edgelesssys/constellation/releases/latest) and check out the [known issues](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22). -::: - -## Create a cluster - -1. Create the [configuration file](../workflows/config.md) and state file for your cloud provider. - - - - - ```bash - constellation config generate aws - ``` - - - - - ```bash - constellation config generate azure - ``` - - - - - ```bash - constellation config generate gcp - ``` - - - - - ```bash - constellation config generate stackit - ``` - - - - -2. Create your [IAM configuration](../workflows/config.md#creating-an-iam-configuration). - - - - - ```bash - constellation iam create aws --zone=us-east-2a --prefix=constellTest --update-config - ``` - - This command creates IAM configuration for the AWS zone `us-east-2a` using the prefix `constellTest` for all named resources being created. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. - - Depending on the attestation variant selected on config generation, different regions are available. - AMD SEV-SNP machines (requires the default attestation variant `awsSEVSNP`) are currently available in the following regions: - * `eu-west-1` - * `us-east-2` - - You can find a list of regions that support AMD SEV-SNP in [AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/snp-requirements.html). - - NitroTPM machines (requires the attestation variant `awsNitroTPM`) are available in all regions. - Constellation OS images are currently replicated to the following regions: - * `eu-central-1` - * `eu-west-1` - * `eu-west-3` - * `us-east-2` - * `ap-south-1` - - If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+AWS+image+region:+xx-xxxx-x). - - You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). 
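If you're unsure whether a region offers SEV-SNP-capable instances, AWS's SEV-SNP documentation linked above describes how to query this. A hedged sketch with the AWS CLI (region and output format are examples):

```bash
# List instance types with AMD SEV-SNP support in a given region
aws ec2 describe-instance-types \
  --region us-east-2 \
  --filters Name=processor-info.supported-features,Values=amd-sev-snp \
  --query "InstanceTypes[*].InstanceType" \
  --output text
```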
- - - - - ```bash - constellation iam create azure --region=westus --resourceGroup=constellTest --servicePrincipal=spTest --update-config - ``` - - This command creates IAM configuration on the Azure region `westus` creating a new resource group `constellTest` and a new service principal `spTest`. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. - - CVMs are available in several Azure regions. Constellation OS images are currently replicated to the following: - - * `germanywestcentral` - * `westus` - * `eastus` - * `northeurope` - * `westeurope` - * `southeastasia` - - If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+Azure+image+region:+xx-xxxx-x). - - You can find a list of all [regions in Azure's documentation](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=virtual-machines®ions=all). - - - - - ```bash - constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --serviceAccountID=constell-test --update-config - ``` - - This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. - - Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `C2D` or `N2D`. - - - - - To use Constellation on STACKIT, the cluster will use the User Access Token (UAT) that's generated [during the install step](./install.md). - After creating the accounts, fill in the STACKIT details in `constellation-conf.yaml` under `provider.openstack`: - - * `stackitProjectID`: STACKIT project id (can be found after login on the [STACKIT portal](https://portal.stackit.cloud)) - - - - - :::tip - To learn about all options you have for managing IAM resources and Constellation configuration, see the [Configuration workflow](../workflows/config.md). - ::: - - - -3. Create the cluster. `constellation apply` uses options set in `constellation-conf.yaml`. - If you want to manually manage your cloud resources, for example by using [Terraform](../reference/terraform.md), follow the corresponding instructions in the [Create workflow](../workflows/create.md). - - :::tip - - On Azure, you may need to wait 15+ minutes at this point for role assignments to propagate. - - ::: - - ```bash - constellation apply -y - ``` - - This should look similar to the following: - - ```shell-session - $ constellation apply -y - Checking for infrastructure changes - The following Constellation cluster will be created: - 3 control-plane nodes of type n2d-standard-4 will be created. - 1 worker node of type n2d-standard-4 will be created. - Creating - Cloud infrastructure created successfully - Your Constellation master secret was successfully written to ./constellation-mastersecret.json - Connecting - Initializing cluster - Installing Kubernetes components - Your Constellation cluster was successfully initialized. 
- - Constellation cluster identifier g6iMP5wRU1b7mpOz2WEISlIYSfdAhB0oNaOg6XEwKFY= - Kubernetes configuration constellation-admin.conf - - You can now connect to your cluster by executing: - export KUBECONFIG="$PWD/constellation-admin.conf" - ``` - - The cluster's identifier will be different in your output. - Keep `constellation-mastersecret.json` somewhere safe. - This will allow you to [recover your cluster](../workflows/recovery.md) in case of a disaster. - - :::info - - Depending on your CSP and region, `constellation apply` may take 10+ minutes to complete. - - ::: - -4. Configure kubectl. - - ```bash - export KUBECONFIG="$PWD/constellation-admin.conf" - ``` - -## Deploy a sample application - -1. Deploy the [emojivoto app](https://github.com/BuoyantIO/emojivoto) - - ```bash - kubectl apply -k github.com/BuoyantIO/emojivoto/kustomize/deployment - ``` - -2. Expose the frontend service locally - - ```bash - kubectl wait --for=condition=available --timeout=60s -n emojivoto --all deployments - kubectl -n emojivoto port-forward svc/web-svc 8080:80 & - curl http://localhost:8080 - kill %1 - ``` - -## Terminate your cluster - -Use the CLI to terminate your cluster. If you manually used [Terraform](../reference/terraform.md) to manage your cloud resources, follow the corresponding instructions in the [Terminate workflow](../workflows/terminate.md). - -```bash -constellation terminate -``` - -This should give the following output: - -```shell-session -$ constellation terminate -You are about to terminate a Constellation cluster. -All of its associated resources will be DESTROYED. -This action is irreversible and ALL DATA WILL BE LOST. -Do you want to continue? [y/n]: -``` - -Confirm with `y` to terminate the cluster: - -```shell-session -Terminating ... -Your Constellation cluster was terminated successfully. -``` - -Optionally, you can also [delete your IAM resources](../workflows/config.md#deleting-an-iam-configuration). diff --git a/docs/versioned_docs/version-2.16/getting-started/install.md b/docs/versioned_docs/version-2.16/getting-started/install.md deleted file mode 100644 index d52e43476..000000000 --- a/docs/versioned_docs/version-2.16/getting-started/install.md +++ /dev/null @@ -1,429 +0,0 @@ -# Installation and setup - -Constellation runs entirely in your cloud environment and can be controlled via a dedicated [command-line interface (CLI)](../reference/cli.md) or a [Terraform provider](../workflows/terraform-provider.md). - -## Prerequisites - -Make sure the following requirements are met: - -* Your machine is running Linux, macOS, or Windows -* You have admin rights on your machine -* [kubectl](https://kubernetes.io/docs/tasks/tools/) is installed -* Your CSP is Amazon Web Services (AWS), Microsoft Azure, Google Cloud Platform (GCP), or STACKIT - -## Install the Constellation CLI - -:::tip - -If you prefer to use Terraform, you can alternatively use the [Terraform provider](../workflows/terraform-provider.md) to manage the cluster's lifecycle. - -::: - -The CLI executable is available at [GitHub](https://github.com/edgelesssys/constellation/releases). -Install it with the following commands: - - - - -1. Download the CLI: - -```bash -curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-linux-amd64 -``` - -2. [Verify the signature](../workflows/verify-cli.md) (optional) - -3. Install the CLI to your PATH: - -```bash -sudo install constellation-linux-amd64 /usr/local/bin/constellation -``` - - - - -1. 
Download the CLI: - -```bash -curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-linux-arm64 -``` - -2. [Verify the signature](../workflows/verify-cli.md) (optional) - -3. Install the CLI to your PATH: - -```bash -sudo install constellation-linux-arm64 /usr/local/bin/constellation -``` - - - - - -1. Download the CLI: - -```bash -curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-darwin-arm64 -``` - -2. [Verify the signature](../workflows/verify-cli.md) (optional) - -3. Install the CLI to your PATH: - -```bash -sudo install constellation-darwin-arm64 /usr/local/bin/constellation -``` - - - - - -1. Download the CLI: - -```bash -curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-darwin-amd64 -``` - -2. [Verify the signature](../workflows/verify-cli.md) (optional) - -3. Install the CLI to your PATH: - -```bash -sudo install constellation-darwin-amd64 /usr/local/bin/constellation -``` - - - - - -1. Download the CLI: - -```bash -Invoke-WebRequest -OutFile ./constellation.exe -Uri 'https://github.com/edgelesssys/constellation/releases/latest/download/constellation-windows-amd64.exe' -``` - -2. [Verify the signature](../workflows/verify-cli.md) (optional) - -3. Install the CLI under `C:\Program Files\Constellation\bin\constellation.exe` - -3. Add the CLI to your PATH: - - 1. Open `Advanced system settings` by searching for the App in the Windows search - 2. Go to the `Advanced` tab - 3. Click `Environment Variables…` - 4. Click variable called `Path` and click `Edit…` - 5. Click `New` - 6. Enter the path to the folder containing the binary you want on your PATH: `C:\Program Files\Constellation\bin` - - - - -:::tip -The CLI supports autocompletion for various shells. To set it up, run `constellation completion` and follow the given steps. -::: - -## Set up cloud credentials - -Constellation makes authenticated calls to the CSP API. Therefore, you need to set up Constellation with the credentials for your CSP. - -:::tip -If you don't have a cloud subscription, you can also set up a [local Constellation cluster using virtualization](../getting-started/first-steps-local.md) for testing. -::: - -### Required permissions - - - - -To set up a Constellation cluster, you need to perform two tasks that require permissions: create the infrastructure and create roles for cluster nodes. Both of these actions can be performed by different users, e.g., an administrator to create roles and a DevOps engineer to create the infrastructure. - -To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "ec2:DescribeAccountAttributes", - "iam:AddRoleToInstanceProfile", - "iam:AttachRolePolicy", - "iam:CreateInstanceProfile", - "iam:CreatePolicy", - "iam:CreateRole", - "iam:DeleteInstanceProfile", - "iam:DeletePolicy", - "iam:DeletePolicyVersion", - "iam:DeleteRole", - "iam:DetachRolePolicy", - "iam:GetInstanceProfile", - "iam:GetPolicy", - "iam:GetPolicyVersion", - "iam:GetRole", - "iam:ListAttachedRolePolicies", - "iam:ListInstanceProfilesForRole", - "iam:ListPolicyVersions", - "iam:ListRolePolicies", - "iam:PassRole", - "iam:RemoveRoleFromInstanceProfile", - "sts:GetCallerIdentity" - ], - "Resource": "*" - } - ] -} -``` - -The built-in `AdministratorAccess` policy is a superset of these permissions. 
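If you prefer a dedicated policy over `AdministratorAccess`, the JSON above can be turned into one with the AWS CLI. This is a sketch; the policy name, file name, user name, and account ID are hypothetical placeholders:

```bash
# Save the JSON policy above to constellation-iam-policy.json, then create and attach it
aws iam create-policy \
  --policy-name constellation-iam-creation \
  --policy-document file://constellation-iam-policy.json
aws iam attach-user-policy \
  --user-name <deploying-user> \
  --policy-arn "arn:aws:iam::<account-id>:policy/constellation-iam-creation"
```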
- -To [create a Constellation cluster](../workflows/create.md), see the permissions of [main.tf](https://github.com/edgelesssys/constellation/blob/main/terraform/infrastructure/iam/aws/main.tf). - -The built-in `PowerUserAccess` policy is a superset of these permissions. - -Follow Amazon's guide on [understanding](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) and [managing policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html). - - - - -The following [resource providers need to be registered](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/resource-providers-and-types#register-resource-provider) in your subscription: - -* `Microsoft.Attestation` -* `Microsoft.Compute` -* `Microsoft.Insights` -* `Microsoft.ManagedIdentity` -* `Microsoft.Network` - -By default, Constellation tries to register these automatically if they haven't been registered before. - -To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: - -* `*/register/action` \[1] -* `Microsoft.Authorization/roleAssignments/*` -* `Microsoft.Authorization/roleDefinitions/*` -* `Microsoft.ManagedIdentity/userAssignedIdentities/*` -* `Microsoft.Resources/subscriptions/resourcegroups/*` - -The built-in `Owner` role is a superset of these permissions. - -To [create a Constellation cluster](../workflows/create.md), you need the following permissions: - -* `Microsoft.Attestation/attestationProviders/*` -* `Microsoft.Compute/virtualMachineScaleSets/*` -* `Microsoft.Insights/components/*` -* `Microsoft.ManagedIdentity/userAssignedIdentities/*` -* `Microsoft.Network/loadBalancers/*` -* `Microsoft.Network/loadBalancers/backendAddressPools/*` -* `Microsoft.Network/networkSecurityGroups/*` -* `Microsoft.Network/publicIPAddresses/*` -* `Microsoft.Network/virtualNetworks/*` -* `Microsoft.Network/virtualNetworks/subnets/*` -* `Microsoft.Network/natGateways/*` - -The built-in `Contributor` role is a superset of these permissions. - -Follow Microsoft's guide on [understanding](https://learn.microsoft.com/en-us/azure/role-based-access-control/role-definitions) and [assigning roles](https://learn.microsoft.com/en-us/azure/role-based-access-control/role-assignments). - -1: You can omit `*/register/Action` if the resource providers mentioned above are already registered and the `ARM_SKIP_PROVIDER_REGISTRATION` environment variable is set to `true` when creating the IAM configuration. - - - - -Create a new project for Constellation or use an existing one. -Enable the [Compute Engine API](https://console.cloud.google.com/apis/library/compute.googleapis.com) on it. - -To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: - -* `iam.serviceAccountKeys.create` -* `iam.serviceAccountKeys.delete` -* `iam.serviceAccountKeys.get` -* `iam.serviceAccounts.create` -* `iam.serviceAccounts.delete` -* `iam.serviceAccounts.get` -* `resourcemanager.projects.getIamPolicy` -* `resourcemanager.projects.setIamPolicy` - -Together, the built-in roles `roles/editor` and `roles/resourcemanager.projectIamAdmin` form a superset of these permissions. 
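For example, the two built-in roles mentioned above could be granted to the deploying user like this (a sketch; the project ID and user are placeholders):

```bash
# Grant the built-in roles that together cover the IAM-creation permissions
gcloud projects add-iam-policy-binding yourproject-12345 \
  --member="user:you@example.com" \
  --role="roles/editor"
gcloud projects add-iam-policy-binding yourproject-12345 \
  --member="user:you@example.com" \
  --role="roles/resourcemanager.projectIamAdmin"
```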
- -To [create a Constellation cluster](../workflows/create.md), you need the following permissions: - -* `compute.addresses.createInternal` -* `compute.addresses.deleteInternal` -* `compute.addresses.get` -* `compute.addresses.useInternal` -* `compute.backendServices.create` -* `compute.backendServices.delete` -* `compute.backendServices.get` -* `compute.backendServices.use` -* `compute.disks.create` -* `compute.firewalls.create` -* `compute.firewalls.delete` -* `compute.firewalls.get` -* `compute.firewalls.update` -* `compute.globalAddresses.create` -* `compute.globalAddresses.delete` -* `compute.globalAddresses.get` -* `compute.globalAddresses.use` -* `compute.globalForwardingRules.create` -* `compute.globalForwardingRules.delete` -* `compute.globalForwardingRules.get` -* `compute.globalForwardingRules.setLabels` -* `compute.globalOperations.get` -* `compute.healthChecks.create` -* `compute.healthChecks.delete` -* `compute.healthChecks.get` -* `compute.healthChecks.useReadOnly` -* `compute.instanceGroupManagers.create` -* `compute.instanceGroupManagers.delete` -* `compute.instanceGroupManagers.get` -* `compute.instanceGroupManagers.update` -* `compute.instanceGroups.create` -* `compute.instanceGroups.delete` -* `compute.instanceGroups.get` -* `compute.instanceGroups.update` -* `compute.instanceGroups.use` -* `compute.instances.create` -* `compute.instances.setLabels` -* `compute.instances.setMetadata` -* `compute.instances.setTags` -* `compute.instanceTemplates.create` -* `compute.instanceTemplates.delete` -* `compute.instanceTemplates.get` -* `compute.instanceTemplates.useReadOnly` -* `compute.networks.create` -* `compute.networks.delete` -* `compute.networks.get` -* `compute.networks.updatePolicy` -* `compute.routers.create` -* `compute.routers.delete` -* `compute.routers.get` -* `compute.routers.update` -* `compute.subnetworks.create` -* `compute.subnetworks.delete` -* `compute.subnetworks.get` -* `compute.subnetworks.use` -* `compute.targetTcpProxies.create` -* `compute.targetTcpProxies.delete` -* `compute.targetTcpProxies.get` -* `compute.targetTcpProxies.use` -* `iam.serviceAccounts.actAs` - -Together, the built-in roles `roles/editor`, `roles/compute.instanceAdmin` and `roles/resourcemanager.projectIamAdmin` form a superset of these permissions. - -Follow Google's guide on [understanding](https://cloud.google.com/iam/docs/understanding-roles) and [assigning roles](https://cloud.google.com/iam/docs/granting-changing-revoking-access). - - - - -Constellation on STACKIT requires a User Access Token (UAT) for the OpenStack API and a STACKIT service account. -The UAT already has all required permissions by default. -The STACKIT service account needs the `editor` role to create STACKIT LoadBalancers. -Look at the [STACKIT documentation](https://docs.stackit.cloud/stackit/en/getting-started-in-service-accounts-134415831.html) on how to create the service account and assign the role. - - - - -### Authentication - -You need to authenticate with your CSP. The following lists the required steps for *testing* and *production* environments. - -:::note -The steps for a *testing* environment are simpler. However, they may expose secrets to the CSP. If in doubt, follow the *production* steps. -::: - - - - -**Testing** - -You can use the [AWS CloudShell](https://console.aws.amazon.com/cloudshell/home). Make sure you are [authorized to use it](https://docs.aws.amazon.com/cloudshell/latest/userguide/sec-auth-with-identities.html). 
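A quick way to confirm that your shell session (CloudShell or local) is authenticated:

```bash
# Prints the account and identity the AWS CLI is currently using
aws sts get-caller-identity
```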
- -**Production** - -Use the latest version of the [AWS CLI](https://aws.amazon.com/cli/) on a trusted machine: - -```bash -aws configure -``` - -Options and first steps are described in the [AWS CLI documentation](https://docs.aws.amazon.com/cli/index.html). - - - - -**Testing** - -Simply open the [Azure Cloud Shell](https://docs.microsoft.com/en-us/azure/cloud-shell/overview). - -**Production** - -Use the latest version of the [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/) on a trusted machine: - -```bash -az login -``` - -Other options are described in Azure's [authentication guide](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli). - - - - -**Testing** - -You can use the [Google Cloud Shell](https://cloud.google.com/shell). Make sure your [session is authorized](https://cloud.google.com/shell/docs/auth). For example, execute `gsutil` and accept the authorization prompt. - -**Production** - -Use one of the following options on a trusted machine: - -* Use the [`gcloud` CLI](https://cloud.google.com/sdk/gcloud) - - ```bash - gcloud auth application-default login - ``` - - This will ask you to log-in to your Google account and create your credentials. - The Constellation CLI will automatically load these credentials when needed. - -* Set up a service account and pass the credentials manually - - Follow [Google's guide](https://cloud.google.com/docs/authentication/production#manually) for setting up your credentials. - - - - -You need to authenticate with the infrastructure API (OpenStack) and create a service account (STACKIT API). - -1. [Follow the STACKIT documentation](https://docs.stackit.cloud/stackit/en/step-1-generating-of-user-access-token-11763726.html) for obtaining a User Access Token (UAT) to use the infrastructure API -2. Create a configuration file under `~/.config/openstack/clouds.yaml` (`%AppData%\openstack\clouds.yaml` on Windows) with the credentials from the User Access Token - - ```yaml - clouds: - stackit: - auth: - auth_url: https://keystone.api.iaas.eu01.stackit.cloud/v3 - username: REPLACE_WITH_UAT_USERNAME - password: REPLACE_WITH_UAT_PASSWORD - project_id: REPLACE_WITH_STACKIT_PROJECT_ID - project_name: REPLACE_WITH_STACKIT_PROJECT_NAME - user_domain_name: portal_mvp - project_domain_name: portal_mvp - region_name: RegionOne - identity_api_version: 3 - ``` - -3. [Follow the STACKIT documentation](https://docs.stackit.cloud/stackit/en/getting-started-in-service-accounts-134415831.html) for creating a service account and an access token -4. Assign the `editor` role to the service account by [following the documentation](https://docs.stackit.cloud/stackit/en/getting-started-in-service-accounts-134415831.html) -5. Create a configuration file under `~/.stackit/credentials.json` (`%USERPROFILE%\.stackit\credentials.json` on Windows) - - ```json - {"STACKIT_SERVICE_ACCOUNT_TOKEN":"REPLACE_WITH_TOKEN"} - ``` - - - - - -## Next steps - -You are now ready to [deploy your first confidential Kubernetes cluster and application](first-steps.md). diff --git a/docs/versioned_docs/version-2.16/getting-started/marketplaces.md b/docs/versioned_docs/version-2.16/getting-started/marketplaces.md deleted file mode 100644 index a6763a42a..000000000 --- a/docs/versioned_docs/version-2.16/getting-started/marketplaces.md +++ /dev/null @@ -1,56 +0,0 @@ -# Using Constellation via Cloud Marketplaces - -Constellation is available through the Marketplaces of AWS, Azure, GCP, and STACKIT. 
This allows you to create self-managed Constellation clusters that are billed on a pay-per-use basis (hourly, per vCPU) with your CSP account. You can still get direct support by Edgeless Systems. For more information, please [contact us](https://www.edgeless.systems/enterprise-support/). - -This document explains how to run Constellation with the dynamically billed cloud marketplace images. - - - - -To use Constellation's marketplace images, ensure that you are subscribed to the [marketplace offering](https://aws.amazon.com/marketplace/pp/prodview-2mbn65nv57oys) through the web portal. - -Then, enable the use of marketplace images in your Constellation `constellation-conf.yaml` [config file](../workflows/config.md): - -```bash -yq eval -i ".provider.aws.useMarketplaceImage = true" constellation-conf.yaml -``` - - - - -Constellation has a private marketplace plan. Please [contact us](https://www.edgeless.systems/enterprise-support/) to gain access. - -To use a marketplace image, you need to accept the marketplace image's terms once for your subscription with the [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/vm/image/terms?view=azure-cli-latest): - -```bash -az vm image terms accept --publisher edgelesssystems --offer constellation --plan constellation -``` - -Then, enable the use of marketplace images in your Constellation `constellation-conf.yaml` [config file](../workflows/config.md): - -```bash -yq eval -i ".provider.azure.useMarketplaceImage = true" constellation-conf.yaml -``` - - - - -To use a marketplace image, ensure that the account is entitled to use marketplace images by Edgeless Systems by accepting the terms through the [web portal](https://console.cloud.google.com/marketplace/vm/config/edgeless-systems-public/constellation). - -Then, enable the use of marketplace images in your Constellation `constellation-conf.yaml` [config file](../workflows/config.md): - -```bash -yq eval -i ".provider.gcp.useMarketplaceImage = true" constellation-conf.yaml -``` - - - - -On STACKIT, the selected Constellation image is always a marketplace image. You can find more information on the STACKIT portal. - - - - -Ensure that the cluster uses an official release image version (i.e., `.image=vX.Y.Z` in the `constellation-conf.yaml` file). - -From there, you can proceed with the [cluster creation](../workflows/create.md) as usual. diff --git a/docs/versioned_docs/version-2.16/intro.md b/docs/versioned_docs/version-2.16/intro.md deleted file mode 100644 index 0bfe86da9..000000000 --- a/docs/versioned_docs/version-2.16/intro.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -slug: / -id: intro ---- -# Introduction - -Welcome to the documentation of Constellation! Constellation is a Kubernetes engine that aims to provide the best possible data security. - -![Constellation concept](/img/concept.svg) - - Constellation shields your entire Kubernetes cluster from the underlying cloud infrastructure. Everything inside is always encrypted, including at runtime in memory. For this, Constellation leverages a technology called *confidential computing* and more specifically Confidential VMs. - -:::tip -See the 📄[whitepaper](https://content.edgeless.systems/hubfs/Confidential%20Computing%20Whitepaper.pdf) for more information on confidential computing. -::: - -## Goals - -From a security perspective, Constellation is designed to keep all data always encrypted and to prevent any access from the underlying (cloud) infrastructure. 
This includes access from datacenter employees, privileged cloud admins, and attackers coming through the infrastructure. Such attackers could be malicious co-tenants escalating their privileges or hackers who managed to compromise a cloud server. - -From a DevOps perspective, Constellation is designed to work just like what you would expect from a modern Kubernetes engine. - -## Use cases - -Constellation provides unique security [features](overview/confidential-kubernetes.md) and [benefits](overview/security-benefits.md). The core use cases are: - -* Increasing the overall security of your clusters -* Increasing the trustworthiness of your SaaS offerings -* Moving sensitive workloads from on-prem to the cloud -* Meeting regulatory requirements - -## Next steps - -You can learn more about the concept of Confidential Kubernetes, features, security benefits, and performance of Constellation in the *Basics* section. To jump right into the action head to *Getting started*. diff --git a/docs/versioned_docs/version-2.16/overview/clouds.md b/docs/versioned_docs/version-2.16/overview/clouds.md deleted file mode 100644 index 5f19d8877..000000000 --- a/docs/versioned_docs/version-2.16/overview/clouds.md +++ /dev/null @@ -1,67 +0,0 @@ -# Feature status of clouds - -What works on which cloud? Currently, Confidential VMs (CVMs) are available in varying quality on the different clouds and software stacks. - -For Constellation, the ideal environment provides the following: - -1. Ability to run arbitrary software and images inside CVMs -2. CVMs based on AMD SEV-SNP (available in EPYC CPUs since the Milan generation) or Intel TDX (available in Xeon CPUs since the Sapphire Rapids generation) -3. Ability for CVM guests to obtain raw hardware attestation statements -4. Reviewable, open-source firmware inside CVMs -5. Capability of the firmware to attest the integrity of the code it passes control to, e.g., with an embedded virtual TPM (vTPM) - -(1) is a functional must-have. (2)--(5) are required for remote attestation that fully keeps the infrastructure/cloud out. Constellation can work without them or with approximations, but won't protect against certain privileged attackers anymore. - -The following table summarizes the state of features for different infrastructures as of June 2023. - -| **Feature** | **Azure** | **GCP** | **AWS** | **STACKIT** | **OpenStack (Yoga)** | -|-----------------------------------|-----------|---------|---------|--------------|----------------------| -| **1. Custom images** | Yes | Yes | Yes | Yes | Yes | -| **2. SEV-SNP or TDX** | Yes | Yes | Yes | No | Depends on kernel/HV | -| **3. Raw guest attestation** | Yes | Yes | Yes | No | Depends on kernel/HV | -| **4. Reviewable firmware** | No* | No | Yes | No | Depends on kernel/HV | -| **5. Confidential measured boot** | Yes | No | No | No | Depends on kernel/HV | - -## Microsoft Azure - -With its [CVM offering](https://docs.microsoft.com/en-us/azure/confidential-computing/confidential-vm-overview), Azure provides the best foundations for Constellation. -Regarding (3), Azure provides direct access to remote-attestation statements. -The firmware runs in an isolated domain inside the CVM and exposes a vTPM (5), but it's closed source (4). -On SEV-SNP, Azure uses VM Privilege Level (VMPL) isolation for the separation of firmware and the rest of the VM; on TDX, they use TD partitioning. -This firmware is signed by Azure. -The signature is reflected in the remote-attestation statements of CVMs. 
-Thus, the Azure closed-source firmware becomes part of Constellation's trusted computing base (TCB). - -\* Recently, [Azure announced the open source paravisor OpenHCL](https://techcommunity.microsoft.com/blog/windowsosplatform/openhcl-the-new-open-source-paravisor/4273172). It's the foundation for fully open source and verifiable CVM firmware. Once Azure provides their CVM firmware with reproducible builds based on OpenHCL, (4) switches from *No* to *Yes*. Constellation will support OpenHCL based firmware on Azure in the future. - -## Google Cloud Platform (GCP) - -The [CVMs Generally Available in GCP](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview#amd_sev) are based on AMD SEV but don't have SNP features enabled. -CVMs with [SEV-SNP enabled are in public preview](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview#amd_sev-snp). Regarding (3), with their SEV-SNP offering Google provides direct access to remote-attestation statements. -However, regarding (5), attestation is partially based on the [Shielded VM vTPM](https://cloud.google.com/compute/shielded-vm/docs/shielded-vm#vtpm) for [measured boot](../architecture/attestation.md#measured-boot), which is a vTPM managed by Google's hypervisor. -Hence, the hypervisor is currently part of Constellation's TCB. -Regarding (4), the CVMs still include closed-source firmware. - -In the past, Intel and Google have [collaborated](https://cloud.google.com/blog/products/identity-security/rsa-google-intel-confidential-computing-more-secure) to enhance the security of TDX. -Recently, Google has announced a [private preview for TDX](https://cloud.google.com/blog/products/identity-security/confidential-vms-on-intel-cpus-your-datas-new-intelligent-defense?hl=en). -With TDX on Google, Constellation has a similar TCB and attestation flow as with the current SEV-SNP offering. - -## Amazon Web Services (AWS) - -Amazon EC2 [supports AMD SEV-SNP](https://aws.amazon.com/de/about-aws/whats-new/2023/04/amazon-ec2-amd-sev-snp/). -Regarding (3), AWS provides direct access to remote-attestation statements. -However, regarding (5), attestation is partially based on the [NitroTPM](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html) for [measured boot](../architecture/attestation.md#measured-boot), which is a vTPM managed by the Nitro hypervisor. -Hence, the hypervisor is currently part of Constellation's TCB. -Regarding (4), the [firmware is open source](https://github.com/aws/uefi) and can be reproducibly built. - -## STACKIT - -[STACKIT Compute Engine](https://www.stackit.de/en/product/stackit-compute-engine/) supports AMD SEV-ES. A vTPM is used for measured boot, which is a vTPM managed by STACKIT's hypervisor. Hence, the hypervisor is currently part of Constellation's TCB. - -## OpenStack - -OpenStack is an open-source cloud and infrastructure management software. It's used by many smaller CSPs and datacenters. In the latest *Yoga* version, OpenStack has basic support for CVMs. However, much depends on the employed kernel and hypervisor. Features (2)--(4) are likely to be a *Yes* with Linux kernel version 6.2. Thus, going forward, OpenStack on corresponding AMD or Intel hardware will be a viable underpinning for Constellation. - -## Conclusion - -The different clouds and software like the Linux kernel and OpenStack are in the process of building out their support for state-of-the-art CVMs. Azure has already most features in place. 
For Constellation, the status quo means that the TCB has different shapes on different infrastructures. With broad SEV-SNP support coming to the Linux kernel, we soon expect a normalization of features across infrastructures. diff --git a/docs/versioned_docs/version-2.16/overview/confidential-kubernetes.md b/docs/versioned_docs/version-2.16/overview/confidential-kubernetes.md deleted file mode 100644 index bff8c3322..000000000 --- a/docs/versioned_docs/version-2.16/overview/confidential-kubernetes.md +++ /dev/null @@ -1,42 +0,0 @@ -# Confidential Kubernetes - -We use the term *Confidential Kubernetes* to refer to the concept of using confidential-computing technology to shield entire Kubernetes clusters from the infrastructure. The three defining properties of this concept are: - -1. **Workload shielding**: the confidentiality and integrity of all workload-related data and code are enforced. -2. **Control plane shielding**: the confidentiality and integrity of the cluster's control plane, state, and workload configuration are enforced. -3. **Attestation and verifiability**: the two properties above can be verified remotely based on hardware-rooted cryptographic certificates. - -Each of the above properties is equally important. Only with all three in conjunction, an entire cluster can be shielded without gaps. - -## Constellation security features - -Constellation implements the Confidential Kubernetes concept with the following security features. - -* **Runtime encryption**: Constellation runs all Kubernetes nodes inside Confidential VMs (CVMs). This gives runtime encryption for the entire cluster. -* **Network and storage encryption**: Constellation augments this with transparent encryption of the [network](../architecture/networking.md), [persistent storage](../architecture/encrypted-storage.md), and other managed storage like [AWS S3](../architecture/encrypted-storage.md#encrypted-s3-object-storage). Thus, workloads and control plane are truly end-to-end encrypted: at rest, in transit, and at runtime. -* **Transparent key management**: Constellation manages the corresponding [cryptographic keys](../architecture/keys.md) inside CVMs. -* **Node attestation and verification**: Constellation verifies the integrity of each new CVM-based node using [remote attestation](../architecture/attestation.md). Only "good" nodes receive the cryptographic keys required to access the network and storage of a cluster. -* **Confidential computing-optimized images**: A node is "good" if it's running a signed Constellation [node image](../architecture/images.md) inside a CVM and is in the expected state. (Node images are hardware-measured during boot. The measurements are reflected in the attestation statements that are produced by nodes and verified by Constellation.) -* **"Whole cluster" attestation**: Towards the DevOps engineer, Constellation provides a single hardware-rooted certificate from which all of the above can be verified. - -With the above, Constellation wraps an entire cluster into one coherent and verifiable *confidential context*. The concept is depicted in the following. - -![Confidential Kubernetes](../_media/concept-constellation.svg) - -## Comparison: Managed Kubernetes with CVMs - -In comparison, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. 
Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. Consequently, this approach leaves a large attack surface, as is depicted in the following. - -![Concept: Managed Kubernetes plus CVMs](../_media/concept-managed.svg) - -The following table highlights the key differences in terms of features. - -| | Managed Kubernetes with CVMs | Confidential Kubernetes (Constellation✨) | -|-------------------------------------|------------------------------|--------------------------------------------| -| Runtime encryption | Partial (data plane only)| **Yes** | -| Node image verification | No | **Yes** | -| Full cluster attestation | No | **Yes** | -| Transparent network encryption | No | **Yes** | -| Transparent storage encryption | No | **Yes** | -| Confidential key management | No | **Yes** | -| Cloud agnostic / multi-cloud | No | **Yes** | diff --git a/docs/versioned_docs/version-2.16/overview/license.md b/docs/versioned_docs/version-2.16/overview/license.md deleted file mode 100644 index 34122c025..000000000 --- a/docs/versioned_docs/version-2.16/overview/license.md +++ /dev/null @@ -1,33 +0,0 @@ -# License - -## Source code - -Constellation's source code is available on [GitHub](https://github.com/edgelesssys/constellation) under the [GNU Affero General Public License v3.0](https://github.com/edgelesssys/constellation/blob/main/LICENSE). - -## Binaries - -Edgeless Systems provides ready-to-use and [signed](../architecture/attestation.md#chain-of-trust) binaries of Constellation. This includes the CLI and the [node images](../architecture/images.md). - -These binaries may be used free of charge within the bounds of Constellation's [**Community License**](#community-license). An [**Enterprise License**](#enterprise-license) can be purchased from Edgeless Systems. - -The Constellation CLI displays relevant license information when you initialize your cluster. You are responsible for staying within the bounds of your respective license. Constellation doesn't enforce any limits so as not to endanger your cluster's availability. - -## Terraform provider - -Edgeless Systems provides a [Terraform provider](https://github.com/edgelesssys/terraform-provider-constellation/releases), which may be used free of charge within the bounds of Constellation's [**Community License**](#community-license). An [**Enterprise License**](#enterprise-license) can be purchased from Edgeless Systems. - -You are responsible for staying within the bounds of your respective license. Constellation doesn't enforce any limits so as not to endanger your cluster's availability. - -## Community License - -You are free to use the Constellation binaries provided by Edgeless Systems to create services for internal consumption, evaluation purposes, or non-commercial use. You must not use the Constellation binaries to provide commercial hosted services to third parties. Edgeless Systems gives no warranties and offers no support. - -## Enterprise License - -Enterprise Licenses don't have the above limitations and come with support and additional features. 
Find out more at the [product website](https://www.edgeless.systems/products/constellation/). - -Once you have received your Enterprise License file, place it in your [Constellation workspace](../architecture/orchestration.md#workspaces) in a file named `constellation.license`. - -## CSP Marketplaces - -Constellation is available through the Marketplaces of AWS, Azure, GCP, and STACKIT. This allows you to create self-managed Constellation clusters that are billed on a pay-per-use basis (hourly, per vCPU) with your CSP account. You can still get direct support by Edgeless Systems. For more information, please [contact us](https://www.edgeless.systems/enterprise-support/). diff --git a/docs/versioned_docs/version-2.16/overview/performance/application.md b/docs/versioned_docs/version-2.16/overview/performance/application.md deleted file mode 100644 index c67d59644..000000000 --- a/docs/versioned_docs/version-2.16/overview/performance/application.md +++ /dev/null @@ -1,102 +0,0 @@ -# Application benchmarks - -## HashiCorp Vault - -[HashiCorp Vault](https://www.vaultproject.io/) is a distributed secrets management software that can be deployed to Kubernetes. -HashiCorp maintains a benchmarking tool for vault, [vault-benchmark](https://github.com/hashicorp/vault-benchmark/). -Vault-benchmark generates load on a Vault deployment and measures response times. - -This article describes the results from running vault-benchmark on Constellation, AKS, and GKE. -You can find the setup for producing the data discussed in this article in the [vault-benchmarks](https://github.com/edgelesssys/vault-benchmarks) repository. - -The Vault API used during benchmarking is the [transits secret engine](https://developer.hashicorp.com/vault/docs/secrets/transit). -This allows services to send data to Vault for encryption, decryption, signing, and verification. - -## Results - -On each run, vault-benchmark sends requests and measures the latencies. -The measured latencies are aggregated through various statistical features. -After running the benchmark n times, the arithmetic mean over a subset of the reported statistics is calculated. -The selected features are arithmetic mean, 99th percentile, minimum, and maximum. - -Arithmetic mean gives a general sense of the latency on each target. -The 99th percentile shows performance in (most likely) erroneous states. -Minimum and maximum mark the range within which latency varies each run. - -The benchmark was configured with 1300 workers and 10 seconds per run. -Those numbers were chosen empirically. -The latency was stabilizing at 10 seconds runtime, not changing with further increase. -Increasing the number of workers beyond 1300 leads to request failures, marking the limit Vault was able to handle in this setup. -All results are based on 100 runs. - -The following data was generated while running five replicas, one primary, and four standby nodes. -All numbers are in seconds if not indicated otherwise. 
-``` -========== Results AKS ========== -Mean: mean: 1.632200, variance: 0.002057 -P99: mean: 5.480679, variance: 2.263700 -Max: mean: 6.651001, variance: 2.808401 -Min: mean: 0.011415, variance: 0.000133 -========== Results GKE ========== -Mean: mean: 1.656435, variance: 0.003615 -P99: mean: 6.030807, variance: 3.955051 -Max: mean: 7.164843, variance: 3.300004 -Min: mean: 0.010233, variance: 0.000111 -========== Results C11n ========== -Mean: mean: 1.651549, variance: 0.001610 -P99: mean: 5.780422, variance: 3.016106 -Max: mean: 6.942997, variance: 3.075796 -Min: mean: 0.013774, variance: 0.000228 -========== AKS vs C11n ========== -Mean: +1.171577 % (AKS is faster) -P99: +5.185495 % (AKS is faster) -Max: +4.205618 % (AKS is faster) -Min: +17.128781 % (AKS is faster) -========== GKE vs C11n ========== -Mean: -0.295851 % (GKE is slower) -P99: -4.331603 % (GKE is slower) -Max: -3.195248 % (GKE is slower) -Min: +25.710886 % (GKE is faster) -``` - -**Interpretation**: Latencies are all within ~5% of each other. -AKS performs slightly better than GKE and Constellation (C11n) in all cases except minimum latency. -Minimum latency is the lowest for GKE. -Compared to GKE, Constellation had slightly lower peak latencies (99th percentile and maximum), indicating that Constellation could have handled slightly more concurrent accesses than GKE. -Overall, performance is at comparable levels across all three distributions. -Based on these numbers, you can use a similarly sized Constellation cluster to run your existing Vault deployment. - -### Visualization - -The following plots visualize the data presented above as [box plots](https://en.wikipedia.org/wiki/Box_plot). -The whiskers denote the minimum and maximum. -The box stretches from the 25th to the 75th percentile, with the dividing bar marking the 50th percentile. -The circles outside the whiskers denote outliers. - -
-Mean Latency - -![Mean Latency](../../_media/benchmark_vault/5replicas/mean_latency.png) - -
- -
-99th Percentile Latency - -![99th Percentile Latency](../../_media/benchmark_vault/5replicas/p99_latency.png) - -
- -
-Maximum Latency - -![Maximum Latency](../../_media/benchmark_vault/5replicas/max_latency.png) - -
- -
-Minimum Latency - -![Minimum Latency](../../_media/benchmark_vault/5replicas/min_latency.png) - -
diff --git a/docs/versioned_docs/version-2.16/overview/performance/compute.md b/docs/versioned_docs/version-2.16/overview/performance/compute.md deleted file mode 100644 index 88dd4b1b2..000000000 --- a/docs/versioned_docs/version-2.16/overview/performance/compute.md +++ /dev/null @@ -1,11 +0,0 @@ -# Impact of runtime encryption on compute performance - -All nodes in a Constellation cluster are executed inside Confidential VMs (CVMs). Consequently, the performance of Constellation is inherently linked to the performance of these CVMs. - -## AMD and Azure benchmarking - -AMD and Azure have collectively released a [performance benchmark](https://community.amd.com/t5/business/microsoft-azure-confidential-computing-powered-by-3rd-gen-epyc/ba-p/497796) for CVMs that utilize 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. This benchmark, which included a variety of mostly compute-intensive tests such as SPEC CPU 2017 and CoreMark, demonstrated that CVMs experience only minor performance degradation (ranging from 2% to 8%) when compared to standard VMs. Such results are indicative of the performance that can be expected from compute-intensive workloads running with Constellation on Azure. - -## AMD and Google benchmarking - -Similarly, AMD and Google have jointly released a [performance benchmark](https://www.amd.com/system/files/documents/3rd-gen-epyc-gcp-c2d-conf-compute-perf-brief.pdf) for CVMs employing 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. With high-performance computing workloads such as WRF, NAMD, Ansys CFS, and Ansys LS_DYNA, they observed analogous findings, with only minor performance degradation (between 2% and 4%) compared to standard VMs. These outcomes are reflective of the performance that can be expected for compute-intensive workloads running with Constellation on GCP. diff --git a/docs/versioned_docs/version-2.16/overview/performance/io.md b/docs/versioned_docs/version-2.16/overview/performance/io.md deleted file mode 100644 index 3ae796f8a..000000000 --- a/docs/versioned_docs/version-2.16/overview/performance/io.md +++ /dev/null @@ -1,204 +0,0 @@ -# I/O performance benchmarks - -To assess the overall performance of Constellation, this benchmark evaluates Constellation v2.6.0 in terms of storage I/O using [`fio`](https://fio.readthedocs.io/en/latest/fio_doc.html) and network performance using the [Kubernetes Network Benchmark](https://github.com/InfraBuilder/k8s-bench-suite#knb--kubernetes-network-be). - -This benchmark tested Constellation on Azure and GCP and compared the results against the managed Kubernetes offerings AKS and GKE. - -## Configurations - -### Constellation - -The benchmark was conducted with Constellation v2.6.0, Kubernetes v1.25.7, and Cilium v1.12. -It ran on the following infrastructure configurations. - -Constellation on Azure: - -- Nodes: 3 (1 Control-plane, 2 Worker) -- Machines: `DC4as_v5`: 3rd Generation AMD EPYC 7763v (Milan) processor with 4 Cores, 16 GiB memory -- CVM: `true` -- Region: `West US` -- Zone: `2` - -Constellation on GCP: - -- Nodes: 3 (1 Control-plane, 2 Worker) -- Machines: `n2d-standard-4`: 2nd Generation AMD EPYC (Rome) processor with 4 Cores, 16 GiB of memory -- CVM: `true` -- Zone: `europe-west3-b` - -### AKS - -On AKS, the benchmark used Kubernetes `v1.24.9` and nodes with version `AKSUbuntu-1804gen2containerd-2023.02.15`. 
-AKS ran with the [`kubenet`](https://learn.microsoft.com/en-us/azure/aks/concepts-network#kubenet-basic-networking) CNI and the [default CSI driver](https://learn.microsoft.com/en-us/azure/aks/azure-disk-csi) for Azure Disk. - -The following infrastructure configuration was used: - -- Nodes: 2 (2 Worker) -- Machines: `D4as_v5`: 3rd Generation AMD EPYC 7763v (Milan) processor with 4 Cores, 16 GiB memory -- CVM: `false` -- Region: `West US` -- Zone: `2` - -### GKE - -On GKE, the benchmark used Kubernetes `v1.24.9` and nodes with version `1.24.9-gke.3200`. -GKE ran with the [`kubenet`](https://cloud.google.com/kubernetes-engine/docs/concepts/network-overview) CNI and the [default CSI driver](https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/gce-pd-csi-driver) for Compute Engine persistent disk. - -The following infrastructure configuration was used: - -- Nodes: 2 (2 Worker) -- Machines: `n2d-standard-4` 2nd Generation AMD EPYC (Rome) processor with 4 Cores, 16 GiB of memory -- CVM: `false` -- Zone: `europe-west3-b` - -## Results - -### Network - -This section gives a thorough analysis of the network performance of Constellation, specifically focusing on measuring TCP and UDP bandwidth. -The benchmark measured the bandwidth of pod-to-pod and pod-to-service connections between two different nodes using [`iperf`](https://iperf.fr/). - -GKE and Constellation on GCP had a maximum network bandwidth of [10 Gbps](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines). -AKS with `Standard_D4as_v5` machines has a maximum network bandwidth of [12.5 Gbps](https://learn.microsoft.com/en-us/azure/virtual-machines/dasv5-dadsv5-series#dasv5-series). -The Confidential VM equivalent `Standard_DC4as_v5` currently has a network bandwidth of [1.25 Gbps](https://learn.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series#dcasv5-series-products). -Therefore, to make the test comparable, both AKS and Constellation on Azure were running with `Standard_DC4as_v5` machines and 1.25 Gbps bandwidth. - -Constellation on Azure and AKS used an MTU of 1500. -Constellation on GCP used an MTU of 8896. GKE used an MTU of 1450. - -The difference in network bandwidth can largely be attributed to two factors. - -- Constellation's [network encryption](../../architecture/networking.md) via Cilium and WireGuard, which protects data in-transit. -- [AMD SEV using SWIOTLB bounce buffers](https://lore.kernel.org/all/20200204193500.GA15564@ashkalra_ubuntu_server/T/) for all DMA including network I/O. - -#### Pod-to-Pod - -In this scenario, the client Pod connects directly to the server pod via its IP address. - -```mermaid -flowchart LR - subgraph Node A - Client[Client] - end - subgraph Node B - Server[Server] - end - Client ==>|traffic| Server -``` - -The results for "Pod-to-Pod" on Azure are as follows: - -![Network Pod2Pod Azure benchmark graph](../../_media/benchmark_net_p2p_azure.png) - -The results for "Pod-to-Pod" on GCP are as follows: - -![Network Pod2Pod GCP benchmark graph](../../_media/benchmark_net_p2p_gcp.png) - -#### Pod-to-Service - -In this scenario, the client Pod connects to the server Pod via a ClusterIP service. This is more relevant to real-world use cases.
- -```mermaid -flowchart LR - subgraph Node A - Client[Client] ==>|traffic| Service[Service] - end - subgraph Node B - Server[Server] - end - Service ==>|traffic| Server -``` - -The results for "Pod-to-Service" on Azure are as follows: - -![Network Pod2SVC Azure benchmark graph](../../_media/benchmark_net_p2svc_azure.png) - -The results for "Pod-to-Service" on GCP are as follows: - -![Network Pod2SVC GCP benchmark graph](../../_media/benchmark_net_p2svc_gcp.png) - -In our recent comparison of Constellation on GCP with GKE, Constellation has 58% less TCP bandwidth. However, UDP bandwidth was slightly better with Constellation, thanks to its higher MTU. - -Similarly, when comparing Constellation on Azure with AKS using CVMs, Constellation achieved approximately 10% less TCP and 40% less UDP bandwidth. - -### Storage I/O - -Azure and GCP offer persistent storage for their Kubernetes services AKS and GKE via the Container Storage Interface (CSI). CSI storage in Kubernetes is available via `PersistentVolumes` (PV) and consumed via `PersistentVolumeClaims` (PVC). -Upon requesting persistent storage through a PVC, GKE and AKS will provision a PV as defined by a default [storage class](https://kubernetes.io/docs/concepts/storage/storage-classes/). -Constellation provides persistent storage on Azure and GCP [that's encrypted on the CSI layer](../../architecture/encrypted-storage.md). -Similarly, upon a PVC request, Constellation will provision a PV via a default storage class. - -For Constellation on Azure and AKS, the benchmark ran with Azure Disk storage [Standard SSD](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#standard-ssds) of 400 GiB size. -The [DC4as machine type](https://learn.microsoft.com/en-us/azure/virtual-machines/dasv5-dadsv5-series#dasv5-series) with four cores provides the following maximum performance: - -- 6400 (20000 burst) IOPS -- 144 MB/s (600 MB/s burst) throughput - -However, the performance is bound by the capabilities of the [512 GiB Standard SSD size](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#standard-ssds) (the size class of 400 GiB volumes): - -- 500 (600 burst) IOPS -- 60 MB/s (150 MB/s burst) throughput - -For Constellation on GCP and GKE, the benchmark ran with Compute Engine Persistent Disk Storage [pd-balanced](https://cloud.google.com/compute/docs/disks) of 400 GiB size. -The N2D machine type with four cores and pd-balanced provides the following [maximum performance](https://cloud.google.com/compute/docs/disks/performance#n2d_vms): - -- 3,000 read IOPS -- 15,000 write IOPS -- 240 MB/s read throughput -- 240 MB/s write throughput - -However, the performance is bound by the capabilities of a [`Zonal balanced PD`](https://cloud.google.com/compute/docs/disks/performance#zonal-persistent-disks) with 400 GiB size: - -- 2400 read IOPS -- 2400 write IOPS -- 112 MB/s read throughput -- 112 MB/s write throughput - -The [`fio`](https://fio.readthedocs.io/en/latest/fio_doc.html) benchmark consists of several tests. -The benchmark used [`Kubestr`](https://github.com/kastenhq/kubestr) to run `fio` in Kubernetes. -The default test performs randomized access patterns that accurately depict worst-case I/O scenarios for most applications.
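To reproduce a comparable measurement, a Kubestr-driven `fio` run can be launched roughly as follows. This is a sketch, not the exact invocation used for the published numbers: the storage class name and volume size are placeholders, and the flag spellings should be checked against your Kubestr version.

```bash
# Benchmark a CSI storage class with a custom fio job file.
# -s selects the storage class used to provision the test PVC,
# -z sets the size of the test volume, and
# -f points to the fio job definition (for example, the fio.ini linked below).
kubestr fio -s <storage-class> -z 400Gi -f fio.ini
```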
- -The following `fio` settings were used: - -- No cloud caching -- No OS caching -- Single CPU -- 60 seconds runtime -- 10 seconds ramp-up time -- 10 GiB file -- IOPS: 4 KB blocks and 128 iodepth -- Bandwidth: 1024 KB blocks and 128 iodepth - -For more details, see the [`fio` test configuration](https://github.com/edgelesssys/constellation/blob/main/.github/actions/e2e_benchmark/fio.ini). - -The results for IOPS on Azure are as follows: - -![I/O IOPS Azure benchmark graph](../../_media/benchmark_fio_azure_iops.png) - -The results for IOPS on GCP are as follows: - -![I/O IOPS GCP benchmark graph](../../_media/benchmark_fio_gcp_iops.png) - -The results for bandwidth on Azure are as follows: - -![I/O bandwidth Azure benchmark graph](../../_media/benchmark_fio_azure_bw.png) - -The results for bandwidth on GCP are as follows: - -![I/O bandwidth GCP benchmark graph](../../_media/benchmark_fio_gcp_bw.png) - -On GCP, the results exceed the maximum performance guarantees of the chosen disk type. There are two possible explanations for this. The first is that there may be cloud caching in place that isn't configurable. Alternatively, the underlying provisioned disk size may be larger than what was requested, resulting in higher performance boundaries. - -When comparing Constellation on GCP with GKE, Constellation has similar bandwidth but about 10% less IOPS performance. On Azure, Constellation has similar IOPS performance compared to AKS, where both likely hit the maximum storage performance. However, Constellation has approximately 15% less read and write bandwidth. - -## Conclusion - -Despite the added [security benefits](../security-benefits.md) that Constellation provides, it only incurs a slight performance overhead when compared to managed Kubernetes offerings such as AKS and GKE. In most compute benchmarks, Constellation is on par with its alternatives. -While it may be slightly slower in certain I/O scenarios due to network and storage encryption, there is ongoing work to reduce this overhead to single digits. - -For instance, storage encryption only adds between 10% and 15% overhead in terms of bandwidth and IOPS. -Meanwhile, the biggest performance impact that Constellation currently faces is network encryption, which can incur up to 58% overhead on a 10 Gbps network. -However, the Cilium team has conducted [benchmarks with Cilium using WireGuard encryption](https://docs.cilium.io/en/latest/operations/performance/benchmark/#encryption-wireguard-ipsec) on a 100 Gbps network that yielded over 15 Gbps. -We're confident that Constellation will provide a similar level of performance with an upcoming release. - -Overall, Constellation strikes a great balance between security and performance, and we're continuously working to improve its performance capabilities while maintaining its high level of security. diff --git a/docs/versioned_docs/version-2.16/overview/performance/performance.md b/docs/versioned_docs/version-2.16/overview/performance/performance.md deleted file mode 100644 index 59bf86602..000000000 --- a/docs/versioned_docs/version-2.16/overview/performance/performance.md +++ /dev/null @@ -1,17 +0,0 @@ -# Performance analysis of Constellation - -This section provides a comprehensive examination of the performance characteristics of Constellation. - -## Runtime encryption - -Runtime encryption affects compute performance.
[Benchmarks by Azure and Google](compute.md) show that the performance degradation of Confidential VMs (CVMs) is small, ranging from 2% to 8% for compute-intensive workloads. - -## I/O performance benchmarks - -We evaluated the [I/O performance](io.md) of Constellation, utilizing a collection of synthetic benchmarks targeting networking and storage. -We further compared this performance to native managed Kubernetes offerings from various cloud providers, to better understand how Constellation stands in relation to standard practices. - -## Application benchmarking - -To gauge Constellation's applicability to well-known applications, we performed a [benchmark of HashiCorp Vault](application.md) running on Constellation. -The results were then compared to deployments on the managed Kubernetes offerings from different cloud providers, providing a tangible perspective on Constellation's performance in actual deployment scenarios. diff --git a/docs/versioned_docs/version-2.16/overview/product.md b/docs/versioned_docs/version-2.16/overview/product.md deleted file mode 100644 index 4b5d90706..000000000 --- a/docs/versioned_docs/version-2.16/overview/product.md +++ /dev/null @@ -1,12 +0,0 @@ -# Product features - -Constellation is a Kubernetes engine that aims to provide the best possible data security in combination with enterprise-grade scalability and reliability features---and a smooth user experience. - -From a security perspective, Constellation implements the [Confidential Kubernetes](confidential-kubernetes.md) concept and corresponding security features, which shield your entire cluster from the underlying infrastructure. - -From an operational perspective, Constellation provides the following key features: - -* **Native support for different clouds**: Constellation works on Amazon Web Services (AWS), Microsoft Azure, Google Cloud Platform (GCP), and STACKIT. Support for OpenStack-based environments is coming with a future release. Constellation securely interfaces with the cloud infrastructure to provide [cluster autoscaling](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), [dynamic persistent volumes](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/), and [service load balancing](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). -* **High availability**: Constellation uses a [multi-master architecture](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/) with a [stacked etcd topology](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/ha-topology/#stacked-etcd-topology) to ensure high availability. -* **Integrated Day-2 operations**: Constellation lets you securely [upgrade](../workflows/upgrade.md) your cluster to a new release. It also lets you securely [recover](../workflows/recovery.md) a failed cluster. Both with a single command. -* **Support for Terraform**: Constellation includes a [Terraform provider](../workflows/terraform-provider.md) that lets you manage the full lifecycle of your cluster via Terraform. 
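As a rough illustration of the "single command" day-2 operations mentioned in the list above, upgrading and recovering a cluster boil down to the following CLI calls. This is a sketch; see the CLI reference below for all flags, and replace the endpoint with a node of your cluster.

```bash
# Check which image, Kubernetes, and microservice upgrades are available
constellation upgrade check

# Apply the upgrades selected in the configuration
constellation upgrade apply

# Recover a cluster whose instances restarted without peers to bootstrap from
constellation recover -e <node-ip>
```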
diff --git a/docs/versioned_docs/version-2.16/overview/security-benefits.md b/docs/versioned_docs/version-2.16/overview/security-benefits.md deleted file mode 100644 index 51a8b64f5..000000000 --- a/docs/versioned_docs/version-2.16/overview/security-benefits.md +++ /dev/null @@ -1,22 +0,0 @@ -# Security benefits and threat model - -Constellation implements the [Confidential Kubernetes](confidential-kubernetes.md) concept and shields entire Kubernetes deployments from the infrastructure. More concretely, Constellation decreases the size of the trusted computing base (TCB) of a Kubernetes deployment. The TCB is the totality of elements in a computing environment that must be trusted not to be compromised. A smaller TCB results in a smaller attack surface. The following diagram shows how Constellation removes the *cloud & datacenter infrastructure* and the *physical hosts*, including the hypervisor, the host OS, and other components, from the TCB (red). Inside the confidential context (green), Kubernetes remains part of the TCB, but its integrity is attested and can be [verified](../workflows/verify-cluster.md). - -![TCB comparison](../_media/tcb.svg) - -Given this background, the following describes the concrete threat classes that Constellation addresses. - -## Insider access - -Employees and third-party contractors of cloud service providers (CSPs) have access to different layers of the cloud infrastructure. -This opens up a large attack surface where workloads and data can be read, copied, or manipulated. With Constellation, Kubernetes deployments are shielded from the infrastructure and thus such accesses are prevented. - -## Infrastructure-based attacks - -Malicious cloud users ("hackers") may break out of their tenancy and access other tenants' data. Advanced attackers may even be able to establish a permanent foothold within the infrastructure and access data over a longer period. Analogously to the *insider access* scenario, Constellation also prevents access to a deployment's data in this scenario. - -## Supply chain attacks - -Supply chain security is receiving lots of attention recently due to an [increasing number of recorded attacks](https://www.enisa.europa.eu/news/enisa-news/understanding-the-increase-in-supply-chain-security-attacks). For instance, a malicious actor could attempt to tamper Constellation node images (including Kubernetes and other software) before they're loaded in the confidential VMs of a cluster. Constellation uses [remote attestation](../architecture/attestation.md) in conjunction with public [transparency logs](../workflows/verify-cli.md) to prevent this. - -In the future, Constellation will extend this feature to customer workloads. This will enable cluster owners to create auditable policies that precisely define which containers can run in a given deployment. diff --git a/docs/versioned_docs/version-2.16/reference/cli.md b/docs/versioned_docs/version-2.16/reference/cli.md deleted file mode 100644 index 52391f3d1..000000000 --- a/docs/versioned_docs/version-2.16/reference/cli.md +++ /dev/null @@ -1,842 +0,0 @@ -# CLI reference - - - -Use the Constellation CLI to create and manage your clusters. 
- -Usage: - -``` -constellation [command] -``` -Commands: - -* [config](#constellation-config): Work with the Constellation configuration file - * [generate](#constellation-config-generate): Generate a default configuration and state file - * [fetch-measurements](#constellation-config-fetch-measurements): Fetch measurements for configured cloud provider and image - * [instance-types](#constellation-config-instance-types): Print the supported instance types for all cloud providers - * [kubernetes-versions](#constellation-config-kubernetes-versions): Print the Kubernetes versions supported by this CLI - * [migrate](#constellation-config-migrate): Migrate a configuration file to a new version -* [create](#constellation-create): Create instances on a cloud platform for your Constellation cluster -* [apply](#constellation-apply): Apply a configuration to a Constellation cluster -* [mini](#constellation-mini): Manage MiniConstellation clusters - * [up](#constellation-mini-up): Create and initialize a new MiniConstellation cluster - * [down](#constellation-mini-down): Destroy a MiniConstellation cluster -* [status](#constellation-status): Show status of a Constellation cluster -* [verify](#constellation-verify): Verify the confidential properties of a Constellation cluster -* [upgrade](#constellation-upgrade): Find and apply upgrades to your Constellation cluster - * [check](#constellation-upgrade-check): Check for possible upgrades - * [apply](#constellation-upgrade-apply): Apply an upgrade to a Constellation cluster -* [recover](#constellation-recover): Recover a completely stopped Constellation cluster -* [terminate](#constellation-terminate): Terminate a Constellation cluster -* [iam](#constellation-iam): Work with the IAM configuration on your cloud provider - * [create](#constellation-iam-create): Create IAM configuration on a cloud platform for your Constellation cluster - * [aws](#constellation-iam-create-aws): Create IAM configuration on AWS for your Constellation cluster - * [azure](#constellation-iam-create-azure): Create IAM configuration on Microsoft Azure for your Constellation cluster - * [gcp](#constellation-iam-create-gcp): Create IAM configuration on GCP for your Constellation cluster - * [destroy](#constellation-iam-destroy): Destroy an IAM configuration and delete local Terraform files - * [upgrade](#constellation-iam-upgrade): Find and apply upgrades to your IAM profile - * [apply](#constellation-iam-upgrade-apply): Apply an upgrade to an IAM profile -* [version](#constellation-version): Display version of this CLI -* [init](#constellation-init): Initialize the Constellation cluster - -## constellation config - -Work with the Constellation configuration file - -### Synopsis - -Work with the Constellation configuration file. - -### Options - -``` - -h, --help help for config -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation config generate - -Generate a default configuration and state file - -### Synopsis - -Generate a default configuration and state file for your selected cloud provider. 
- -``` -constellation config generate {aws|azure|gcp|openstack|qemu|stackit} [flags] -``` - -### Options - -``` - -a, --attestation string attestation variant to use {aws-sev-snp|aws-nitro-tpm|azure-sev-snp|azure-tdx|azure-trustedlaunch|gcp-sev-es|qemu-vtpm}. If not specified, the default for the cloud provider is used - -h, --help help for generate - -k, --kubernetes string Kubernetes version to use in format MAJOR.MINOR (default "v1.28") -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation config fetch-measurements - -Fetch measurements for configured cloud provider and image - -### Synopsis - -Fetch measurements for configured cloud provider and image. - -A config needs to be generated first. - -``` -constellation config fetch-measurements [flags] -``` - -### Options - -``` - -h, --help help for fetch-measurements - -s, --signature-url string alternative URL to fetch measurements' signature from - -u, --url string alternative URL to fetch measurements from -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation config instance-types - -Print the supported instance types for all cloud providers - -### Synopsis - -Print the supported instance types for all cloud providers. - -``` -constellation config instance-types [flags] -``` - -### Options - -``` - -h, --help help for instance-types -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation config kubernetes-versions - -Print the Kubernetes versions supported by this CLI - -### Synopsis - -Print the Kubernetes versions supported by this CLI. - -``` -constellation config kubernetes-versions [flags] -``` - -### Options - -``` - -h, --help help for kubernetes-versions -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation config migrate - -Migrate a configuration file to a new version - -### Synopsis - -Migrate a configuration file to a new version. - -``` -constellation config migrate [flags] -``` - -### Options - -``` - -h, --help help for migrate -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation create - -Create instances on a cloud platform for your Constellation cluster - -### Synopsis - -Create instances on a cloud platform for your Constellation cluster. 
- -``` -constellation create [flags] -``` - -### Options - -``` - -h, --help help for create - -y, --yes create the cluster without further confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation apply - -Apply a configuration to a Constellation cluster - -### Synopsis - -Apply a configuration to a Constellation cluster to initialize or upgrade the cluster. - -``` -constellation apply [flags] -``` - -### Options - -``` - --conformance enable conformance mode - -h, --help help for apply - --merge-kubeconfig merge Constellation kubeconfig file with default kubeconfig file in $HOME/.kube/config - --skip-helm-wait install helm charts without waiting for deployments to be ready - --skip-phases strings comma-separated list of upgrade phases to skip - one or multiple of { infrastructure | init | attestationconfig | certsans | helm | image | k8s } - -y, --yes run command without further confirmation - WARNING: the command might delete or update existing resources without additional checks. Please read the docs. - -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation mini - -Manage MiniConstellation clusters - -### Synopsis - -Manage MiniConstellation clusters. - -### Options - -``` - -h, --help help for mini -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation mini up - -Create and initialize a new MiniConstellation cluster - -### Synopsis - -Create and initialize a new MiniConstellation cluster. - -A mini cluster consists of a single control-plane and worker node, hosted using QEMU/KVM. - -``` -constellation mini up [flags] -``` - -### Options - -``` - -h, --help help for up - --merge-kubeconfig merge Constellation kubeconfig file with default kubeconfig file in $HOME/.kube/config (default true) -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation mini down - -Destroy a MiniConstellation cluster - -### Synopsis - -Destroy a MiniConstellation cluster. - -``` -constellation mini down [flags] -``` - -### Options - -``` - -h, --help help for down - -y, --yes terminate the cluster without further confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation status - -Show status of a Constellation cluster - -### Synopsis - -Show the status of a constellation cluster. 
- -Shows microservice, image, and Kubernetes versions installed in the cluster. Also shows status of current version upgrades. - -``` -constellation status [flags] -``` - -### Options - -``` - -h, --help help for status -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation verify - -Verify the confidential properties of a Constellation cluster - -### Synopsis - -Verify the confidential properties of a Constellation cluster. -If arguments aren't specified, values are read from `constellation-state.yaml`. - -``` -constellation verify [flags] -``` - -### Options - -``` - --cluster-id string expected cluster identifier - -h, --help help for verify - -e, --node-endpoint string endpoint of the node to verify, passed as HOST[:PORT] - -o, --output string print the attestation document in the output format {json|raw} -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation upgrade - -Find and apply upgrades to your Constellation cluster - -### Synopsis - -Find and apply upgrades to your Constellation cluster. - -### Options - -``` - -h, --help help for upgrade -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation upgrade check - -Check for possible upgrades - -### Synopsis - -Check which upgrades can be applied to your Constellation Cluster. - -``` -constellation upgrade check [flags] -``` - -### Options - -``` - -h, --help help for check - --ref string the reference to use for querying new versions (default "-") - --stream string the stream to use for querying new versions (default "stable") - -u, --update-config update the specified config file with the suggested versions -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation upgrade apply - -Apply an upgrade to a Constellation cluster - -### Synopsis - -Apply an upgrade to a Constellation cluster by applying the chosen configuration. - -``` -constellation upgrade apply [flags] -``` - -### Options - -``` - --conformance enable conformance mode - -h, --help help for apply - --skip-helm-wait install helm charts without waiting for deployments to be ready - --skip-phases strings comma-separated list of upgrade phases to skip - one or multiple of { infrastructure | helm | image | k8s } - -y, --yes run upgrades without further confirmation - WARNING: might delete your resources in case you are using cert-manager in your cluster. Please read the docs. - WARNING: might unintentionally overwrite measurements in the running cluster. 
-``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation recover - -Recover a completely stopped Constellation cluster - -### Synopsis - -Recover a Constellation cluster by sending a recovery key to an instance in the boot stage. - -This is only required if instances restart without other instances available for bootstrapping. - -``` -constellation recover [flags] -``` - -### Options - -``` - -e, --endpoint string endpoint of the instance, passed as HOST[:PORT] - -h, --help help for recover -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation terminate - -Terminate a Constellation cluster - -### Synopsis - -Terminate a Constellation cluster. - -The cluster can't be started again, and all persistent storage will be lost. - -``` -constellation terminate [flags] -``` - -### Options - -``` - -h, --help help for terminate - -y, --yes terminate the cluster without further confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation iam - -Work with the IAM configuration on your cloud provider - -### Synopsis - -Work with the IAM configuration on your cloud provider. - -### Options - -``` - -h, --help help for iam -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation iam create - -Create IAM configuration on a cloud platform for your Constellation cluster - -### Synopsis - -Create IAM configuration on a cloud platform for your Constellation cluster. - -### Options - -``` - -h, --help help for create - --update-config update the config file with the specific IAM information - -y, --yes create the IAM configuration without further confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation iam create aws - -Create IAM configuration on AWS for your Constellation cluster - -### Synopsis - -Create IAM configuration on AWS for your Constellation cluster. - -``` -constellation iam create aws [flags] -``` - -### Options - -``` - -h, --help help for aws - --prefix string name prefix for all resources (required) - --zone string AWS availability zone the resources will be created in, e.g., us-east-2a (required) - See the Constellation docs for a list of currently supported regions. 
-``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - --update-config update the config file with the specific IAM information - -C, --workspace string path to the Constellation workspace - -y, --yes create the IAM configuration without further confirmation -``` - -## constellation iam create azure - -Create IAM configuration on Microsoft Azure for your Constellation cluster - -### Synopsis - -Create IAM configuration on Microsoft Azure for your Constellation cluster. - -``` -constellation iam create azure [flags] -``` - -### Options - -``` - -h, --help help for azure - --region string region the resources will be created in, e.g., westus (required) - --resourceGroup string name prefix of the two resource groups your cluster / IAM resources will be created in (required) - --servicePrincipal string name of the service principal that will be created (required) -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - --update-config update the config file with the specific IAM information - -C, --workspace string path to the Constellation workspace - -y, --yes create the IAM configuration without further confirmation -``` - -## constellation iam create gcp - -Create IAM configuration on GCP for your Constellation cluster - -### Synopsis - -Create IAM configuration on GCP for your Constellation cluster. - -``` -constellation iam create gcp [flags] -``` - -### Options - -``` - -h, --help help for gcp - --projectID string ID of the GCP project the configuration will be created in (required) - Find it on the welcome screen of your project: https://console.cloud.google.com/welcome - --serviceAccountID string ID for the service account that will be created (required) - Must be 6 to 30 lowercase letters, digits, or hyphens. - --zone string GCP zone the cluster will be deployed in (required) - Find a list of available zones here: https://cloud.google.com/compute/docs/regions-zones#available -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - --update-config update the config file with the specific IAM information - -C, --workspace string path to the Constellation workspace - -y, --yes create the IAM configuration without further confirmation -``` - -## constellation iam destroy - -Destroy an IAM configuration and delete local Terraform files - -### Synopsis - -Destroy an IAM configuration and delete local Terraform files. - -``` -constellation iam destroy [flags] -``` - -### Options - -``` - -h, --help help for destroy - -y, --yes destroy the IAM configuration without asking for confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation iam upgrade - -Find and apply upgrades to your IAM profile - -### Synopsis - -Find and apply upgrades to your IAM profile. 
- -### Options - -``` - -h, --help help for upgrade -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation iam upgrade apply - -Apply an upgrade to an IAM profile - -### Synopsis - -Apply an upgrade to an IAM profile. - -``` -constellation iam upgrade apply [flags] -``` - -### Options - -``` - -h, --help help for apply - -y, --yes run upgrades without further confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation version - -Display version of this CLI - -### Synopsis - -Display version of this CLI. - -``` -constellation version [flags] -``` - -### Options - -``` - -h, --help help for version -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation init - -Initialize the Constellation cluster - -### Synopsis - -Initialize the Constellation cluster. - -Start your confidential Kubernetes. - -``` -constellation init [flags] -``` - -### Options - -``` - --conformance enable conformance mode - -h, --help help for init - --merge-kubeconfig merge Constellation kubeconfig file with default kubeconfig file in $HOME/.kube/config - --skip-helm-wait install helm charts without waiting for deployments to be ready -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - diff --git a/docs/versioned_docs/version-2.16/reference/migration.md b/docs/versioned_docs/version-2.16/reference/migration.md deleted file mode 100644 index 36680eef6..000000000 --- a/docs/versioned_docs/version-2.16/reference/migration.md +++ /dev/null @@ -1,85 +0,0 @@ -# Migrations - -This document describes breaking changes and migrations between Constellation releases. -Use [`constellation config migrate`](./cli.md#constellation-config-migrate) to automatically update an old config file to a new format. - -## Migrating from Azure's service principal authentication to managed identity authentication - -- The `provider.azure.appClientID` and `provider.azure.appClientSecret` fields are no longer supported and should be removed. -- To keep using an existing UAMI, add the `Owner` permission with the scope of your `resourceGroup`. -- Otherwise, simply [create new Constellation IAM credentials](../workflows/config.md#creating-an-iam-configuration) and use the created UAMI. -- To migrate the authentication for an existing cluster on Azure to an UAMI with the necessary permissions: - 1. Remove the `aadClientId` and `aadClientSecret` from the azureconfig secret. - 2. Set `useManagedIdentityExtension` to `true` and use the `userAssignedIdentity` from the Constellation config for the value of `userAssignedIdentityID`. - 3. 
Restart the CSI driver, cloud controller manager, cluster autoscaler, and Constellation operator pods. - - -## Migrating from CLI versions before 2.10 - -- AWS cluster upgrades require additional IAM permissions for the newly introduced `aws-load-balancer-controller`. Please upgrade your IAM roles using `iam upgrade apply`. This will show necessary changes and apply them, if desired. -- The global `nodeGroups` field was added. -- The fields `instanceType`, `stateDiskSizeGB`, and `stateDiskType` for each cloud provider are now part of the configuration of individual node groups. -- The `constellation create` command no longer uses the flags `--control-plane-count` and `--worker-count`. Instead, the initial node count is configured per node group in the `nodeGroups` field. - -## Migrating from CLI versions before 2.9 - -- The `provider.azure.appClientID` and `provider.azure.clientSecretValue` fields were removed to enforce migration to managed identity authentication - -## Migrating from CLI versions before 2.8 - -- The `measurements` field for each cloud service provider was replaced with a global `attestation` field. -- The `confidentialVM`, `idKeyDigest`, and `enforceIdKeyDigest` fields for the Azure cloud service provider were removed in favor of using the global `attestation` field. -- The optional global field `attestationVariant` was replaced by the now required `attestation` field. - -## Migrating from CLI versions before 2.3 - -- The `sshUsers` field was deprecated in v2.2 and has been removed from the configuration in v2.3. - As an alternative for SSH, check the workflow section [Connect to nodes](../workflows/troubleshooting.md#node-shell-access). -- The `image` field for each cloud service provider has been replaced with a global `image` field. Use the following mapping to migrate your configuration: -
- Show all - - | CSP | old image | new image | - | ----- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- | - | AWS | `ami-06b8cbf4837a0a57c` | `v2.2.2` | - | AWS | `ami-02e96dc04a9e438cd` | `v2.2.2` | - | AWS | `ami-028ead928a9034b2f` | `v2.2.2` | - | AWS | `ami-032ac10dd8d8266e3` | `v2.2.1` | - | AWS | `ami-032e0d57cc4395088` | `v2.2.1` | - | AWS | `ami-053c3e49e19b96bdd` | `v2.2.1` | - | AWS | `ami-0e27ebcefc38f648b` | `v2.2.0` | - | AWS | `ami-098cd37f66523b7c3` | `v2.2.0` | - | AWS | `ami-04a87d302e2509aad` | `v2.2.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.2.2` | `v2.2.2` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.2.2` | `v2.2.2` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.2.1` | `v2.2.1` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.2.1` | `v2.2.1` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.2.0` | `v2.2.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.2.0` | `v2.2.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.1.0` | `v2.1.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.1.0` | `v2.1.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.0.0` | `v2.0.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.0.0` | `v2.0.0` | - | GCP | `projects/constellation-images/global/images/constellation-v2-2-2` | `v2.2.2` | - | GCP | `projects/constellation-images/global/images/constellation-v2-2-1` | `v2.2.1` | - | GCP | `projects/constellation-images/global/images/constellation-v2-2-0` | `v2.2.0` | - | GCP | `projects/constellation-images/global/images/constellation-v2-1-0` | `v2.1.0` | - | GCP | `projects/constellation-images/global/images/constellation-v2-0-0` | `v2.0.0` | -
-- The `enforcedMeasurements` field has been removed and merged with the `measurements` field. - - To migrate your config containing a new image (`v2.3` or greater), remove the old `measurements` and `enforcedMeasurements` entries from your config and run `constellation fetch-measurements` - - To migrate your config containing an image older than `v2.3`, remove the `enforcedMeasurements` entry and replace the entries in `measurements` as shown in the example below: - - ```diff - measurements: - - 0: DzXCFGCNk8em5ornNZtKi+Wg6Z7qkQfs5CfE3qTkOc8= - + 0: - + expected: DzXCFGCNk8em5ornNZtKi+Wg6Z7qkQfs5CfE3qTkOc8= - + warnOnly: true - - 8: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= - + 8: - + expected: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= - + warnOnly: false - -enforcedMeasurements: - - - 8 - ``` diff --git a/docs/versioned_docs/version-2.16/reference/slsa.md b/docs/versioned_docs/version-2.16/reference/slsa.md deleted file mode 100644 index 21f4e713c..000000000 --- a/docs/versioned_docs/version-2.16/reference/slsa.md +++ /dev/null @@ -1,73 +0,0 @@ -# Supply chain levels for software artifacts (SLSA) adoption - -[Supply chain Levels for Software Artifacts, or SLSA (salsa)](https://slsa.dev/) is a framework for improving and grading a project's build system and engineering processes. SLSA focuses on security improvements for source code storage as well as build system definition, execution, and observation. SLSA is structured in [four levels](https://slsa.dev/spec/v0.1/levels). This page describes the adoption of SLSA for Constellation. - -:::info -SLSA is still in alpha status. The presented levels and their requirements might change in the future. We will adopt any changes into our engineering processes, as they get defined. -::: - -## Level 1 - Adopted - -**[Build - Scripted](https://slsa.dev/spec/v0.1/requirements#scripted-build)** - -All build steps are automated via [Bazel](https://github.com/edgelesssys/constellation/tree/main/bazel/ci) and [GitHub Actions](https://github.com/edgelesssys/constellation/tree/main/.github). - -**[Provenance - Available](https://slsa.dev/spec/v0.1/requirements#available)** - -Provenance for the CLI is generated using the [slsa-github-generator](https://github.com/slsa-framework/slsa-github-generator). - -## Level 2 - Adopted - -**[Source - Version Controlled](https://slsa.dev/spec/v0.1/requirements#version-controlled)** - -Constellation is hosted on GitHub using git. - -**[Build - Build Service](https://slsa.dev/spec/v0.1/requirements#build-service)** - -All builds are carried out by [GitHub Actions](https://github.com/edgelesssys/constellation/tree/main/.github). - -**[Provenance - Authenticated](https://slsa.dev/spec/v0.1/requirements#authenticated)** - -Provenance for the CLI is signed using the [slsa-github-generator](https://github.com/slsa-framework/slsa-github-generator). Learn [how to verify the CLI](../workflows/verify-cli.md) using the signed provenance, before using it for the first time. - -**[Provenance - Service Generated](https://slsa.dev/spec/v0.1/requirements#service-generated)** - -Provenance for the CLI is generated using the [slsa-github-generator](https://github.com/slsa-framework/slsa-github-generator) in GitHub Actions. 
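To make the provenance checks above concrete, a release binary can be verified with the slsa-verifier tool before first use. The following is only a sketch; the artifact and provenance file names are assumptions based on the GitHub release page rather than taken from this document:

```bash
# Download a CLI binary and its provenance from the release page (file names assumed)
curl -LO https://github.com/edgelesssys/constellation/releases/download/v2.16.0/constellation-linux-amd64
curl -LO https://github.com/edgelesssys/constellation/releases/download/v2.16.0/constellation.intoto.jsonl

# Check that the binary was built from the expected source repository
slsa-verifier verify-artifact constellation-linux-amd64 \
  --provenance-path constellation.intoto.jsonl \
  --source-uri github.com/edgelesssys/constellation
```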
- -## Level 3 - Adopted - -**[Source - Verified History](https://slsa.dev/spec/v0.1/requirements#verified-history)** - -The [Edgeless Systems](https://github.com/edgelesssys) GitHub organization [requires two-factor authentication](https://docs.github.com/en/organizations/keeping-your-organization-secure/managing-two-factor-authentication-for-your-organization/requiring-two-factor-authentication-in-your-organization) for all members. - -**[Source - Retained Indefinitely](https://slsa.dev/spec/v0.1/requirements#retained-indefinitely)** - -Since we use GitHub to host the repository, an external person can't modify or delete the history. Before a pull request can be merged, an explicit approval from an [Edgeless Systems](https://github.com/edgelesssys) team member is required. - -The same holds true for changes proposed by team members. Each change to `main` needs to be proposed via a pull request and requires at least one approval. - -The [Edgeless Systems](https://github.com/edgelesssys) GitHub organization admins control these settings and are able to make changes to the repository's history should legal requirements necessitate it. These changes require two-party approval following the obliterate policy. - -**[Build - Build as Code](https://slsa.dev/spec/v0.1/requirements#build-as-code)** - -All build files for Constellation are stored in [the same repository](https://github.com/edgelesssys/constellation/tree/main/.github). - -**[Build - Ephemeral Environment](https://slsa.dev/spec/v0.1/requirements#ephemeral-environment)** - -All GitHub Action workflows are executed on [GitHub-hosted runners](https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners). These runners are only available during workflow. - -We currently don't use [self-hosted runners](https://docs.github.com/en/actions/hosting-your-own-runners/about-self-hosted-runners). - -**[Build - Isolated](https://slsa.dev/spec/v0.1/requirements#isolated)** - -As outlined in the previous section, we use GitHub-hosted runners, which provide a new, isolated and ephemeral environment for each build. - -Additionally, the [SLSA GitHub generator](https://github.com/slsa-framework/slsa-github-generator#generation-of-provenance) itself is run in an isolated workflow with the artifact hash as defined inputs. - -**[Provenance - Non-falsifiable](https://slsa.dev/spec/v0.1/requirements#non-falsifiable)** - -As outlined by [SLSA GitHub generator](https://github.com/slsa-framework/slsa-github-generator) it already fulfills the non-falsifiable requirements for SLSA Level 3. The generated provenance is signed using [sigstore](https://sigstore.dev/) with an OIDC based proof of identity. - -## Level 4 - In Progress - -We strive to adopt certain aspect of SLSA Level 4 that support our engineering process. At the same time, SLSA is still in alpha status and the biggest changes to SLSA are expected to be around Level 4. diff --git a/docs/versioned_docs/version-2.16/reference/terraform.md b/docs/versioned_docs/version-2.16/reference/terraform.md deleted file mode 100644 index 9825a8bb8..000000000 --- a/docs/versioned_docs/version-2.16/reference/terraform.md +++ /dev/null @@ -1,37 +0,0 @@ -# Terraform usage - -[Terraform](https://www.terraform.io/) is an Infrastructure as Code (IaC) framework to manage cloud resources. This page explains how Constellation uses it internally and how advanced users may manually use it to have more control over the resource creation. 
- -:::info -Information on this page is intended for users who are familiar with Terraform. -It's not required for common usage of Constellation. -See the [Terraform documentation](https://developer.hashicorp.com/terraform/docs) if you want to learn more about it. -::: - -## Terraform state files - -Constellation keeps Terraform state files in subdirectories of the workspace together with the corresponding Terraform configuration files and metadata. -The subdirectories are created on the first Constellation CLI action that uses Terraform internally. - -Currently, these subdirectories are: - -* `constellation-terraform` - Terraform state files for the resources of the Constellation cluster -* `constellation-iam-terraform` - Terraform state files for IAM configuration - -As with all commands, commands that work with these files (e.g., `apply`, `terminate`, `iam`) have to be executed from the root of the cluster's [workspace directory](../architecture/orchestration.md#workspaces). You usually don't need and shouldn't manipulate or delete the subdirectories manually. - -## Interacting with Terraform manually - -Manual interaction with Terraform state created by Constellation (i.e., via the Terraform CLI) should only be performed by experienced users. It may lead to unrecoverable loss of cloud resources. For the majority of users and use cases, the interaction done by the [Constellation CLI](cli.md) is sufficient. - -## Terraform debugging - -To debug Terraform issues, the Constellation CLI offers the `tf-log` flag. You can set it to any of [Terraform's log levels](https://developer.hashicorp.com/terraform/internals/debugging): -* `JSON` (JSON-formatted logs at `TRACE` level) -* `TRACE` -* `DEBUG` -* `INFO` -* `WARN` -* `ERROR` - -The log output is written to the `terraform.log` file in the workspace directory. The output is appended to the file on each run. diff --git a/docs/versioned_docs/version-2.16/workflows/cert-manager.md b/docs/versioned_docs/version-2.16/workflows/cert-manager.md deleted file mode 100644 index 1d847e8bf..000000000 --- a/docs/versioned_docs/version-2.16/workflows/cert-manager.md +++ /dev/null @@ -1,13 +0,0 @@ -# Install cert-manager - -:::caution -If you want to use cert-manager with Constellation, pay attention to the following to avoid potential pitfalls. -::: - -Constellation ships with cert-manager preinstalled. -The default installation is part of the `kube-system` namespace, as all other Constellation-managed microservices. -You are free to install more instances of cert-manager into other namespaces. -However, be aware that any new installation needs to use the same version as the one installed with Constellation or rely on the same CRD versions. -Also remember to set the `installCRDs` value to `false` when installing new cert-manager instances. -It will create problems if you have two installations of cert-manager depending on different versions of the installed CRDs. -CRDs are cluster-wide resources and cert-manager depends on specific versions of those CRDs for each release. diff --git a/docs/versioned_docs/version-2.16/workflows/config.md b/docs/versioned_docs/version-2.16/workflows/config.md deleted file mode 100644 index 11cf31cbd..000000000 --- a/docs/versioned_docs/version-2.16/workflows/config.md +++ /dev/null @@ -1,353 +0,0 @@ -# Configure your cluster - -:::info -This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. 
-::: - - - ---- - -Before you can create your cluster, you need to configure the identity and access management (IAM) for your cloud service provider (CSP) and choose machine types for the nodes. - -## Creating the configuration file - -You can generate a configuration file for your CSP by using the following CLI command: - - - - -```bash -constellation config generate aws -``` - - - - -```bash -constellation config generate azure -``` - - - - -```bash -constellation config generate gcp -``` - - - - -```bash -constellation config generate stackit -``` - - - - -This creates the file `constellation-conf.yaml` in the current directory. - -## Choosing a VM type - -Constellation supports the following VM types: - - - -By default, Constellation uses `m6a.xlarge` VMs (4 vCPUs, 16 GB RAM) to create your cluster. -Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file. -If you are using the default attestation variant `awsSEVSNP`, you can use the instance types described in [AWS's AMD SEV-SNP docs](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/snp-requirements.html). -Please mind the region restrictions mentioned in the [Getting started](../getting-started/first-steps.md#create-a-cluster) section. - -If you are using the attestation variant `awsNitroTPM`, you can choose any of the [nitroTPM-enabled instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enable-nitrotpm-prerequisites.html). - -The Constellation CLI can also print the supported instance types with: `constellation config instance-types`. - - - - -By default, Constellation uses `Standard_DC4as_v5` CVMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file. For CVMs, any VM type with a minimum of 4 vCPUs from the [DCasv5 & DCadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series) or [ECasv5 & ECadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/ecasv5-ecadsv5-series) families is supported. - -You can also run `constellation config instance-types` to get the list of all supported options. - - - - -By default, Constellation uses `n2d-standard-4` VMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file. Supported are all machines with a minimum of 4 vCPUs from the [C2D](https://cloud.google.com/compute/docs/compute-optimized-machines#c2d_machine_types) or [N2D](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines) family. You can run `constellation config instance-types` to get the list of all supported options. - - - - -By default, Constellation uses `m1a.4cd` VMs (4 vCPUs, 30 GB RAM) to create your cluster. -Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file. - -The following instance types are known to be supported: - -| name | vCPUs | GB RAM | -|----------|-------|--------| -| m1a.4cd | 4 | 30 | -| m1a.8cd | 8 | 60 | -| m1a.16cd | 16 | 120 | -| m1a.30cd | 30 | 230 | - -You can choose any of the SEV-enabled instance types. You can find a list of all supported instance types in the [STACKIT documentation](https://docs.stackit.cloud/stackit/en/virtual-machine-flavors-75137231.html). - -The Constellation CLI can also print the supported instance types with: `constellation config instance-types`. 
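For example, after checking the supported types, you could set the instance type of the default worker group from the command line. This is only a sketch, assuming yq v4 is installed; the chosen type is an example value:

```bash
# List the instance types supported by this CLI version
constellation config instance-types

# Set the instance type of the default worker node group (example value)
yq -i '.nodeGroups.worker_default.instanceType = "m1a.8cd"' constellation-conf.yaml
```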
- - - - -Fill the desired VM type into the `instanceType` fields in the `constellation-conf.yml` file. - -## Creating additional node groups - -By default, Constellation creates the node groups `control_plane_default` and `worker_default` for control-plane nodes and workers, respectively. -If you require additional control-plane or worker groups with different instance types, zone placements, or disk sizes, you can add additional node groups to the `constellation-conf.yml` file. -Each node group can be scaled individually. - -Consider the following example for AWS: - -```yaml -nodeGroups: - control_plane_default: - role: control-plane - instanceType: c6a.xlarge - stateDiskSizeGB: 30 - stateDiskType: gp3 - zone: eu-west-1c - initialCount: 3 - worker_default: - role: worker - instanceType: c6a.xlarge - stateDiskSizeGB: 30 - stateDiskType: gp3 - zone: eu-west-1c - initialCount: 2 - high_cpu: - role: worker - instanceType: c6a.24xlarge - stateDiskSizeGB: 128 - stateDiskType: gp3 - zone: eu-west-1c - initialCount: 1 -``` - -This configuration creates an additional node group `high_cpu` with a larger instance type and disk. - -You can use the field `zone` to specify what availability zone nodes of the group are placed in. -On Azure, this field is empty by default and nodes are automatically spread across availability zones. -STACKIT currently offers SEV-enabled CPUs in the `eu01-1`, `eu01-2`, and `eu01-3` zones. -Consult the documentation of your cloud provider for more information: - -* [AWS](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/) -* [Azure](https://azure.microsoft.com/en-us/explore/global-infrastructure/availability-zones) -* [GCP](https://cloud.google.com/compute/docs/regions-zones) -* [STACKIT](https://docs.stackit.cloud/stackit/en/regions-and-availability-zones-75137212.html) - -## Choosing a Kubernetes version - -To learn which Kubernetes versions can be installed with your current CLI, you can run `constellation config kubernetes-versions`. -See also Constellation's [Kubernetes support policy](../architecture/versions.md#kubernetes-support-policy). - -## Creating an IAM configuration - -You can create an IAM configuration for your cluster automatically using the `constellation iam create` command. -If you already have a Constellation configuration file, you can add the `--update-config` flag to the command. This writes the needed IAM fields into your configuration. Furthermore, the flag updates the zone/region of the configuration if it hasn't been set yet. - - - - -You must be authenticated with the [AWS CLI](https://aws.amazon.com/en/cli/) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). - -```bash -constellation iam create aws --zone=us-east-2a --prefix=constellTest -``` - -This command creates IAM configuration for the AWS zone `us-east-2a` using the prefix `constellTest` for all named resources being created. - -Constellation OS images are currently replicated to the following regions: - -* `eu-central-1` -* `eu-west-1` -* `eu-west-3` -* `us-east-2` -* `ap-south-1` - -If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+AWS+image+region:+xx-xxxx-x). 
- -You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). - -Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - - - -You must be authenticated with the [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). - -```bash -constellation iam create azure --region=westus --resourceGroup=constellTest --servicePrincipal=spTest -``` - -This command creates IAM configuration on the Azure region `westus` creating a new resource group `constellTest` and a new service principal `spTest`. - -CVMs are available in several Azure regions. Constellation OS images are currently replicated to the following: - -* `germanywestcentral` -* `westus` -* `eastus` -* `northeurope` -* `westeurope` -* `southeastasia` - -If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+Azure+image+region:+xx-xxxx-x). - -You can find a list of all [regions in Azure's documentation](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=virtual-machines®ions=all). - -Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - - - -You must be authenticated with the [GCP CLI](https://cloud.google.com/sdk/gcloud) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). - -```bash -constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --serviceAccountID=constell-test -``` - -This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. - -Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `N2D`. - -Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - - - -STACKIT requires manual creation and configuration of service accounts. Look at the [first steps](../getting-started/first-steps.md) for more information. - - - - -
-Alternatively, you can manually create the IAM configuration on your CSP. - -The following describes the configuration fields and how you obtain the required information or create the required resources. - - - - -* **region**: The name of your chosen AWS data center region, e.g., `us-east-2`. - - Constellation OS images are currently replicated to the following regions: - * `eu-central-1` - * `eu-west-1` - * `eu-west-3` - * `us-east-2` - * `ap-south-1` - - If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+AWS+image+region:+xx-xxxx-x). - - You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). - -* **zone**: The name of your chosen AWS data center availability zone, e.g., `us-east-2a`. - - Learn more about [availability zones in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-availability-zones). - -* **iamProfileControlPlane**: The name of an IAM instance profile attached to all control-plane nodes. - - You can create the resource with [Terraform](https://www.terraform.io/). For that, use the [provided Terraform script](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam) to generate the necessary profile. The profile name will be provided as Terraform output value: `control_plane_instance_profile_name`. - - Alternatively, you can create the AWS profile with a tool of your choice. Use the JSON policy in [main.tf](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam/main.tf) in the resource `aws_iam_policy.control_plane_policy`. - -* **iamProfileWorkerNodes**: The name of an IAM instance profile attached to all worker nodes. - - You can create the resource with [Terraform](https://www.terraform.io/). For that, use the [provided Terraform script](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam) to generate the necessary profile. The profile name will be provided as Terraform output value: `worker_nodes_instance_profile_name`. - - Alternatively, you can create the AWS profile with a tool of your choice. Use the JSON policy in [main.tf](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam/main.tf) in the resource `aws_iam_policy.worker_node_policy`. - - - - -* **subscription**: The UUID of your Azure subscription, e.g., `8b8bd01f-efd9-4113-9bd1-c82137c32da7`. - - You can view your subscription UUID via `az account show` and read the `id` field. For more information refer to [Azure's documentation](https://docs.microsoft.com/en-us/azure/azure-portal/get-subscription-tenant-id#find-your-azure-subscription). - -* **tenant**: The UUID of your Azure tenant, e.g., `3400e5a2-8fe2-492a-886c-38cb66170f25`. - - You can view your tenant UUID via `az account show` and read the `tenant` field. For more information refer to [Azure's documentation](https://docs.microsoft.com/en-us/azure/azure-portal/get-subscription-tenant-id#find-your-azure-ad-tenant). - -* **location**: The Azure datacenter location you want to deploy your cluster in, e.g., `westus`. - - CVMs are available in several Azure regions. 
Constellation OS images are currently replicated to the following: - - * `germanywestcentral` - * `westus` - * `eastus` - * `northeurope` - * `westeurope` - * `southeastasia` - - If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+Azure+image+region:+xx-xxxx-x). - - You can find a list of all [regions in Azure's documentation](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=virtual-machines®ions=all). - -* **resourceGroup**: [Create a new resource group in Azure](https://learn.microsoft.com/azure/azure-resource-manager/management/manage-resource-groups-portal) for your Constellation cluster. Set this configuration field to the name of the created resource group. - -* **userAssignedIdentity**: [Create a new managed identity in Azure](https://learn.microsoft.com/azure/active-directory/managed-identities-azure-resources/how-manage-user-assigned-managed-identities). You should create the identity in a different resource group as all resources within the cluster resource group will be deleted on cluster termination. - - Add three role assignments to the identity: `Owner`, `Virtual Machine Contributor`, and `Application Insights Component Contributor`. The `scope` of all three should refer to the previously created cluster resource group. - - Set the configuration value to the full ID of the created identity, e.g., `/subscriptions/8b8bd01f-efd9-4113-9bd1-c82137c32da7/resourcegroups/constellation-identity/providers/Microsoft.ManagedIdentity/userAssignedIdentities/constellation-identity`. You can get it by opening the `JSON View` from the `Overview` section of the identity. - - The user-assigned identity is used by instances of the cluster to access other cloud resources. - For more information about managed identities refer to [Azure's documentation](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/how-manage-user-assigned-managed-identities). - - - - -* **project**: The ID of your GCP project, e.g., `constellation-129857`. - - You can find it on the [welcome screen of your GCP project](https://console.cloud.google.com/welcome). For more information refer to [Google's documentation](https://support.google.com/googleapi/answer/7014113). - -* **region**: The GCP region you want to deploy your cluster in, e.g., `us-central1`. - - You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). - -* **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-central1-a`. - - You can find a [list of all zones in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). - -* **serviceAccountKeyPath**: To configure this, you need to create a GCP [service account](https://cloud.google.com/iam/docs/service-accounts) with the following permissions: - - * `Compute Instance Admin (v1) (roles/compute.instanceAdmin.v1)` - * `Compute Network Admin (roles/compute.networkAdmin)` - * `Compute Security Admin (roles/compute.securityAdmin)` - * `Compute Storage Admin (roles/compute.storageAdmin)` - * `Service Account User (roles/iam.serviceAccountUser)` - - Afterward, create and download a new JSON key for this service account. Place the downloaded file in your Constellation workspace, and set the config parameter to the filename, e.g., `constellation-129857-15343dba46cb.json`. 
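As a rough sketch of how such a service account could be set up with the gcloud CLI (the commands and names below are illustrative and not taken from this document; the project ID, account name, and key file are placeholders, and you should double-check the roles against the list above):

```bash
PROJECT_ID=constellation-129857
SA_NAME=constell-test

# Create the service account
gcloud iam service-accounts create "${SA_NAME}" --project="${PROJECT_ID}"

# Grant the required roles listed above
for role in roles/compute.instanceAdmin.v1 roles/compute.networkAdmin \
            roles/compute.securityAdmin roles/compute.storageAdmin \
            roles/iam.serviceAccountUser; do
  gcloud projects add-iam-policy-binding "${PROJECT_ID}" \
    --member="serviceAccount:${SA_NAME}@${PROJECT_ID}.iam.gserviceaccount.com" \
    --role="${role}"
done

# Create and download a JSON key and place it in your Constellation workspace
gcloud iam service-accounts keys create "${PROJECT_ID}-key.json" \
  --iam-account="${SA_NAME}@${PROJECT_ID}.iam.gserviceaccount.com"
```

Afterward, set `serviceAccountKeyPath` in the configuration file to the name of the created key file, as described above.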
- - - - -STACKIT requires manual creation and configuration of service accounts. See the [first steps](../getting-started/first-steps.md) for more information. - - - -
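Putting the automated path together, a typical non-interactive IAM setup on AWS could look like the following sketch; the zone and prefix are the example values used above:

```bash
# Generate the configuration file for AWS
constellation config generate aws

# Create the IAM resources and write the results into constellation-conf.yaml
constellation iam create aws \
  --zone=us-east-2a \
  --prefix=constellTest \
  --update-config \
  --yes
```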
- -Now that you've configured your CSP, you can [create your cluster](./create.md). - -## Deleting an IAM configuration - -You can keep a created IAM configuration and reuse it for new clusters. Alternatively, you can also delete it if you don't want to use it anymore. - -Delete the IAM configuration by executing the following command in the same directory where you executed `constellation iam create` (the directory that contains [`constellation-iam-terraform`](../reference/terraform.md) as a subdirectory): - -```bash -constellation iam destroy -``` - -:::caution -For Azure, deleting the IAM configuration by executing `constellation iam destroy` will delete the whole resource group created by `constellation iam create`. -This also includes any additional resources in the resource group that weren't created by Constellation. -::: diff --git a/docs/versioned_docs/version-2.16/workflows/create.md b/docs/versioned_docs/version-2.16/workflows/create.md deleted file mode 100644 index 6074ebb16..000000000 --- a/docs/versioned_docs/version-2.16/workflows/create.md +++ /dev/null @@ -1,93 +0,0 @@ -# Create your cluster - -:::info -This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. -::: - - - ---- - -Creating your cluster happens through multiple phases. -The most significant ones are: - -1. Creating the necessary resources in your cloud environment -2. Bootstrapping the Constellation cluster and setting up a connection -3. Installing the necessary Kubernetes components - -`constellation apply` handles all this in a single command. -You can use the `--skip-phases` flag to skip specific phases of the process. -For example, if you created the infrastructure manually, you can skip the cloud resource creation phase. - -See the [architecture](../architecture/orchestration.md) section for details on the inner workings of this process. - -:::tip -If you don't have a cloud subscription, you can also set up a [local Constellation cluster using virtualization](../getting-started/first-steps-local.md) for testing. -::: - -Before you create the cluster, make sure to have a [valid configuration file](./config.md). - - - - -```bash -constellation apply -``` - -`apply` stores the state of your cluster's cloud resources in a [`constellation-terraform`](../architecture/orchestration.md#cluster-creation-process) directory in your workspace. - - - - -Self-managed infrastructure allows for more flexibility in the setup, by separating the infrastructure setup from the Constellation cluster management. -This provides flexibility in DevOps and can meet potential regulatory requirements. -It's recommended to use Terraform for infrastructure management, but you can use any tool of your choice. - -:::info - - When using Terraform, you can use the [Constellation Terraform provider](./terraform-provider.md) to manage the entire Constellation cluster lifecycle. - -::: - -You can refer to the Terraform files for the selected CSP from the [Constellation GitHub repository](https://github.com/edgelesssys/constellation/tree/main/terraform/infrastructure) for a minimum Constellation cluster configuration. From this base, you can now add, edit, or substitute resources per your own requirements with the infrastructure -management tooling of your choice. You need to keep the essential functionality of the base configuration in order for your cluster to function correctly. - - - -:::info - - On Azure, a manual update to the MAA provider's policy is necessary. 
- You can apply the update with the following command after creating the infrastructure, with `` being the URL of the MAA provider (i.e., `$(terraform output attestation_url | jq -r)`, when using the minimal Terraform configuration). - - ```bash - constellation maa-patch - ``` - -::: - - - -Make sure all necessary resources are created, e.g., through checking your CSP's portal and retrieve the necessary values, aligned with the outputs (specified in `outputs.tf`) of the base configuration. - -Fill these outputs into the corresponding fields of the `Infrastructure` block inside the `constellation-state.yaml` file. For example, fill the IP or DNS name your cluster can be reached at into the `.Infrastructure.ClusterEndpoint` field. - -With the required cloud resources set up, continue with initializing your cluster. - -```bash -constellation apply --skip-phases=infrastructure -``` - - - - -Finally, configure `kubectl` for your cluster: - -```bash -export KUBECONFIG="$PWD/constellation-admin.conf" -``` - -🏁 That's it. You've successfully created a Constellation cluster. - -### Troubleshooting - -In case `apply` fails, the CLI collects logs from the bootstrapping instance and stores them inside `constellation-cluster.log`. diff --git a/docs/versioned_docs/version-2.16/workflows/lb.md b/docs/versioned_docs/version-2.16/workflows/lb.md deleted file mode 100644 index 868e61076..000000000 --- a/docs/versioned_docs/version-2.16/workflows/lb.md +++ /dev/null @@ -1,28 +0,0 @@ -# Expose a service - -Constellation integrates the native load balancers of each CSP. Therefore, to expose a service simply [create a service of type `LoadBalancer`](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). - -## Internet-facing LB service on AWS - -To expose your application service externally you might want to use a Kubernetes Service of type `LoadBalancer`. On AWS, load-balancing is achieved through the [AWS Load Balancer Controller](https://kubernetes-sigs.github.io/aws-load-balancer-controller) as in the managed EKS. - -Since recent versions, the controller deploy an internal LB by default requiring to set an annotation `service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing` to have an internet-facing LB. For more details, see the [official docs](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.7/guide/service/nlb/). - -For general information on LB with AWS see [Network load balancing on Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/network-load-balancing.html). - -:::caution -Before terminating the cluster, all LB backed services should be deleted, so that the controller can cleanup the related resources. -::: - -## Ingress on AWS - -The AWS Load Balancer Controller also provisions `Ingress` resources of class `alb`. -AWS Application Load Balancers (ALBs) can be configured with a [`target-type`](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.7/guide/ingress/annotations/#target-type). -The target type `ip` requires using the EKS container network solution, which makes it incompatible with Constellation. -If a service can be exposed on a `NodePort`, the target type `instance` can be used. - -See [Application load balancing on Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html) for more information. - -:::caution -Ingress handlers backed by AWS ALBs reside outside the Constellation cluster, so they shouldn't be handling sensitive traffic! 
-::: diff --git a/docs/versioned_docs/version-2.16/workflows/recovery.md b/docs/versioned_docs/version-2.16/workflows/recovery.md deleted file mode 100644 index 50cd7ee72..000000000 --- a/docs/versioned_docs/version-2.16/workflows/recovery.md +++ /dev/null @@ -1,179 +0,0 @@ -# Recover your cluster - -Recovery of a Constellation cluster means getting it back into a healthy state after too many concurrent node failures in the control plane. -Reasons for an unhealthy cluster can vary from a power outage, or planned reboot, to migration of nodes and regions. -Recovery events are rare, because Constellation is built for high availability and automatically and securely replaces failed nodes. When a node is replaced, Constellation's control plane first verifies the new node before it sends the node the cryptographic keys required to decrypt its [state disk](../architecture/images.md#state-disk). - -Constellation provides a recovery mechanism for cases where the control plane has failed and is unable to replace nodes. -The `constellation recover` command securely connects to all nodes in need of recovery using [attested TLS](../architecture/attestation.md#attested-tls-atls) and provides them with the keys to decrypt their state disks and continue booting. - -## Identify unhealthy clusters - -The first step to recovery is identifying when a cluster becomes unhealthy. -Usually, this can be first observed when the Kubernetes API server becomes unresponsive. - -You can check the health status of the nodes via the cloud service provider (CSP). -Constellation provides logging information on the boot process and status via serial console output. -In the following, you'll find detailed descriptions for identifying clusters stuck in recovery for each CSP. - - - - -First, open the AWS console to view all Auto Scaling Groups (ASGs) in the region of your cluster. Select the ASG of the control plane `--control-plane` and check that enough members are in a *Running* state. - -Second, check the boot logs of these *Instances*. In the ASG's *Instance management* view, select each desired instance. In the upper right corner, select **Action > Monitor and troubleshoot > Get system log**. - -In the serial console output, search for `Waiting for decryption key`. -Similar output to the following means your node was restarted and needs to decrypt the [state disk](../architecture/images.md#state-disk): - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","caller":"cmd/main.go:55","msg":"Starting disk-mapper","version":"2.0.0","cloudProvider":"gcp"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"setupManager","caller":"setup/setup.go:72","msg":"Preparing existing state disk"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:65","msg":"Starting RejoinClient"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"recoveryServer","caller":"recoveryserver/server.go:59","msg":"Starting RecoveryServer"} -``` - -The node will then try to connect to the [*JoinService*](../architecture/microservices.md#joinservice) and obtain the decryption key. 
-If this fails due to an unhealthy control plane, you will see log messages similar to the following: - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:77","msg":"Received list with JoinService endpoints","endpoints":["192.168.178.4:30090","192.168.178.2:30090"]} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.4:30090"} -{"level":"WARN","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.4:30090: connect: connection refused\"","endpoint":"192.168.178.4:30090"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.2:30090"} -{"level":"WARN","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.2:30090: i/o timeout\"","endpoint":"192.168.178.2:30090"} -{"level":"ERROR","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:110","msg":"Failed to rejoin on all endpoints"} -``` - -This means that you have to recover the node manually. - - - - -In the Azure portal, find the cluster's resource group. -Inside the resource group, open the control plane *Virtual machine scale set* `constellation-scale-set-controlplanes-`. -On the left, go to **Settings** > **Instances** and check that enough members are in a *Running* state. - -Second, check the boot logs of these *Instances*. -In the scale set's *Instances* view, open the details page of the desired instance. -On the left, go to **Support + troubleshooting** > **Serial console**. - -In the serial console output, search for `Waiting for decryption key`. -Similar output to the following means your node was restarted and needs to decrypt the [state disk](../architecture/images.md#state-disk): - -```json -{"level":"INFO","ts":"2022-09-08T09:56:41Z","caller":"cmd/main.go:55","msg":"Starting disk-mapper","version":"2.0.0","cloudProvider":"azure"} -{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"setupManager","caller":"setup/setup.go:72","msg":"Preparing existing state disk"} -{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"recoveryServer","caller":"recoveryserver/server.go:59","msg":"Starting RecoveryServer"} -{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"rejoinClient","caller":"rejoinclient/client.go:65","msg":"Starting RejoinClient"} -``` - -The node will then try to connect to the [*JoinService*](../architecture/microservices.md#joinservice) and obtain the decryption key. 
-If this fails due to an unhealthy control plane, you will see log messages similar to the following: - -```json -{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"rejoinClient","caller":"rejoinclient/client.go:77","msg":"Received list with JoinService endpoints","endpoints":["10.9.0.5:30090","10.9.0.6:30090"]} -{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"10.9.0.5:30090"} -{"level":"WARN","ts":"2022-09-08T09:57:03Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 10.9.0.5:30090: i/o timeout\"","endpoint":"10.9.0.5:30090"} -{"level":"INFO","ts":"2022-09-08T09:57:03Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"10.9.0.6:30090"} -{"level":"WARN","ts":"2022-09-08T09:57:23Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 10.9.0.6:30090: i/o timeout\"","endpoint":"10.9.0.6:30090"} -{"level":"ERROR","ts":"2022-09-08T09:57:23Z","logger":"rejoinClient","caller":"rejoinclient/client.go:110","msg":"Failed to rejoin on all endpoints"} -``` - -This means that you have to recover the node manually. - - - - -First, check that the control plane *Instance Group* has enough members in a *Ready* state. -In the GCP Console, go to **Instance Groups** and check the group for the cluster's control plane `-control-plane-`. - -Second, check the status of the *VM Instances*. -Go to **VM Instances** and open the details of the desired instance. -Check the serial console output of that instance by opening the **Logs** > **Serial port 1 (console)** page: - -![GCP portal serial console link](../_media/recovery-gcp-serial-console-link.png) - -In the serial console output, search for `Waiting for decryption key`. -Similar output to the following means your node was restarted and needs to decrypt the [state disk](../architecture/images.md#state-disk): - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","caller":"cmd/main.go:55","msg":"Starting disk-mapper","version":"2.0.0","cloudProvider":"gcp"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"setupManager","caller":"setup/setup.go:72","msg":"Preparing existing state disk"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:65","msg":"Starting RejoinClient"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"recoveryServer","caller":"recoveryserver/server.go:59","msg":"Starting RecoveryServer"} -``` - -The node will then try to connect to the [*JoinService*](../architecture/microservices.md#joinservice) and obtain the decryption key. 
-If this fails due to an unhealthy control plane, you will see log messages similar to the following: - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:77","msg":"Received list with JoinService endpoints","endpoints":["192.168.178.4:30090","192.168.178.2:30090"]} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.4:30090"} -{"level":"WARN","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.4:30090: connect: connection refused\"","endpoint":"192.168.178.4:30090"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.2:30090"} -{"level":"WARN","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.2:30090: i/o timeout\"","endpoint":"192.168.178.2:30090"} -{"level":"ERROR","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:110","msg":"Failed to rejoin on all endpoints"} -``` - -This means that you have to recover the node manually. - - - - -First, open the STACKIT portal to view all servers in your project. Select individual control plane nodes `--control-plane--` and check that enough members are in a *Running* state. - -Second, check the boot logs of these servers. Click on a server name and select **Overview**. Find the **Machine Setup** section and click on **Web console** > **Open console**. - -In the serial console output, search for `Waiting for decryption key`. -Similar output to the following means your node was restarted and needs to decrypt the [state disk](../architecture/images.md#state-disk): - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","caller":"cmd/main.go:55","msg":"Starting disk-mapper","version":"2.0.0","cloudProvider":"gcp"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"setupManager","caller":"setup/setup.go:72","msg":"Preparing existing state disk"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:65","msg":"Starting RejoinClient"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"recoveryServer","caller":"recoveryserver/server.go:59","msg":"Starting RecoveryServer"} -``` - -The node will then try to connect to the [*JoinService*](../architecture/microservices.md#joinservice) and obtain the decryption key. 
-If this fails due to an unhealthy control plane, you will see log messages similar to the following: - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:77","msg":"Received list with JoinService endpoints","endpoints":["192.168.178.4:30090","192.168.178.2:30090"]} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.4:30090"} -{"level":"WARN","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.4:30090: connect: connection refused\"","endpoint":"192.168.178.4:30090"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.2:30090"} -{"level":"WARN","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.2:30090: i/o timeout\"","endpoint":"192.168.178.2:30090"} -{"level":"ERROR","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:110","msg":"Failed to rejoin on all endpoints"} -``` - -This means that you have to recover the node manually. - - - - -## Recover a cluster - -Recovering a cluster requires the following parameters: - -* The `constellation-state.yaml` file in your working directory or the cluster's endpoint -* The master secret of the cluster - -A cluster can be recovered like this: - -```bash -$ constellation recover --master-secret constellation-mastersecret.json -Pushed recovery key. -Pushed recovery key. -Pushed recovery key. -Recovered 3 control-plane nodes. -``` - -In the serial console output of the node you'll see a similar output to the following: - -```json -{"level":"INFO","ts":"2022-09-08T10:26:59Z","logger":"recoveryServer","caller":"recoveryserver/server.go:93","msg":"Received recover call"} -{"level":"INFO","ts":"2022-09-08T10:26:59Z","logger":"recoveryServer","caller":"recoveryserver/server.go:125","msg":"Received state disk key and measurement secret, shutting down server"} -{"level":"INFO","ts":"2022-09-08T10:26:59Z","logger":"recoveryServer.gRPC","caller":"zap/server_interceptors.go:61","msg":"finished streaming call with code OK","grpc.start_time":"2022-09-08T10:26:59Z","system":"grpc","span.kind":"server","grpc.service":"recoverproto.API","grpc.method":"Recover","peer.address":"192.0.2.3:41752","grpc.code":"OK","grpc.time_ms":15.701} -{"level":"INFO","ts":"2022-09-08T10:27:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:87","msg":"RejoinClient stopped"} -``` diff --git a/docs/versioned_docs/version-2.16/workflows/s3proxy.md b/docs/versioned_docs/version-2.16/workflows/s3proxy.md deleted file mode 100644 index 121e8a461..000000000 --- a/docs/versioned_docs/version-2.16/workflows/s3proxy.md +++ /dev/null @@ -1,58 +0,0 @@ -# Install s3proxy - -Constellation includes a transparent client-side encryption proxy for [AWS S3](https://aws.amazon.com/de/s3/) and compatible stores. -s3proxy encrypts objects before sending them to S3 and automatically decrypts them on retrieval, without requiring changes to your application. 
-With s3proxy, you can use S3 for storage in a confidential way without having to trust the storage provider. - -## Limitations - -Currently, s3proxy has the following limitations: -- Only `PutObject` and `GetObject` requests are encrypted/decrypted by s3proxy. -By default, s3proxy will block requests that may expose unencrypted data to S3 (e.g. UploadPart). -The `allow-multipart` flag disables request blocking for evaluation purposes. -- Using the [Range](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html#API_GetObject_RequestSyntax) header on `GetObject` is currently not supported and will result in an error. - -These limitations will be removed with future iterations of s3proxy. -If you want to use s3proxy but these limitations stop you from doing so, consider [opening an issue](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&projects=&template=feature_request.yml). - -## Deployment - -You can add the s3proxy to your Constellation cluster as follows: -1. Add the Edgeless Systems chart repository: - ```bash - helm repo add edgeless https://helm.edgeless.systems/stable - helm repo update - ``` -2. Set ACCESS_KEY and ACCESS_SECRET to valid credentials you want s3proxy to use to interact with S3. -3. Deploy s3proxy: - ```bash - helm install s3proxy edgeless/s3proxy --set awsAccessKeyID="$ACCESS_KEY" --set awsSecretAccessKey="$ACCESS_SECRET" - ``` - -If you want to run a demo application, check out the [Filestash with s3proxy](../getting-started/examples/filestash-s3proxy.md) example. - - -## Technical details - -### Encryption - -s3proxy relies on Google's [Tink Cryptographic Library](https://developers.google.com/tink) to implement cryptographic operations securely. -The used cryptographic primitives are [NIST SP 800 38f](https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-38F.pdf) for key wrapping and [AES](https://en.wikipedia.org/wiki/Advanced_Encryption_Standard)-[GCM](https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Galois/counter_(GCM)) with 256 bit keys for data encryption. - -s3proxy uses [envelope encryption](https://cloud.google.com/kms/docs/envelope-encryption) to encrypt objects. -This means s3proxy uses a key encryption key (KEK) issued by the [KeyService](../architecture/microservices.md#keyservice) to encrypt data encryption keys (DEKs). -Each S3 object is encrypted with its own DEK. -The encrypted DEK is then saved as metadata of the encrypted object. -This enables key rotation of the KEK without re-encrypting the data in S3. -The approach also allows access to objects from different locations, as long as each location has access to the KEK. - -### Traffic interception - -To use s3proxy, you have to redirect your outbound S3 traffic to s3proxy. -This can either be done by modifying your client application or by changing the deployment of your application. - -The necessary deployment modifications are to add DNS redirection and a trusted TLS certificate to the client's trust store. -DNS redirection can be defined for each pod, allowing you to use s3proxy for one application without changing other applications in the same cluster. -Adding a trusted TLS certificate is necessary as clients communicate with s3proxy via HTTPS. -To have your client application trust s3proxy's TLS certificate, the certificate has to be added to the client's certificate trust store. -The [Filestash with s3proxy](../getting-started/examples/filestash-s3proxy.md) example shows how to do this. 
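To illustrate the per-pod redirection described above, one possible approach is a `hostAliases` entry that resolves the S3 endpoint hostname to the s3proxy service. The following is a rough sketch with assumed names; the service name, endpoint, and client image are placeholders, and the linked Filestash example shows a complete setup including the TLS trust store:

```bash
# Look up the ClusterIP of the s3proxy service (service name assumed from the Helm release)
S3PROXY_IP=$(kubectl get svc s3proxy -o jsonpath='{.spec.clusterIP}')

# Start a client pod whose requests to the S3 endpoint are sent to s3proxy instead
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: s3-client
spec:
  hostAliases:
    - ip: "${S3PROXY_IP}"
      hostnames:
        - "s3.eu-west-1.amazonaws.com"
  containers:
    - name: client
      image: amazon/aws-cli
      command: ["sleep", "infinity"]
EOF
```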
diff --git a/docs/versioned_docs/version-2.16/workflows/sbom.md b/docs/versioned_docs/version-2.16/workflows/sbom.md deleted file mode 100644 index 6c1702dee..000000000 --- a/docs/versioned_docs/version-2.16/workflows/sbom.md +++ /dev/null @@ -1,93 +0,0 @@ -# Consume software bill of materials (SBOMs) - - - ---- - -Constellation builds produce a [software bill of materials (SBOM)](https://www.ntia.gov/SBOM) for each generated [artifact](../architecture/microservices.md). -You can use SBOMs to make informed decisions about dependencies and vulnerabilities in a given application. Enterprises rely on SBOMs to maintain an inventory of used applications, which allows them to take data-driven approaches to managing risks related to vulnerabilities. - -SBOMs for Constellation are generated using [Syft](https://github.com/anchore/syft), signed using [Cosign](https://github.com/sigstore/cosign), and stored with the produced artifact. - -:::note -The public key for Edgeless Systems' long-term code-signing key is: - -``` ------BEGIN PUBLIC KEY----- -MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEf8F1hpmwE+YCFXzjGtaQcrL6XZVT -JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== ------END PUBLIC KEY----- -``` - -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). - -Make sure the key is available in a file named `cosign.pub` to execute the following examples. -::: - -## Verify and download SBOMs - -The following sections detail how to work with each type of artifact to verify and extract the SBOM. - -### Constellation CLI - -The SBOM for Constellation CLI is made available on the [GitHub release page](https://github.com/edgelesssys/constellation/releases). The SBOM (`constellation.spdx.sbom`) and corresponding signature (`constellation.spdx.sbom.sig`) are valid for each Constellation CLI for a given version, regardless of architecture and operating system. - -```bash -curl -LO https://github.com/edgelesssys/constellation/releases/download/v2.2.0/constellation.spdx.sbom -curl -LO https://github.com/edgelesssys/constellation/releases/download/v2.2.0/constellation.spdx.sbom.sig -cosign verify-blob --key cosign.pub --signature constellation.spdx.sbom.sig constellation.spdx.sbom -``` - -### Container Images - -SBOMs for container images are [attached to the image using Cosign](https://docs.sigstore.dev/cosign/signing/other_types/#sboms-software-bill-of-materials) and uploaded to the same registry. 
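-
-As noted above, the examples in this section assume that Edgeless Systems' public key is stored locally as `cosign.pub`. One way to fetch it (assuming `curl` is available):
-
-```bash
-# Save the public code-signing key under the file name used by the examples.
-curl -fsSL https://edgeless.systems/es.pub -o cosign.pub
-```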
- -As a consumer, use cosign to download and verify the SBOM: - -```bash -# Verify and download the attestation statement -cosign verify-attestation ghcr.io/edgelesssys/constellation/verification-service@v2.2.0 --type 'https://cyclonedx.org/bom' --key cosign.pub --output-file verification-service.att.json -# Extract SBOM from attestation statement -jq -r .payload verification-service.att.json | base64 -d > verification-service.cyclonedx.sbom -``` - -A successful verification should result in similar output: - -```shell-session -$ cosign verify-attestation ghcr.io/edgelesssys/constellation/verification-service@v2.2.0 --type 'https://cyclonedx.org/bom' --key cosign.pub --output-file verification-service.sbom - -Verification for ghcr.io/edgelesssys/constellation/verification-service@v2.2.0 -- -The following checks were performed on each of these signatures: - - The cosign claims were validated - - The signatures were verified against the specified public key -$ jq -r .payload verification-service.sbom | base64 -d > verification-service.cyclonedx.sbom -``` - -:::note - -This example considers only the `verification-service`. The same approach works for all containers in the [Constellation container registry](https://github.com/orgs/edgelesssys/packages?repo_name=constellation). - -::: - - - -## Vulnerability scanning - -You can use a plethora of tools to consume SBOMs. This section provides suggestions for tools that are popular and known to produce reliable results, but any tool that consumes [SPDX](https://spdx.dev/) or [CycloneDX](https://cyclonedx.org/) files should work. - -Syft is able to [convert between the two formats](https://github.com/anchore/syft#format-conversion-experimental) in case you require a specific type. - -### Grype - -[Grype](https://github.com/anchore/grype) is a CLI tool that lends itself well for integration into CI/CD systems or local developer machines. It's also able to consume the signed attestation statement directly and does the verification in one go. - -```bash -grype att:verification-service.sbom --key cosign.pub --add-cpes-if-none -q -``` - -### Dependency Track - -[Dependency Track](https://dependencytrack.org/) is one of the oldest and most mature solutions when it comes to managing software inventory and vulnerabilities. Once imported, it continuously scans SBOMs for new vulnerabilities. It supports the CycloneDX format and provides direct guidance on how to comply with [U.S. Executive Order 14028](https://docs.dependencytrack.org/usage/executive-order-14028/). diff --git a/docs/versioned_docs/version-2.16/workflows/scale.md b/docs/versioned_docs/version-2.16/workflows/scale.md deleted file mode 100644 index 28f19e3f1..000000000 --- a/docs/versioned_docs/version-2.16/workflows/scale.md +++ /dev/null @@ -1,122 +0,0 @@ -# Scale your cluster - -Constellation provides all features of a Kubernetes cluster including scaling and autoscaling. - -## Worker node scaling - -### Autoscaling - -Constellation comes with autoscaling disabled by default. To enable autoscaling, find the scaling group of -worker nodes: - -```bash -kubectl get scalinggroups -o json | yq '.items | .[] | select(.spec.role == "Worker") | [{"name": .metadata.name, "nodeGoupName": .spec.nodeGroupName}]' -``` - -This will output a list of scaling groups with the corresponding cloud provider name (`name`) and the cloud provider agnostic name of the node group (`nodeGroupName`). 
- -Then, patch the `autoscaling` field of the scaling group resource with the desired `name` to `true`: - -```bash -# Replace with the name of the scaling group you want to enable autoscaling for -worker_group= -kubectl patch scalinggroups $worker_group --patch '{"spec":{"autoscaling": true}}' --type='merge' -kubectl get scalinggroup $worker_group -o jsonpath='{.spec}' | yq -P -``` - -The cluster autoscaler now automatically provisions additional worker nodes so that all pods have a place to run. -You can configure the minimum and maximum number of worker nodes in the scaling group by patching the `min` or -`max` fields of the scaling group resource: - -```bash -kubectl patch scalinggroups $worker_group --patch '{"spec":{"max": 5}}' --type='merge' -kubectl get scalinggroup $worker_group -o jsonpath='{.spec}' | yq -P -``` - -The cluster autoscaler will now never provision more than 5 worker nodes. - -If you want to see the autoscaling in action, try to add a deployment with a lot of replicas, like the -following Nginx deployment. The number of replicas needed to trigger the autoscaling depends on the size of -and count of your worker nodes. Wait for the rollout of the deployment to finish and compare the number of -worker nodes before and after the deployment: - -```bash -kubectl create deployment nginx --image=nginx --replicas 150 -kubectl -n kube-system get nodes -kubectl rollout status deployment nginx -kubectl -n kube-system get nodes -``` - -### Manual scaling - -Alternatively, you can manually scale your cluster up or down: - - - - -1. Go to Auto Scaling Groups and select the worker ASG to scale up. -2. Click **Edit** -3. Set the new (increased) **Desired capacity** and **Update**. - - - - -1. Find your Constellation resource group. -2. Select the `scale-set-workers`. -3. Go to **settings** and **scaling**. -4. Set the new **instance count** and **save**. - - - - -1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). -2. **Edit** the **worker** instance group. -3. Set the new **number of instances** and **save**. - - - - -Dynamic cluster scaling isn't yet supported for STACKIT. -Support will be introduced in one of the upcoming releases. - - - - -## Control-plane node scaling - -Control-plane nodes can **only be scaled manually and only scaled up**! - -To increase the number of control-plane nodes, follow these steps: - - - - -1. Go to Auto Scaling Groups and select the control-plane ASG to scale up. -2. Click **Edit** -3. Set the new (increased) **Desired capacity** and **Update**. - - - - -1. Find your Constellation resource group. -2. Select the `scale-set-controlplanes`. -3. Go to **settings** and **scaling**. -4. Set the new (increased) **instance count** and **save**. - - - - -1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). -2. **Edit** the **control-plane** instance group. -3. Set the new (increased) **number of instances** and **save**. - - - - -Dynamic cluster scaling isn't yet supported for STACKIT. -Support will be introduced in one of the upcoming releases. - - - - -If you scale down the number of control-planes nodes, the removed nodes won't be able to exit the `etcd` cluster correctly. This will endanger the quorum that's required to run a stable Kubernetes control plane. 
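-
-The console steps in the manual scaling sections above can also be scripted with the respective CSP CLIs. The following is only a sketch for scaling worker groups; all group names, resource groups, and zones are placeholders that you need to look up in your CSP first.
-
-```bash
-# AWS: set the desired capacity of the worker Auto Scaling Group
-aws autoscaling set-desired-capacity \
-  --auto-scaling-group-name "<worker-asg-name>" \
-  --desired-capacity 4
-
-# Azure: resize the worker scale set
-az vmss scale --resource-group "<resource-group>" --name "<scale-set-workers>" --new-capacity 4
-
-# GCP: resize the worker managed instance group
-gcloud compute instance-groups managed resize "<worker-instance-group>" --size 4 --zone "<zone>"
-```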
diff --git a/docs/versioned_docs/version-2.16/workflows/storage.md b/docs/versioned_docs/version-2.16/workflows/storage.md deleted file mode 100644 index a5c52be90..000000000 --- a/docs/versioned_docs/version-2.16/workflows/storage.md +++ /dev/null @@ -1,281 +0,0 @@ -# Use persistent storage - -Persistent storage in Kubernetes requires cloud-specific configuration. -For abstraction of container storage, Kubernetes offers [volumes](https://kubernetes.io/docs/concepts/storage/volumes/), -allowing users to mount storage solutions directly into containers. -The [Container Storage Interface (CSI)](https://kubernetes-csi.github.io/docs/) is the standard interface for exposing arbitrary block and file storage systems into containers in Kubernetes. -Cloud service providers (CSPs) offer their own CSI-based solutions for cloud storage. - -## Confidential storage - -Most cloud storage solutions support encryption, such as [GCE Persistent Disks (PD)](https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek). -Constellation supports the available CSI-based storage options for Kubernetes engines in AWS, Azure, GCP, and STACKIT. -However, their encryption takes place in the storage backend and is managed by the CSP. -Thus, using the default CSI drivers for these storage types means trusting the CSP with your persistent data. - -To address this, Constellation provides CSI drivers for AWS EBS, Azure Disk, GCE PD, and OpenStack Cinder, offering [encryption on the node level](../architecture/keys.md#storage-encryption). They enable transparent encryption for persistent volumes without needing to trust the cloud backend. Plaintext data never leaves the confidential VM context, offering you confidential storage. - -For more details see [encrypted persistent storage](../architecture/encrypted-storage.md). - -## CSI drivers - -Constellation supports the following drivers, which offer node-level encryption and optional integrity protection. - - - - -**Constellation CSI driver for AWS Elastic Block Store** -Mount [Elastic Block Store](https://aws.amazon.com/ebs/) storage volumes into your Constellation cluster. -Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-aws-ebs-csi-driver) for more information. - - - - -**Constellation CSI driver for Azure Disk**: -Mount Azure [Disk Storage](https://azure.microsoft.com/en-us/services/storage/disks/#overview) into your Constellation cluster. -See the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-azuredisk-csi-driver) for more information. -Since Azure Disks are mounted as `ReadWriteOnce`, they're only available to a single pod. - - - - -**Constellation CSI driver for GCP Persistent Disk**: -Mount [Persistent Disk](https://cloud.google.com/persistent-disk) block storage into your Constellation cluster. -Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-gcp-compute-persistent-disk-csi-driver) for more information. - - - - -**Constellation CSI driver for STACKIT / OpenStack Cinder** -Mount [Cinder](https://docs.openstack.org/cinder/latest/) block storage volumes into your Constellation cluster. 
-Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-cloud-provider-openstack) for more information. - - - - -Note that in case the options above aren't a suitable solution for you, Constellation is compatible with all other CSI-based storage options. For example, you can use [AWS EFS](https://docs.aws.amazon.com/en_en/eks/latest/userguide/efs-csi.html), [Azure Files](https://docs.microsoft.com/en-us/azure/storage/files/storage-files-introduction), or [GCP Filestore](https://cloud.google.com/filestore) with Constellation out of the box. Constellation is just not providing transparent encryption on the node level for these storage types yet. - -## Installation - -The Constellation CLI automatically installs Constellation's CSI driver for the selected CSP in your cluster. -If you don't need a CSI driver or wish to deploy your own, you can disable the automatic installation by setting `deployCSIDriver` to `false` in your Constellation config file. - - - - -AWS comes with two storage classes by default. - -* `encrypted-rwo` - * Uses [SSDs of `gp3` type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html) - * ext-4 filesystem - * Encryption of all data written to disk -* `integrity-encrypted-rwo` - * Uses [SSDs of `gp3` type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html) - * ext-4 filesystem - * Encryption of all data written to disk - * Integrity protection of data written to disk - -For more information on encryption algorithms and key sizes, refer to [cryptographic algorithms](../architecture/encrypted-storage.md#cryptographic-algorithms). - -:::info - -The default storage class is set to `encrypted-rwo` for performance reasons. -If you want integrity-protected storage, set the `storageClassName` parameter of your persistent volume claim to `integrity-encrypted-rwo`. - -Alternatively, you can create your own storage class with integrity protection enabled by adding `csi.storage.k8s.io/fstype: ext4-integrity` to the class `parameters`. -Or use another filesystem by specifying another file system type with the suffix `-integrity`, e.g., `csi.storage.k8s.io/fstype: xfs-integrity`. - -Note that volume expansion isn't supported for integrity-protected disks. - -::: - - - - -Azure comes with two storage classes by default. - -* `encrypted-rwo` - * Uses [Standard SSDs](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#standard-ssds) - * ext-4 filesystem - * Encryption of all data written to disk -* `integrity-encrypted-rwo` - * Uses [Premium SSDs](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#premium-ssds) - * ext-4 filesystem - * Encryption of all data written to disk - * Integrity protection of data written to disk - -For more information on encryption algorithms and key sizes, refer to [cryptographic algorithms](../architecture/encrypted-storage.md#cryptographic-algorithms). - -:::info - -The default storage class is set to `encrypted-rwo` for performance reasons. -If you want integrity-protected storage, set the `storageClassName` parameter of your persistent volume claim to `integrity-encrypted-rwo`. - -Alternatively, you can create your own storage class with integrity protection enabled by adding `csi.storage.k8s.io/fstype: ext4-integrity` to the class `parameters`. 
-Or use another filesystem by specifying another file system type with the suffix `-integrity`, e.g., `csi.storage.k8s.io/fstype: xfs-integrity`. - -Note that volume expansion isn't supported for integrity-protected disks. - -::: - - - - -GCP comes with two storage classes by default. - -* `encrypted-rwo` - * Uses [standard persistent disks](https://cloud.google.com/compute/docs/disks#pdspecs) - * ext-4 filesystem - * Encryption of all data written to disk -* `integrity-encrypted-rwo` - * Uses [performance (SSD) persistent disks](https://cloud.google.com/compute/docs/disks#pdspecs) - * ext-4 filesystem - * Encryption of all data written to disk - * Integrity protection of data written to disk - -For more information on encryption algorithms and key sizes, refer to [cryptographic algorithms](../architecture/encrypted-storage.md#cryptographic-algorithms). - -:::info - -The default storage class is set to `encrypted-rwo` for performance reasons. -If you want integrity-protected storage, set the `storageClassName` parameter of your persistent volume claim to `integrity-encrypted-rwo`. - -Alternatively, you can create your own storage class with integrity protection enabled by adding `csi.storage.k8s.io/fstype: ext4-integrity` to the class `parameters`. -Or use another filesystem by specifying another file system type with the suffix `-integrity`, e.g., `csi.storage.k8s.io/fstype: xfs-integrity`. - -Note that volume expansion isn't supported for integrity-protected disks. - -::: - - - - -STACKIT comes with two storage classes by default. - -* `encrypted-rwo` - * Uses [disks of `storage_premium_perf1` type](https://docs.stackit.cloud/stackit/en/service-plans-blockstorage-75137974.html) - * ext-4 filesystem - * Encryption of all data written to disk -* `integrity-encrypted-rwo` - * Uses [disks of `storage_premium_perf1` type](https://docs.stackit.cloud/stackit/en/service-plans-blockstorage-75137974.html) - * ext-4 filesystem - * Encryption of all data written to disk - * Integrity protection of data written to disk - -For more information on encryption algorithms and key sizes, refer to [cryptographic algorithms](../architecture/encrypted-storage.md#cryptographic-algorithms). - -:::info - -The default storage class is set to `encrypted-rwo` for performance reasons. -If you want integrity-protected storage, set the `storageClassName` parameter of your persistent volume claim to `integrity-encrypted-rwo`. - -Alternatively, you can create your own storage class with integrity protection enabled by adding `csi.storage.k8s.io/fstype: ext4-integrity` to the class `parameters`. -Or use another filesystem by specifying another file system type with the suffix `-integrity`, e.g., `csi.storage.k8s.io/fstype: xfs-integrity`. - -Note that volume expansion isn't supported for integrity-protected disks. - -::: - - - - -1. Create a [persistent volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) - - A [persistent volume claim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) is a request for storage with certain properties. - It can refer to a storage class. - The following creates a persistent volume claim, requesting 20 GB of storage via the `encrypted-rwo` storage class: - - ```bash - cat < - ---- - -You can terminate your cluster using the CLI. For this, you need the Terraform state directory named [`constellation-terraform`](../reference/terraform.md) in the current directory. 
- -:::danger - -All ephemeral storage and state of your cluster will be lost. Make sure any data is safely stored in persistent storage. Constellation can recreate your cluster and the associated encryption keys, but won't backup your application data automatically. - -::: - - - -Terminate the cluster by running: - -```bash -constellation terminate -``` - -Or without confirmation (e.g., for automation purposes): - -```bash -constellation terminate --yes -``` - -This deletes all resources created by Constellation in your cloud environment. -All local files created by the `apply` command are deleted as well, except for `constellation-mastersecret.json` and the configuration file. - -:::caution - -Termination can fail if additional resources have been created that depend on the ones managed by Constellation. In this case, you need to delete these additional -resources manually. Just run the `terminate` command again afterward to continue the termination process of the cluster. - -::: - - - -Terminate the cluster by running: - -```bash -terraform destroy -``` - -Delete all files that are no longer needed: - -```bash -rm constellation-state.yaml constellation-admin.conf -``` - -Only the `constellation-mastersecret.json` and the configuration file remain. - - - diff --git a/docs/versioned_docs/version-2.16/workflows/terraform-provider.md b/docs/versioned_docs/version-2.16/workflows/terraform-provider.md deleted file mode 100644 index ed8f46eda..000000000 --- a/docs/versioned_docs/version-2.16/workflows/terraform-provider.md +++ /dev/null @@ -1,129 +0,0 @@ -# Use the Terraform provider - -The Constellation Terraform provider allows to manage the full lifecycle of a Constellation cluster (namely creation, upgrades, and deletion) via Terraform. -The provider is available through the [Terraform registry](https://registry.terraform.io/providers/edgelesssys/constellation/latest) and is released in lock-step with Constellation releases. - -## Prerequisites - -- a Linux / Mac operating system (ARM64/AMD64) -- a Terraform installation of version `v1.4.4` or above - -## Quick setup - -This example shows how to set up a Constellation cluster with the reference IAM and infrastructure setup. This setup is also used when creating a Constellation cluster through the Constellation CLI. You can either consume the IAM / infrastructure modules through a remote source (recommended) or local files. The latter requires downloading the infrastructure and IAM modules for the corresponding CSP from `terraform-modules.zip` on the [Constellation release page](https://github.com/edgelesssys/constellation/releases/latest) and placing them in the Terraform workspace directory. - -1. Create a directory (workspace) for your Constellation cluster. - - ```bash - mkdir constellation-workspace - cd constellation-workspace - ``` - -2. Use one of the [example configurations for using the Constellation Terraform provider](https://github.com/edgelesssys/constellation/tree/main/terraform-provider-constellation/examples/full) or create a `main.tf` file and fill it with the resources you want to create. The [Constellation Terraform provider documentation](https://registry.terraform.io/providers/edgelesssys/constellation/latest) offers thorough documentation on the resources and their attributes. -3. Initialize and apply the Terraform configuration. - - - - Initialize the providers and apply the configuration. 
- - ```bash - terraform init - terraform apply - ``` - - Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. - - - When creating a cluster on Azure, you need to manually patch the policy of the MAA provider before creating the Constellation cluster, as this feature isn't available in Azure's Terraform provider yet. The Constellation CLI provides a utility for patching, but you - can also do it manually. - - ```bash - terraform init - terraform apply -target module.azure_iam # adjust resource path if not using the example configuration - terraform apply -target module.azure_infrastructure # adjust resource path if not using the example configuration - constellation maa-patch $(terraform output -raw maa_url) # adjust output path / input if not using the example configuration or manually patch the resource - terraform apply -target constellation_cluster.azure_example # adjust resource path if not using the example configuration - ``` - - Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. - - Use the following policy if manually performing the patch. - - ``` - version= 1.0; - authorizationrules - { - [type=="x-ms-azurevm-default-securebootkeysvalidated", value==false] => deny(); - [type=="x-ms-azurevm-debuggersdisabled", value==false] => deny(); - // The line below was edited to use the MAA provider within Constellation. Do not edit manually. - //[type=="secureboot", value==false] => deny(); - [type=="x-ms-azurevm-signingdisabled", value==false] => deny(); - [type=="x-ms-azurevm-dbvalidated", value==false] => deny(); - [type=="x-ms-azurevm-dbxvalidated", value==false] => deny(); - => permit(); - }; - issuancerules - { - }; - ``` - - - - Initialize the providers and apply the configuration. - - ```bash - terraform init - terraform apply - ``` - - Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. - - - Initialize the providers and apply the configuration. - - ```bash - terraform init - terraform apply - ``` - - Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. - - - -4. Connect to the cluster. - - ```bash - terraform output -raw kubeconfig > constellation-admin.conf - export KUBECONFIG=$(realpath constellation-admin.conf) - ``` - -## Bringing your own infrastructure - -Instead of using the example infrastructure used in the [quick setup](#quick-setup), you can also provide your own infrastructure. -If you need a starting point for a custom infrastructure setup, you can download the infrastructure / IAM Terraform modules for the respective CSP from the Constellation [GitHub releases](https://github.com/edgelesssys/constellation/releases). You can modify and extend the modules per your requirements, while keeping the basic functionality intact. 
-The module contains: - -- `{csp}`: cloud resources the cluster runs on -- `iam/{csp}`: IAM resources used within the cluster - -When upgrading your cluster, make sure to check the Constellation release notes for potential breaking changes in the reference infrastructure / IAM modules that need to be considered. - -## Cluster upgrades - -:::tip -Also see the [general documentation on cluster upgrades](./upgrade.md). -::: - -The steps for applying the upgrade are as follows: - -1. Update the version constraint of the Constellation Terraform provider in the `required_providers` block in your Terraform configuration. -2. If you explicitly set any of the version attributes of the provider's resources and data sources (e.g. `image_version` or `constellation_microservice_version`), make sure to update them too. Refer to Constellation's [version support policy](https://github.com/edgelesssys/constellation/blob/main/dev-docs/workflows/versions-support.md) for more information on how each Constellation version and its dependencies are supported. -3. Update the IAM / infrastructure configuration. - - For [remote addresses as module sources](https://developer.hashicorp.com/terraform/language/modules/sources#fetching-archives-over-http), update the version number inside the address of the `source` field of the infrastructure / IAM module to the target version. - - For [local paths as module sources](https://developer.hashicorp.com/terraform/language/modules/sources#local-paths) or when [providing your own infrastructure](#bringing-your-own-infrastructure), see the changes made in the reference modules since the upgrade's origin version and adjust your infrastructure / IAM configuration accordingly. -4. Upgrade the Terraform module and provider dependencies and apply the targeted configuration. - -```bash - terraform init -upgrade - terraform apply -``` diff --git a/docs/versioned_docs/version-2.16/workflows/troubleshooting.md b/docs/versioned_docs/version-2.16/workflows/troubleshooting.md deleted file mode 100644 index 195bce1cc..000000000 --- a/docs/versioned_docs/version-2.16/workflows/troubleshooting.md +++ /dev/null @@ -1,151 +0,0 @@ -# Troubleshooting - -This section aids you in finding problems when working with Constellation. - -## Common issues - -### Issues with creating new clusters - -When you create a new cluster, you should always use the [latest release](https://github.com/edgelesssys/constellation/releases/latest). -If something doesn't work, check out the [known issues](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22). - -### Azure: Resource Providers can't be registered - -On Azure, you may receive the following error when running `apply` or `terminate` with limited IAM permissions: - -```shell-session -Error: Error ensuring Resource Providers are registered. - -Terraform automatically attempts to register the Resource Providers it supports to -ensure it's able to provision resources. - -If you don't have permission to register Resource Providers you may wish to use the -"skip_provider_registration" flag in the Provider block to disable this functionality. - -[...] -``` - -To continue, please ensure that the [required resource providers](../getting-started/install.md#required-permissions) have been registered in your subscription by your administrator. - -Afterward, set `ARM_SKIP_PROVIDER_REGISTRATION=true` as an environment variable and either run `apply` or `terminate` again. 
-For example: - -```bash -ARM_SKIP_PROVIDER_REGISTRATION=true constellation apply -``` - -Or alternatively, for `terminate`: - -```bash -ARM_SKIP_PROVIDER_REGISTRATION=true constellation terminate -``` - -### Azure: Can't update attestation policy - -On Azure, you may receive the following error when running `apply` from within an Azure environment, e.g., an Azure VM: - -```shell-session -An error occurred: patching policies: updating attestation policy: unexpected status code: 403 Forbidden -``` - -The problem occurs because the Azure SDK we use internally attempts to [authenticate towards the Azure API with the managed identity of your current environment instead of the Azure CLI token](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DefaultAzureCredential). - -We decided not to deviate from this behavior and comply with the ordering of credentials. - -A solution is to add the [required permissions](../getting-started/install.md#required-permissions) to the managed identity of your environment. For example, the managed identity of your Azure VM, instead of the account that you've authenticated with in the Azure CLI. - -If your setup requires a change in the ordering of credentials, please open an issue and explain your desired behavior. - - - -### Nodes fail to join with error `untrusted measurement value` - -This error indicates that a node's [attestation statement](../architecture/attestation.md) contains measurements that don't match the trusted values expected by the [JoinService](../architecture/microservices.md#joinservice). -This may for example happen if the cloud provider updates the VM's firmware such that it influences the [runtime measurements](../architecture/attestation.md#runtime-measurements) in an unforeseen way. -A failed upgrade due to an erroneous attestation config can also cause this error. -You can change the expected measurements to resolve the failure. - -:::caution - -Attestation and trusted measurements are crucial for the security of your cluster. -Be extra careful when manually changing these settings. -When in doubt, check if the encountered [issue is known](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22) or [contact support](https://github.com/edgelesssys/constellation#support). - -::: - -:::tip - -During an upgrade with modified attestation config, a backup of the current configuration is stored in the `join-config` config map in the `kube-system` namespace under the `attestationConfig_backup` key. To restore the old attestation config after a failed upgrade, replace the value of `attestationConfig` with the value from `attestationConfig_backup`: - -```bash -kubectl patch configmaps -n kube-system join-config -p "{\"data\":{\"attestationConfig\":\"$(kubectl get configmaps -n kube-system join-config -o "jsonpath={.data.attestationConfig_backup}")\"}}" -``` - -::: - -You can use the `apply` command to change measurements of a running cluster: - -1. Modify the `measurements` key in your local `constellation-conf.yaml` to the expected values. -2. Run `constellation apply`. - -Keep in mind that running `apply` also applies any version changes from your config to the cluster. 
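-
-To compare the values in your local config against what the cluster currently enforces, you can additionally read the active attestation config from the `join-config` config map mentioned above (a read-only check):
-
-```bash
-# Print the attestation config (including measurements) currently used by the JoinService.
-kubectl get configmap -n kube-system join-config -o jsonpath='{.data.attestationConfig}'
-```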
- -You can run these commands to learn about the versions currently configured in the cluster: - -- Kubernetes API server version: `kubectl get nodeversion constellation-version -o json -n kube-system | jq .spec.kubernetesClusterVersion` -- image version: `kubectl get nodeversion constellation-version -o json -n kube-system | jq .spec.imageVersion` -- microservices versions: `helm list --filter 'constellation-services' -n kube-system` - -### Upgrading Kubernetes resources fails - -Constellation manages its Kubernetes resources using Helm. -When applying an upgrade, the charts that are about to be installed, and a values override file `overrides.yaml`, -are saved to disk in your current workspace under `constellation-upgrade/upgrade-/helm-charts/`. -If upgrading the charts using the Constellation CLI fails, you can review these charts and try to manually apply the upgrade. - -:::caution - -Changing and manually applying the charts may destroy cluster resources and can lead to broken Constellation deployments. -Proceed with caution and when in doubt, -check if the encountered [issue is known](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22) or [contact support](https://github.com/edgelesssys/constellation#support). - -::: - -## Diagnosing issues - -### Logs - -To get started on diagnosing issues with Constellation, it's often helpful to collect logs from nodes, pods, or other resources in the cluster. Most logs are available through Kubernetes' standard -[logging interfaces](https://kubernetes.io/docs/concepts/cluster-administration/logging/). - -To debug issues occurring at boot time of the nodes, you can use the serial console interface of the CSP while the machine boots to get a read-only view of the boot logs. - -Apart from that, Constellation also offers further [observability integrations](../architecture/observability.md). - -### Node shell access - -Debugging via a shell on a node is [directly supported by Kubernetes](https://kubernetes.io/docs/tasks/debug/debug-application/debug-running-pod/#node-shell-session). - -1. Figure out which node to connect to: - - ```bash - kubectl get nodes - # or to see more information, such as IPs: - kubectl get nodes -o wide - ``` - -2. Connect to the node: - - ```bash - kubectl debug node/constell-worker-xksa0-000000 -it --image=busybox - ``` - - You will be presented with a prompt. - - The nodes file system is mounted at `/host`. - -3. Once finished, clean up the debug pod: - - ```bash - kubectl delete pod node-debugger-constell-worker-xksa0-000000-bjthj - ``` diff --git a/docs/versioned_docs/version-2.16/workflows/trusted-launch.md b/docs/versioned_docs/version-2.16/workflows/trusted-launch.md deleted file mode 100644 index d6d01d8eb..000000000 --- a/docs/versioned_docs/version-2.16/workflows/trusted-launch.md +++ /dev/null @@ -1,54 +0,0 @@ -# Use Azure trusted launch VMs - -Constellation also supports [trusted launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch) on Microsoft Azure. Trusted launch VMs don't offer the same level of security as Confidential VMs, but are available in more regions and in larger quantities. The main difference between trusted launch VMs and normal VMs is that the former offer vTPM-based remote attestation. When used with trusted launch VMs, Constellation relies on vTPM-based remote attestation to verify nodes. 
- -:::caution - -Trusted launch VMs don't provide runtime encryption and don't keep the cloud service provider (CSP) out of your trusted computing base. - -::: - -Constellation supports trusted launch VMs with instance types `Standard_D*_v4` and `Standard_E*_v4`. Run `constellation config instance-types` for a list of all supported instance types. - -## VM images - -Azure currently doesn't support [community galleries for trusted launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/share-gallery-community). Thus, you need to manually import the Constellation node image into your cloud subscription. - -The latest image is available at `https://cdn.confidential.cloud/constellation/images/azure/trusted-launch/v2.2.0/constellation.img`. Simply adjust the version number to download a newer version. - -After you've downloaded the image, create a resource group `constellation-images` in your Azure subscription and import the image. -You can use a script to do this: - -```bash -wget https://raw.githubusercontent.com/edgelesssys/constellation/main/hack/importAzure.sh -chmod +x importAzure.sh -AZURE_IMAGE_VERSION=2.2.0 AZURE_RESOURCE_GROUP_NAME=constellation-images AZURE_IMAGE_FILE=./constellation.img ./importAzure.sh -``` - -The script creates the following resources: - -1. A new image gallery with the default name `constellation-import` -2. A new image definition with the default name `constellation` -3. The actual image with the provided version. In this case `2.2.0` - -Once the import is completed, use the `ID` of the image version in your `constellation-conf.yaml` for the `image` field. Set `confidentialVM` to `false`. - -Fetch the image measurements: - -```bash -IMAGE_VERSION=2.2.0 -URL=https://public-edgeless-constellation.s3.us-east-2.amazonaws.com//communitygalleries/constellationcvm-b3782fa0-0df7-4f2f-963e-fc7fc42663df/images/constellation/versions/$IMAGE_VERSION/measurements.yaml -constellation config fetch-measurements -u$URL -s$URL.sig -``` - -:::info - -The [`constellation apply`](create.md) command will issue a warning because manually imported images aren't recognized as production grade images: - -```shell-session -Configured image doesn't look like a released production image. Double check image before deploying to production. -``` - -Please ignore this warning. - -::: diff --git a/docs/versioned_docs/version-2.16/workflows/upgrade.md b/docs/versioned_docs/version-2.16/workflows/upgrade.md deleted file mode 100644 index 7348c0dbc..000000000 --- a/docs/versioned_docs/version-2.16/workflows/upgrade.md +++ /dev/null @@ -1,110 +0,0 @@ -# Upgrade your cluster - -Constellation provides an easy way to upgrade all components of your cluster, without disrupting it's availability. -Specifically, you can upgrade the Kubernetes version, the nodes' image, and the Constellation microservices. -You configure the desired versions in your local Constellation configuration and trigger upgrades with the `apply` command. -To learn about available versions you use the `upgrade check` command. -Which versions are available depends on the CLI version you are using. - -## Update the CLI - -Each CLI comes with a set of supported microservice and Kubernetes versions. -Most importantly, a given CLI version can only upgrade a cluster of the previous minor version, but not older ones. -This means that you have to upgrade your CLI and cluster one minor version at a time. 
- -For example, if you are currently on CLI version v2.6 and the latest version is v2.8, you should - -* upgrade the CLI to v2.7, -* upgrade the cluster to v2.7, -* and only then continue upgrading the CLI (and the cluster) to v2.8 after. - -Also note that if your current Kubernetes version isn't supported by the next CLI version, use your current CLI to upgrade to a newer Kubernetes version first. - -To learn which Kubernetes versions are supported by a particular CLI, run [constellation config kubernetes-versions](../reference/cli.md#constellation-config-kubernetes-versions). - -## Migrate the configuration - -The Constellation configuration file is located in the file `constellation-conf.yaml` in your workspace. -Refer to the [migration reference](../reference/migration.md) to check if you need to update fields in your configuration file. -Use [`constellation config migrate`](../reference/cli.md#constellation-config-migrate) to automatically update an old config file to a new format. - -## Check for upgrades - -To learn which versions the current CLI can upgrade to and what's installed in your cluster, run: - -```bash -# Show possible upgrades -constellation upgrade check - -# Show possible upgrades and write them to config file -constellation upgrade check --update-config -``` - -You can either enter the reported target versions into your config manually or run the above command with the `--update-config` flag. -When using this flag, the `kubernetesVersion`, `image`, `microserviceVersion`, and `attestation` fields are overwritten with the smallest available upgrade. - -## Apply the upgrade - -Once you updated your config with the desired versions, you can trigger the upgrade with this command: - -```bash -constellation apply -``` - -Microservice upgrades will be finished within a few minutes, depending on the cluster size. -If you are interested, you can monitor pods restarting in the `kube-system` namespace with your tool of choice. - -Image and Kubernetes upgrades take longer. -For each node in your cluster, a new node has to be created and joined. -The process usually takes up to ten minutes per node. - -When applying an upgrade, the Helm charts for the upgrade as well as backup files of Constellation-managed Custom Resource Definitions, Custom Resources, and Terraform state are created. -You can use the Terraform state backup to restore previous resources in case an upgrade misconfigured or erroneously deleted a resource. -You can use the Custom Resource (Definition) backup files to restore Custom Resources and Definitions manually (e.g., via `kubectl apply`) if the automatic migration of those resources fails. -You can use the Helm charts to manually apply upgrades to the Kubernetes resources, should an upgrade fail. - -:::note - -For advanced users: the upgrade consists of several phases that can be individually skipped through the `--skip-phases` flag. -The phases are `infrastracture` for the cloud resource management through Terraform, `helm` for the chart management of the microservices, `image` for OS image upgrades, and `k8s` for Kubernetes version upgrades. - -::: - -## Check the status - -Upgrades are asynchronous operations. -After you run `apply`, it will take a while until the upgrade has completed. 
-To understand if an upgrade is finished, you can run: - -```bash -constellation status -``` - -This command displays the following information: - -* The installed services and their versions -* The image and Kubernetes version the cluster is expecting on each node -* How many nodes are up to date - -Here's an example output: - -```shell-session -Target versions: - Image: v2.6.0 - Kubernetes: v1.25.8 -Service versions: - Cilium: v1.12.1 - cert-manager: v1.10.0 - constellation-operators: v2.6.0 - constellation-services: v2.6.0 -Cluster status: Some node versions are out of date - Image: 23/25 - Kubernetes: 25/25 -``` - -This output indicates that the cluster is running Kubernetes version `1.25.8`, and all nodes have the appropriate binaries installed. -23 out of 25 nodes have already upgraded to the targeted image version of `2.6.0`, while two are still in progress. - -## Apply further upgrades - -After the upgrade is finished, you can run `constellation upgrade check` again to see if there are more upgrades available. If so, repeat the process. diff --git a/docs/versioned_docs/version-2.16/workflows/verify-cli.md b/docs/versioned_docs/version-2.16/workflows/verify-cli.md deleted file mode 100644 index e33569d37..000000000 --- a/docs/versioned_docs/version-2.16/workflows/verify-cli.md +++ /dev/null @@ -1,129 +0,0 @@ -# Verify the CLI - -:::info -This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. -::: - - - ---- - -Edgeless Systems uses [sigstore](https://www.sigstore.dev/) and [SLSA](https://slsa.dev) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/cosign/signing/overview/), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at `https://rekor.sigstore.dev`. - -:::note -The public key for Edgeless Systems' long-term code-signing key is: - -``` ------BEGIN PUBLIC KEY----- -MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEf8F1hpmwE+YCFXzjGtaQcrL6XZVT -JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== ------END PUBLIC KEY----- -``` - -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). -::: - -The Rekor transparency log is a public append-only ledger that verifies and records signatures and associated metadata. The Rekor transparency log enables everyone to observe the sequence of (software) signatures issued by Edgeless Systems and many other parties. The transparency log allows for the public identification of dubious or malicious signatures. - -You should always ensure that (1) your CLI executable was signed with the private key corresponding to the above public key and that (2) there is a corresponding entry in the Rekor transparency log. Both can be done as described in the following. - -:::info -You don't need to verify the Constellation node images. This is done automatically by your CLI and the rest of Constellation. -::: - -## Verify the signature - -:::info -This guide assumes Linux on an amd64 processor. The exact steps for other platforms differ slightly. -::: - -First, [install the Cosign CLI](https://docs.sigstore.dev/cosign/system_config/installation/). 
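-
-For example, on Linux/amd64 one way to install it is to download the release binary (the linked installation guide lists alternatives and checksums):
-
-```bash
-# Download the Cosign release binary and put it on the PATH.
-curl -fsSLO https://github.com/sigstore/cosign/releases/latest/download/cosign-linux-amd64
-sudo install -m 0755 cosign-linux-amd64 /usr/local/bin/cosign
-```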
Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example: - -```shell-session -$ cosign verify-blob --key https://edgeless.systems/es.pub --signature constellation-linux-amd64.sig constellation-linux-amd64 - -Verified OK -``` - -The above performs an offline verification of the provided public key, signature, and executable. To also verify that a corresponding entry exists in the public Rekor transparency log, add the variable `COSIGN_EXPERIMENTAL=1`: - -```shell-session -$ COSIGN_EXPERIMENTAL=1 cosign verify-blob --key https://edgeless.systems/es.pub --signature constellation-linux-amd64.sig constellation-linux-amd64 - -tlog entry verified with uuid: afaba7f6635b3e058888692841848e5514357315be9528474b23f5dcccb82b13 index: 3477047 -Verified OK -``` - -🏁 You now know that your CLI executable was officially released and signed by Edgeless Systems. - -### Optional: Manually inspect the transparency log - -To further inspect the public Rekor transparency log, [install the Rekor CLI](https://docs.sigstore.dev/logging/installation). A search for the CLI executable should give a single UUID. (Note that this UUID contains the UUID from the previous `cosign` command.) - -```shell-session -$ rekor-cli search --artifact constellation-linux-amd64 - -Found matching entries (listed by UUID): -362f8ecba72f4326afaba7f6635b3e058888692841848e5514357315be9528474b23f5dcccb82b13 -``` - -With this UUID you can get the full entry from the transparency log: - -```shell-session -$ rekor-cli get --uuid=362f8ecba72f4326afaba7f6635b3e058888692841848e5514357315be9528474b23f5dcccb82b13 - -LogID: c0d23d6ad406973f9559f3ba2d1ca01f84147d8ffc5b8445c224f98b9591801d -Index: 3477047 -IntegratedTime: 2022-09-12T22:28:16Z -UUID: afaba7f6635b3e058888692841848e5514357315be9528474b23f5dcccb82b13 -Body: { - "HashedRekordObj": { - "data": { - "hash": { - "algorithm": "sha256", - "value": "40e137b9b9b8204d672642fd1e181c6d5ccb50cfc5cc7fcbb06a8c2c78f44aff" - } - }, - "signature": { - "content": "MEUCIQCSER3mGj+j5Pr2kOXTlCIHQC3gT30I7qkLr9Awt6eUUQIgcLUKRIlY50UN8JGwVeNgkBZyYD8HMxwC/LFRWoMn180=", - "publicKey": { - "content": "LS0tLS1CRUdJTiBQVUJMSUMgS0VZLS0tLS0KTUZrd0V3WUhLb1pJemowQ0FRWUlLb1pJemowREFRY0RRZ0FFZjhGMWhwbXdFK1lDRlh6akd0YVFjckw2WFpWVApKbUVlNWlTTHZHMVN5UVNBZXc3V2RNS0Y2bzl0OGUyVEZ1Q2t6bE9oaGx3czJPSFdiaUZabkZXQ0Z3PT0KLS0tLS1FTkQgUFVCTElDIEtFWS0tLS0tCg==" - } - } - } -} -``` - -The field `publicKey` should contain Edgeless Systems' public key in Base64 encoding. - -You can get an exhaustive list of artifact signatures issued by Edgeless Systems via the following command: - -```bash -rekor-cli search --public-key https://edgeless.systems/es.pub --pki-format x509 -``` - -Edgeless Systems monitors this list to detect potential unauthorized use of its private key. - -## Verify the provenance - -Provenance attests that a software artifact was produced by a specific repository and build system invocation. For more information on provenance visit [slsa.dev](https://slsa.dev/provenance/v0.2) and learn about the [adoption of SLSA for Constellation](../reference/slsa.md). - -Just as checking its signature proves that the CLI hasn't been manipulated, checking the provenance proves that the artifact was produced by the expected build process and hasn't been tampered with. - -To verify the provenance, first install the [slsa-verifier](https://github.com/slsa-framework/slsa-verifier). 
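-
-For example, with a Go toolchain installed you can build it from source (prebuilt, signed binaries are also available on the project's release page):
-
-```bash
-# Install the slsa-verifier CLI (v2 module path).
-go install github.com/slsa-framework/slsa-verifier/v2/cli/slsa-verifier@latest
-```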
Then make sure you have the provenance file (`constellation.intoto.jsonl`) and Constellation CLI downloaded. Both are available on the [GitHub release page](https://github.com/edgelesssys/constellation/releases). - -:::info -The same provenance file is valid for all Constellation CLI executables of a given version independent of the target platform. -::: - -Use the verifier to perform the check: - -```shell-session -$ slsa-verifier verify-artifact constellation-linux-amd64 \ - --provenance-path constellation.intoto.jsonl \ - --source-uri github.com/edgelesssys/constellation - -Verified signature against tlog entry index 7771317 at URL: https://rekor.sigstore.dev/api/v1/log/entries/24296fb24b8ad77af2c04c8b4ae0d5bc5... -Verified build using builder https://github.com/slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@refs/tags/v1.2.2 at commit 18e9924b416323c37b9cdfd6cc728de8a947424a -PASSED: Verified SLSA provenance -``` diff --git a/docs/versioned_docs/version-2.16/workflows/verify-cluster.md b/docs/versioned_docs/version-2.16/workflows/verify-cluster.md deleted file mode 100644 index b6595ebf2..000000000 --- a/docs/versioned_docs/version-2.16/workflows/verify-cluster.md +++ /dev/null @@ -1,97 +0,0 @@ -# Verify your cluster - -Constellation's [attestation feature](../architecture/attestation.md) allows you, or a third party, to verify the integrity and confidentiality of your Constellation cluster. - -## Fetch measurements - -To verify the integrity of Constellation you need trusted measurements to verify against. For each node image released by Edgeless Systems, there are signed measurements, which you can download using the CLI: - -```bash -constellation config fetch-measurements -``` - -This command performs the following steps: - -1. Download the signed measurements for the configured image. By default, this will use Edgeless Systems' public measurement registry. -2. Verify the signature of the measurements. This will use Edgeless Systems' [public key](https://edgeless.systems/es.pub). -3. Write measurements into configuration file. - -The configuration file then contains a list of `measurements` similar to the following: - -```yaml -# ... -measurements: - 0: - expected: "0f35c214608d93c7a6e68ae7359b4a8be5a0e99eea9107ece427c4dea4e439cf" - warnOnly: false - 4: - expected: "02c7a67c01ec70ffaf23d73a12f749ab150a8ac6dc529bda2fe1096a98bf42ea" - warnOnly: false - 5: - expected: "e6949026b72e5045706cd1318889b3874480f7a3f7c5c590912391a2d15e6975" - warnOnly: true - 8: - expected: "0000000000000000000000000000000000000000000000000000000000000000" - warnOnly: false - 9: - expected: "f0a6e8601b00e2fdc57195686cd4ef45eb43a556ac1209b8e25d993213d68384" - warnOnly: false - 11: - expected: "0000000000000000000000000000000000000000000000000000000000000000" - warnOnly: false - 12: - expected: "da99eb6cf7c7fbb692067c87fd5ca0b7117dc293578e4fea41f95d3d3d6af5e2" - warnOnly: false - 13: - expected: "0000000000000000000000000000000000000000000000000000000000000000" - warnOnly: false - 14: - expected: "d7c4cc7ff7933022f013e03bdee875b91720b5b86cf1753cad830f95e791926f" - warnOnly: true - 15: - expected: "0000000000000000000000000000000000000000000000000000000000000000" - warnOnly: false -# ... -``` - -Each entry specifies the expected value of the Constellation node, and whether the measurement should be enforced (`warnOnly: false`), or only a warning should be logged (`warnOnly: true`). 
-By default, the subset of the [available measurements](../architecture/attestation.md#runtime-measurements) that can be locally reproduced and verified is enforced. - -During attestation, the validating side (CLI or [join service](../architecture/microservices.md#joinservice)) compares each measurement reported by the issuing side (first node or joining node) individually. -For mismatching measurements that have set `warnOnly` to `true` only a warning is emitted. -For mismatching measurements that have set `warnOnly` to `false` an error is emitted and attestation fails. -If attestation fails for a new node, it isn't permitted to join the cluster. - -## The *verify* command - -:::note -The steps below are purely optional. They're automatically executed by `constellation apply` when you initialize your cluster. The `constellation verify` command mostly has an illustrative purpose. -::: - -The `verify` command obtains and verifies an attestation statement from a running Constellation cluster. - -```bash -constellation verify [--cluster-id ...] -``` - -From the attestation statement, the command verifies the following properties: - -* The cluster is using the correct Confidential VM (CVM) type. -* Inside the CVMs, the correct node images are running. The node images are identified through the measurements obtained in the previous step. -* The unique ID of the cluster matches the one from your `constellation-state.yaml` file or passed in via `--cluster-id`. - -Once the above properties are verified, you know that you are talking to the right Constellation cluster and it's in a good and trustworthy shape. - -### Custom arguments - -The `verify` command also allows you to verify any Constellation deployment that you have network access to. For this you need the following: - -* The IP address of a running Constellation cluster's [VerificationService](../architecture/microservices.md#verificationservice). The `VerificationService` is exposed via a `NodePort` service using the external IP address of your cluster. Run `kubectl get nodes -o wide` and look for `EXTERNAL-IP`. -* The cluster's *clusterID*. See [cluster identity](../architecture/keys.md#cluster-identity) for more details. -* A `constellation-conf.yaml` file with the expected measurements of the cluster in your working directory. 
- -For example: - -```shell-session -constellation verify -e 192.0.2.1 --cluster-id Q29uc3RlbGxhdGlvbkRvY3VtZW50YXRpb25TZWNyZXQ= -``` diff --git a/docs/versioned_docs/version-2.17/_media/SLSA-Badge-full-level3.svg b/docs/versioned_docs/version-2.17/_media/SLSA-Badge-full-level3.svg deleted file mode 100644 index 7154d4a13..000000000 --- a/docs/versioned_docs/version-2.17/_media/SLSA-Badge-full-level3.svg +++ /dev/null @@ -1,47 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/docs/versioned_docs/version-2.17/_media/benchmark_fio_azure_bw.png b/docs/versioned_docs/version-2.17/_media/benchmark_fio_azure_bw.png deleted file mode 100644 index a82ebe2d0..000000000 Binary files a/docs/versioned_docs/version-2.17/_media/benchmark_fio_azure_bw.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.17/_media/benchmark_fio_azure_iops.png b/docs/versioned_docs/version-2.17/_media/benchmark_fio_azure_iops.png deleted file mode 100644 index 1723257a8..000000000 Binary files a/docs/versioned_docs/version-2.17/_media/benchmark_fio_azure_iops.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.17/_media/benchmark_fio_gcp_bw.png b/docs/versioned_docs/version-2.17/_media/benchmark_fio_gcp_bw.png deleted file mode 100644 index 4f0ecc94b..000000000 Binary files a/docs/versioned_docs/version-2.17/_media/benchmark_fio_gcp_bw.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.17/_media/benchmark_fio_gcp_iops.png b/docs/versioned_docs/version-2.17/_media/benchmark_fio_gcp_iops.png deleted file mode 100644 index 571086da2..000000000 Binary files a/docs/versioned_docs/version-2.17/_media/benchmark_fio_gcp_iops.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.17/_media/benchmark_net_p2p_azure.png b/docs/versioned_docs/version-2.17/_media/benchmark_net_p2p_azure.png deleted file mode 100644 index 9130349c7..000000000 Binary files a/docs/versioned_docs/version-2.17/_media/benchmark_net_p2p_azure.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.17/_media/benchmark_net_p2p_gcp.png b/docs/versioned_docs/version-2.17/_media/benchmark_net_p2p_gcp.png deleted file mode 100644 index a41557e96..000000000 Binary files a/docs/versioned_docs/version-2.17/_media/benchmark_net_p2p_gcp.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.17/_media/benchmark_net_p2svc_azure.png b/docs/versioned_docs/version-2.17/_media/benchmark_net_p2svc_azure.png deleted file mode 100644 index d83e17f5a..000000000 Binary files a/docs/versioned_docs/version-2.17/_media/benchmark_net_p2svc_azure.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.17/_media/benchmark_net_p2svc_gcp.png b/docs/versioned_docs/version-2.17/_media/benchmark_net_p2svc_gcp.png deleted file mode 100644 index 55916a1de..000000000 Binary files a/docs/versioned_docs/version-2.17/_media/benchmark_net_p2svc_gcp.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.17/_media/benchmark_vault/5replicas/max_latency.png b/docs/versioned_docs/version-2.17/_media/benchmark_vault/5replicas/max_latency.png deleted file mode 100644 index 696250181..000000000 Binary files a/docs/versioned_docs/version-2.17/_media/benchmark_vault/5replicas/max_latency.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.17/_media/benchmark_vault/5replicas/mean_latency.png b/docs/versioned_docs/version-2.17/_media/benchmark_vault/5replicas/mean_latency.png deleted file mode 100644 index 
3b43298ac..000000000 Binary files a/docs/versioned_docs/version-2.17/_media/benchmark_vault/5replicas/mean_latency.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.17/_media/benchmark_vault/5replicas/min_latency.png b/docs/versioned_docs/version-2.17/_media/benchmark_vault/5replicas/min_latency.png deleted file mode 100644 index 1046df67e..000000000 Binary files a/docs/versioned_docs/version-2.17/_media/benchmark_vault/5replicas/min_latency.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.17/_media/benchmark_vault/5replicas/p99_latency.png b/docs/versioned_docs/version-2.17/_media/benchmark_vault/5replicas/p99_latency.png deleted file mode 100644 index 0190118b2..000000000 Binary files a/docs/versioned_docs/version-2.17/_media/benchmark_vault/5replicas/p99_latency.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.17/_media/concept-constellation.svg b/docs/versioned_docs/version-2.17/_media/concept-constellation.svg deleted file mode 100644 index 30d32bf6d..000000000 --- a/docs/versioned_docs/version-2.17/_media/concept-constellation.svg +++ /dev/null @@ -1,460 +0,0 @@ - - diff --git a/docs/versioned_docs/version-2.17/_media/concept-managed.svg b/docs/versioned_docs/version-2.17/_media/concept-managed.svg deleted file mode 100644 index 5645a608f..000000000 --- a/docs/versioned_docs/version-2.17/_media/concept-managed.svg +++ /dev/null @@ -1,591 +0,0 @@ - - diff --git a/docs/versioned_docs/version-2.17/_media/constellation_oneline.svg b/docs/versioned_docs/version-2.17/_media/constellation_oneline.svg deleted file mode 100644 index 4e354958a..000000000 --- a/docs/versioned_docs/version-2.17/_media/constellation_oneline.svg +++ /dev/null @@ -1,52 +0,0 @@ - - - - - - - - diff --git a/docs/versioned_docs/version-2.17/_media/example-emojivoto.jpg b/docs/versioned_docs/version-2.17/_media/example-emojivoto.jpg deleted file mode 100644 index 4be0d5b26..000000000 Binary files a/docs/versioned_docs/version-2.17/_media/example-emojivoto.jpg and /dev/null differ diff --git a/docs/versioned_docs/version-2.17/_media/example-online-boutique.jpg b/docs/versioned_docs/version-2.17/_media/example-online-boutique.jpg deleted file mode 100644 index 026f0d865..000000000 Binary files a/docs/versioned_docs/version-2.17/_media/example-online-boutique.jpg and /dev/null differ diff --git a/docs/versioned_docs/version-2.17/_media/recovery-gcp-serial-console-link.png b/docs/versioned_docs/version-2.17/_media/recovery-gcp-serial-console-link.png deleted file mode 100644 index eb67f0e99..000000000 Binary files a/docs/versioned_docs/version-2.17/_media/recovery-gcp-serial-console-link.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.17/_media/tcb.svg b/docs/versioned_docs/version-2.17/_media/tcb.svg deleted file mode 100644 index e5bcb5b95..000000000 --- a/docs/versioned_docs/version-2.17/_media/tcb.svg +++ /dev/null @@ -1,535 +0,0 @@ - - diff --git a/docs/versioned_docs/version-2.17/architecture/attestation.md b/docs/versioned_docs/version-2.17/architecture/attestation.md deleted file mode 100644 index a07b35e5a..000000000 --- a/docs/versioned_docs/version-2.17/architecture/attestation.md +++ /dev/null @@ -1,394 +0,0 @@ -# Attestation - -This page explains Constellation's attestation process and highlights the cornerstones of its trust model. - -## Terms - -The following lists terms and concepts that help to understand the attestation concept of Constellation. 
- -### Trusted Platform Module (TPM) - -A TPM chip is a dedicated tamper-resistant crypto-processor. -It can securely store artifacts such as passwords, certificates, encryption keys, or *runtime measurements* (more on this below). -When a TPM is implemented in software, it's typically called a *virtual* TPM (vTPM). - -### Runtime measurement - -A runtime measurement is a cryptographic hash of the memory pages of a so called *runtime component*. Runtime components of interest typically include a system's bootloader or OS kernel. - -### Platform Configuration Register (PCR) - -A Platform Configuration Register (PCR) is a memory location in the TPM that has some unique properties. -To store a new value in a PCR, the existing value is extended with a new value as follows: - -``` -PCR[N] = HASHalg( PCR[N] || ArgumentOfExtend ) -``` - -The PCRs are typically used to store runtime measurements. -The new value of a PCR is always an extension of the existing value. -Thus, storing the measurements of multiple components into the same PCR irreversibly links them together. - -### Measured boot - -Measured boot builds on the concept of chained runtime measurements. -Each component in the boot chain loads and measures the next component into the PCR before executing it. -By comparing the resulting PCR values against trusted reference values, the integrity of the entire boot chain and thereby the running system can be ensured. - -### Remote attestation (RA) - -Remote attestation is the process of verifying certain properties of an application or platform, such as integrity and confidentiality, from a remote location. -In the case of a measured boot, the goal is to obtain a signed attestation statement on the PCR values of the boot measurements. -The statement can then be verified and compared to a set of trusted reference values. -This way, the integrity of the platform can be ensured before sharing secrets with it. - -### Confidential virtual machine (CVM) - -Confidential computing (CC) is the protection of data in-use with hardware-based trusted execution environments (TEEs). -With CVMs, TEEs encapsulate entire virtual machines and isolate them against the hypervisor, other VMs, and direct memory access. -After loading the initial VM image into encrypted memory, the hypervisor calls for a secure processor to measure these initial memory pages. -The secure processor locks these pages and generates an attestation report on the initial page measurements. -CVM memory pages are encrypted with a key that resides inside the secure processor, which makes sure only the guest VM can access them. -The attestation report is signed by the secure processor and can be verified using remote attestation via the certificate authority of the hardware vendor. -Such an attestation statement guarantees the confidentiality and integrity of a CVM. - -### Attested TLS (aTLS) - -In a CC environment, attested TLS (aTLS) can be used to establish secure connections between two parties using the remote attestation features of the CC components. - -aTLS modifies the TLS handshake by embedding an attestation statement into the TLS certificate. -Instead of relying on a certificate authority, aTLS uses this attestation statement to establish trust in the certificate. - -The protocol can be used by clients to verify a server certificate, by a server to verify a client certificate, or for mutual verification (mutual aTLS). 
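To make the PCR extension rule above concrete, here is a minimal Go sketch that replays a few measurements into a register. It assumes SHA-256 as `HASHalg` and uses made-up component names; it only illustrates the mechanism and isn't Constellation's implementation.

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// extend implements PCR[N] = HASH(PCR[N] || measurement) for SHA-256.
func extend(pcr, measurement []byte) []byte {
	h := sha256.New()
	h.Write(pcr)
	h.Write(measurement)
	return h.Sum(nil)
}

func main() {
	// A PCR starts out as all zeros (one hash output in length).
	pcr := make([]byte, sha256.Size)

	// Measuring the same components in the same order always yields the same
	// final value; any change or reordering changes the result.
	for _, component := range []string{"bootloader", "kernel", "initramfs"} {
		digest := sha256.Sum256([]byte(component)) // hypothetical stand-in for a real measurement
		pcr = extend(pcr, digest[:])
	}
	fmt.Printf("final PCR value: %x\n", pcr)
}
```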
- -## Overview - -The challenge for Constellation is to lift a CVM's attestation statement to the Kubernetes software layer and make it end-to-end verifiable. -From there, Constellation needs to expand the attestation from a single CVM to the entire cluster. - -The [*JoinService*](microservices.md#joinservice) and [*VerificationService*](microservices.md#verificationservice) are where all runs together. -Internally, the *JoinService* uses remote attestation to securely join CVM nodes to the cluster. -Externally, the *VerificationService* provides an attestation statement for the cluster's CVMs and configuration. - -The following explains the details of both steps. - -## Node attestation - -The idea is that Constellation nodes should have verifiable integrity from the CVM hardware measurement up to the Kubernetes software layer. -The solution is a verifiable boot chain and an integrity-protected runtime environment. - -Constellation uses measured boot within CVMs, measuring each component in the boot process before executing it. -Outside of CC, this is usually implemented via TPMs. -CVM technologies differ in how they implement runtime measurements, but the general concepts are similar to those of a TPM. -For simplicity, TPM terminology like *PCR* is used in the following. - -When a Constellation node image boots inside a CVM, measured boot is used for all stages and components of the boot chain. -This process goes up to the root filesystem. -The root filesystem is mounted read-only with integrity protection. -For the details on the image and boot stages see the [image architecture](../architecture/images.md) documentation. -Any changes to the image will inevitably also change the corresponding PCR values. -To create a node attestation statement, the Constellation image obtains a CVM attestation statement from the hardware. -This includes the runtime measurements and thereby binds the measured boot results to the CVM hardware measurement. - -In addition to the image measurements, Constellation extends a PCR during the [initialization phase](../workflows/create.md) that irrevocably marks the node as initialized. -The measurement is created using the [*clusterID*](../architecture/keys.md#cluster-identity), tying all future attestation statements to this ID. -Thereby, an attestation statement is unique for every cluster and a node can be identified unambiguously as being initialized. - -To verify an attestation, the hardware's signature and a statement are verified first to establish trust in the contained runtime measurements. -If successful, the measurements are verified against the trusted values of the particular Constellation release version. -Finally, the measurement of the *clusterID* can be compared by calculating it with the [master secret](keys.md#master-secret). - -### Runtime measurements - -Constellation uses runtime measurements to implement the measured boot approach. -As stated above, the underlying hardware technology and guest firmware differ in their implementations of runtime measurements. -The following gives a detailed description of the available measurements in the different cloud environments. - -The runtime measurements consist of two types of values: - -* **Measurements produced by the cloud infrastructure and firmware of the CVM**: -These are measurements of closed-source firmware and other values controlled by the cloud provider. -While not being reproducible for the user, some of them can be compared against previously observed values. 
-Others may change frequently and aren't suitable for verification. -The [signed image measurements](#chain-of-trust) include measurements that are known, previously observed values. - -* **Measurements produced by the Constellation bootloader and boot chain**: -The Constellation Bootloader takes over from the CVM firmware and [measures the rest of the boot chain](images.md). -The Constellation [Bootstrapper](microservices.md#bootstrapper) is the first user mode component that runs in a Constellation image. -It extends PCR registers with the [IDs](keys.md#cluster-identity) of the cluster marking a node as initialized. - -Constellation allows to specify in the config which measurements should be enforced during the attestation process. -Enforcing non-reproducible measurements controlled by the cloud provider means that changes in these values require manual updates to the cluster's config. -By default, Constellation only enforces measurements that are stable values produced by the infrastructure or by Constellation directly. - - - - -Constellation uses the [vTPM](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html) (NitroTPM) feature of the [AWS Nitro System](http://aws.amazon.com/ec2/nitro/) on AWS for runtime measurements. - -The vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. -The VMs are attested by obtaining signed PCR values over the VM's boot configuration from the TPM and comparing them to a known, good state (measured boot). - -The following table lists all PCR values of the vTPM and the measured components. -It also lists what components of the boot chain did the measurements and if the value is reproducible and verifiable. -The latter means that the value can be generated offline and compared to the one in the vTPM. - -| PCR | Components | Measured by | Reproducible and verifiable | -| ----------- | ---------------------------------------------------------------- | -------------------------------------- | --------------------------- | -| 0 | Firmware | AWS | No | -| 1 | Firmware | AWS | No | -| 2 | Firmware | AWS | No | -| 3 | Firmware | AWS | No | -| 4 | Constellation Bootloader, Kernel, initramfs, Kernel command line | AWS, Constellation Bootloader | Yes | -| 5 | Firmware | AWS | No | -| 6 | Firmware | AWS | No | -| 7 | Secure Boot Policy | AWS, Constellation Bootloader | No | -| 8 | - | - | - | -| 9 | initramfs, Kernel command line | Linux Kernel | Yes | -| 10 | User space | Linux IMA | No[^1] | -| 11 | Unified Kernel Image components | Constellation Bootloader | Yes | -| 12 | Reserved | (User space, Constellation Bootloader) | Yes | -| 13 | Reserved | (Constellation Bootloader) | Yes | -| 14 | Secure Boot State | Constellation Bootloader | No | -| 15 | ClusterID | Constellation Bootstrapper | Yes | -| 16–23 | Unused | - | - | - - - - -Constellation uses the [vTPM](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch#vtpm) feature of Azure CVMs for runtime measurements. -This vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. -It provides a [measured boot](https://docs.microsoft.com/en-us/azure/security/fundamentals/measured-boot-host-attestation#measured-boot) verification that's based on the trusted launch feature of [Trusted Launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch). - -The following table lists all PCR values of the vTPM and the measured components. 
-It also lists what components of the boot chain did the measurements and if the value is reproducible and verifiable. -The latter means that the value can be generated offline and compared to the one in the vTPM. - -| PCR | Components | Measured by | Reproducible and verifiable | -| ----------- | ---------------------------------------------------------------- | -------------------------------------- | --------------------------- | -| 0 | Firmware | Azure | No | -| 1 | Firmware | Azure | No | -| 2 | Firmware | Azure | No | -| 3 | Firmware | Azure | No | -| 4 | Constellation Bootloader, Kernel, initramfs, Kernel command line | Azure, Constellation Bootloader | Yes | -| 5 | Reserved | Azure | No | -| 6 | VM Unique ID | Azure | No | -| 7 | Secure Boot State | Azure, Constellation Bootloader | No | -| 8 | - | - | - | -| 9 | initramfs, Kernel command line | Linux Kernel | Yes | -| 10 | User space | Linux IMA | No[^1] | -| 11 | Unified Kernel Image components | Constellation Bootloader | Yes | -| 12 | Reserved | (User space, Constellation Bootloader) | Yes | -| 13 | Reserved | (Constellation Bootloader) | Yes | -| 14 | Secure Boot State | Constellation Bootloader | No | -| 15 | ClusterID | Constellation Bootstrapper | Yes | -| 16–23 | Unused | - | - | - - - - -Constellation uses the [vTPM](https://cloud.google.com/compute/confidential-vm/docs/about-cvm) feature of CVMs on GCP for runtime measurements. -Note that this vTPM doesn't run inside the hardware-protected CVM context, but is emulated by the hypervisor. - -The vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. -It provides a [launch attestation report](https://cloud.google.com/compute/confidential-vm/docs/monitoring#about_launch_attestation_report_events) that's based on the measured boot feature of [Shielded VMs](https://cloud.google.com/compute/shielded-vm/docs/shielded-vm#measured-boot). - -The following table lists all PCR values of the vTPM and the measured components. -It also lists what components of the boot chain did the measurements and if the value is reproducible and verifiable. -The latter means that the value can be generated offline and compared to the one in the vTPM. - -| PCR | Components | Measured by | Reproducible and verifiable | -| ----------- | ---------------------------------------------------------------- | -------------------------------------- | --------------------------- | -| 0 | CVM version and technology | GCP | No | -| 1 | Firmware | GCP | No | -| 2 | Firmware | GCP | No | -| 3 | Firmware | GCP | No | -| 4 | Constellation Bootloader, Kernel, initramfs, Kernel command line | GCP, Constellation Bootloader | Yes | -| 5 | Disk GUID partition table | GCP | No | -| 6 | Disk GUID partition table | GCP | No | -| 7 | GCP Secure Boot Policy | GCP, Constellation Bootloader | No | -| 8 | - | - | - | -| 9 | initramfs, Kernel command line | Linux Kernel | Yes | -| 10 | User space | Linux IMA | No[^1] | -| 11 | Unified Kernel Image components | Constellation Bootloader | Yes | -| 12 | Reserved | (User space, Constellation Bootloader) | Yes | -| 13 | Reserved | (Constellation Bootloader) | Yes | -| 14 | Secure Boot State | Constellation Bootloader | No | -| 15 | ClusterID | Constellation Bootstrapper | Yes | -| 16–23 | Unused | - | - | - - - - -Constellation uses a hypervisor-based vTPM for runtime measurements. - -The vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. 
-The VMs are attested by obtaining signed PCR values over the VM's boot configuration from the TPM and comparing them to a known, good state (measured boot). - -The following table lists all PCR values of the vTPM and the measured components. -It also lists what components of the boot chain did the measurements and if the value is reproducible and verifiable. -The latter means that the value can be generated offline and compared to the one in the vTPM. - -| PCR | Components | Measured by | Reproducible and verifiable | -| ----------- | ---------------------------------------------------------------- | -------------------------------------- | --------------------------- | -| 0 | Firmware | STACKIT | No | -| 1 | Firmware | STACKIT | No | -| 2 | Firmware | STACKIT | No | -| 3 | Firmware | STACKIT | No | -| 4 | Constellation Bootloader, Kernel, initramfs, Kernel command line | STACKIT, Constellation Bootloader | Yes | -| 5 | Firmware | STACKIT | No | -| 6 | Firmware | STACKIT | No | -| 7 | Secure Boot Policy | STACKIT, Constellation Bootloader | No | -| 8 | - | - | - | -| 9 | initramfs, Kernel command line | Linux Kernel | Yes | -| 10 | User space | Linux IMA | No[^1] | -| 11 | Unified Kernel Image components | Constellation Bootloader | Yes | -| 12 | Reserved | (User space, Constellation Bootloader) | Yes | -| 13 | Reserved | (Constellation Bootloader) | Yes | -| 14 | Secure Boot State | Constellation Bootloader | No | -| 15 | ClusterID | Constellation Bootstrapper | Yes | -| 16–23 | Unused | - | - | - - - - -### CVM verification - -To verify the integrity of the received attestation statement, a chain of trust from the CVM technology to the interface providing the statement has to be established. -For verification of the CVM technology, Constellation may expose additional options in its config file. - - - - -On AWS, AMD SEV-SNP is used to provide runtime encryption to the VMs. -An SEV-SNP attestation report is used to establish trust in the VM. -You may customize certain parameters for verification of the attestation statement using the Constellation config file. - -* TCB versions - - You can set the minimum version numbers of components in the SEV-SNP TCB. - Use the latest versions to enforce that only machines with the most recent firmware updates are allowed to join the cluster. - Alternatively, you can set a lower minimum version to allow slightly out-of-date machines to still be able to join the cluster. - -* AMD Root Key Certificate - - This certificate is the root of trust for verifying the SEV-SNP certificate chain. - -* AMD Signing Key Certificate - - This is the intermediate certificate for verifying the SEV-SNP report's signature. - If it's not specified, the CLI fetches it from the AMD key distribution server. - - - - -On Azure, AMD SEV-SNP is used to provide runtime encryption to the VMs. -An SEV-SNP attestation report is used to establish trust in the vTPM running inside the VM. -You may customize certain parameters for verification of the attestation statement using the Constellation config file. - -* TCB versions - - You can set the minimum version numbers of components in the SEV-SNP TCB. - Use the latest versions to enforce that only machines with the most recent firmware updates are allowed to join the cluster. - Alternatively, you can set a lower minimum version to allow slightly out-of-date machines to still be able to join the cluster. - -* AMD Root Key Certificate - - This certificate is the root of trust for verifying the SEV-SNP certificate chain. 
- -* Firmware Signer - - This config option allows you to specify how the firmware signer should be verified. - More explicitly, it controls the verification of the `IDKeyDigest` value in the SEV-SNP attestation report. - You can provide a list of accepted key digests and specify a policy on how this list is compared against the reported `IDKeyDigest`. - - - - -On GCP, AMD SEV-ES is used to provide runtime encryption to the VMs. -The hypervisor-based vTPM is used to establish trust in the VM via [runtime measurements](#runtime-measurements). -There is no additional configuration available for GCP. - - - - -On STACKIT, AMD SEV-ES is used to provide runtime encryption to the VMs. -The hypervisor-based vTPM is used to establish trust in the VM via [runtime measurements](#runtime-measurements). -There is no additional configuration available for STACKIT. - - - - -## Cluster attestation - -Cluster-facing, Constellation's [*JoinService*](microservices.md#joinservice) verifies each node joining the cluster given the configured ground truth runtime measurements. -User-facing, the [*VerificationService*](microservices.md#verificationservice) provides an interface to verify a node using remote attestation. -By verifying the first node during the [initialization](microservices.md#bootstrapper) and configuring the ground truth measurements that are subsequently enforced by the *JoinService*, the whole cluster is verified in a transitive way. - -### Cluster-facing attestation - -The *JoinService* is provided with the runtime measurements of the whitelisted Constellation image version as the ground truth. -During the initialization and the cluster bootstrapping, each node connects to the *JoinService* using [aTLS](#attested-tls-atls). -During the handshake, the node transmits an attestation statement including its runtime measurements. -The *JoinService* verifies that statement and compares the measurements against the ground truth. -For details of the initialization process check the [microservice descriptions](microservices.md). - -After the initialization, every node updates its runtime measurements with the *clusterID* value, marking it irreversibly as initialized. -When an initialized node tries to join another cluster, its measurements inevitably mismatch the measurements of an uninitialized node and it will be declined. - -### User-facing attestation - -The [*VerificationService*](microservices.md#verificationservice) provides an endpoint for obtaining its hardware-based remote attestation statement, which includes the runtime measurements. -A user can [verify](../workflows/verify-cluster.md) this statement and compare the measurements against the configured ground truth and, thus, verify the identity and integrity of all Constellation components and the cluster configuration. Subsequently, the user knows that the entire cluster is in the expected state and is trustworthy. - -## Putting it all together - -This section puts the aforementioned concepts together and illustrate how trust into a Constellation cluster is established and maintained. - -### CLI and node images - -It all starts with the CLI executable. The CLI is signed by Edgeless Systems. To ensure non-repudiability for CLI releases, Edgeless Systems publishes corresponding signatures to the public ledger of the [sigstore project](https://www.sigstore.dev/). There's a [step-by-step guide](../workflows/verify-cli.md) on how to verify CLI signatures based on sigstore. 
-
-The CLI contains the latest runtime measurements of the Constellation node image for all supported cloud platforms. In case a different version of the node image is to be used, the corresponding runtime measurements can be fetched using the CLI's [fetch-measurements command](../reference/cli.md#constellation-config-fetch-measurements). This command downloads the runtime measurements and the corresponding signature from cdn.confidential.cloud. See for example the following files corresponding to node image v2.16.3:
-
-* [Measurements](https://cdn.confidential.cloud/constellation/v2/ref/-/stream/stable/v2.16.3/image/measurements.json)
-* [Signature](https://cdn.confidential.cloud/constellation/v2/ref/-/stream/stable/v2.16.3/image/measurements.json.sig)
-
-The CLI contains the long-term public key of Edgeless Systems to verify the signature of downloaded runtime measurements.
-
-### Cluster creation
-
-When a cluster is [created](../workflows/create.md), the CLI automatically verifies the runtime measurements of the *first node* using remote attestation. Based on this, the CLI and the first node set up a temporary TLS connection. This [aTLS](#attested-tls-atls) connection is used for two things:
-
-1. The CLI sends the [master secret](../architecture/keys.md#master-secret) of the to-be-created cluster to the first node. The master secret is generated by the CLI.
-2. The first node sends a [kubeconfig file](https://www.redhat.com/sysadmin/kubeconfig) with Kubernetes credentials to the CLI.
-
-After this, the aTLS connection is closed and the first node bootstraps the Kubernetes cluster. All subsequent interactions between the CLI and the cluster go via the [Kubernetes API](https://kubernetes.io/docs/concepts/overview/kubernetes-api/) server running inside the cluster. The CLI (and other tools like kubectl) use the credentials referenced by the kubeconfig file to authenticate themselves towards the Kubernetes API server and to establish an mTLS connection.
-
-The CLI connects to the Kubernetes API to write the runtime measurements for the applicable node image to etcd. The JoinService uses these runtime measurements to verify all nodes that join the cluster subsequently.
-
-### Chain of trust
-
-In summary, there's a chain of trust based on cryptographic signatures that goes from the user to the cluster via the CLI. This is illustrated in the following diagram.
-
-```mermaid
-flowchart LR
-    A[User]-- "verifies" -->B[CLI]
-    B[CLI]-- "verifies" -->C([Runtime measurements])
-    D[Edgeless Systems]-- "signs" -->B[CLI]
-    D[Edgeless Systems]-- "signs" -->C([Runtime measurements])
-    B[CLI]-- "verifies (remote attestation)" -->E[First node]
-    E[First node]-- "verifies (remote attestation)" -->F[Other nodes]
-    C([Runtime measurements]) -.-> E[First node]
-    C([Runtime measurements]) -.-> F[Other nodes]
-```
-
-### Upgrades
-
-Whenever a cluster is [upgraded](../workflows/upgrade.md) to a new version of the node image, the CLI sends the corresponding runtime measurements via the Kubernetes API server. The new runtime measurements are stored in etcd within the cluster and replace any previous runtime measurements. The new runtime measurements are then used automatically by the JoinService for the verification of new nodes.
-
-## References
-
-[^1]: Linux IMA produces runtime measurements of user-space binaries.
-However, these measurements aren't deterministic and thus, PCR\[10] can't be compared to a constant value.
-Instead, a policy engine must be used to verify the TPM event log against a policy.
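The enforcement of reference values described above, including the `warnOnly` behavior from the verification workflow, boils down to comparing reported PCR values against expected ones. The following Go sketch is illustrative only; the types and function names don't reflect Constellation's actual attestation API.

```go
package attestation

import (
	"bytes"
	"fmt"
)

// Measurement is a hypothetical reference value for one PCR.
type Measurement struct {
	Expected []byte
	WarnOnly bool // a mismatch only warns instead of failing attestation
}

// validate compares reported PCR values against the expected reference values.
// It returns an error if any enforced (non-warnOnly) measurement mismatches.
func validate(expected map[uint32]Measurement, reported map[uint32][]byte) error {
	for idx, want := range expected {
		got, ok := reported[idx]
		if !ok || !bytes.Equal(want.Expected, got) {
			if want.WarnOnly {
				fmt.Printf("warning: PCR %d doesn't match the expected value\n", idx)
				continue
			}
			return fmt.Errorf("PCR %d doesn't match the expected value", idx)
		}
	}
	return nil
}
```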
diff --git a/docs/versioned_docs/version-2.17/architecture/encrypted-storage.md b/docs/versioned_docs/version-2.17/architecture/encrypted-storage.md deleted file mode 100644 index f047fa4a9..000000000 --- a/docs/versioned_docs/version-2.17/architecture/encrypted-storage.md +++ /dev/null @@ -1,62 +0,0 @@ -# Encrypted persistent storage - -Confidential VMs provide runtime memory encryption to protect data in use. -In the context of Kubernetes, this is sufficient for the confidentiality and integrity of stateless services. -Consider a front-end web server, for example, that keeps all connection information cached in main memory. -No sensitive data is ever written to an insecure medium. -However, many real-world applications need some form of state or data-lake service that's connected to a persistent storage device and requires encryption at rest. -As described in [Use persistent storage](../workflows/storage.md), cloud service providers (CSPs) use the container storage interface (CSI) to make their storage solutions available to Kubernetes workloads. -These CSI storage solutions often support some sort of encryption. -For example, Google Cloud [encrypts data at rest by default](https://cloud.google.com/security/encryption/default-encryption), without any action required by the customer. - -## Cloud provider-managed encryption - -CSP-managed storage solutions encrypt the data in the cloud backend before writing it physically to disk. -In the context of confidential computing and Constellation, the CSP and its managed services aren't trusted. -Hence, cloud provider-managed encryption protects your data from offline hardware access to physical storage devices. -It doesn't protect it from anyone with infrastructure-level access to the storage backend or a malicious insider in the cloud platform. -Even with "bring your own key" or similar concepts, the CSP performs the encryption process with access to the keys and plaintext data. - -In the security model of Constellation, securing persistent storage and thereby data at rest requires that all cryptographic operations are performed inside a trusted execution environment. -Consequently, using CSP-managed encryption of persistent storage usually isn't an option. - -## Constellation-managed encryption - -Constellation provides CSI drivers for storage solutions in all major clouds with built-in encryption support. -Block storage provisioned by the CSP is [mapped](https://guix.gnu.org/manual/en/html_node/Mapped-Devices.html) using the [dm-crypt](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/dm-crypt.html), and optionally the [dm-integrity](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/dm-integrity.html), kernel modules, before it's formatted and accessed by the Kubernetes workloads. -All cryptographic operations happen inside the trusted environment of the confidential Constellation node. - -Note that for integrity-protected disks, [volume expansion](https://kubernetes.io/blog/2018/07/12/resizing-persistent-volumes-using-kubernetes/) isn't supported. - -By default the driver uses data encryption keys (DEKs) issued by the Constellation [*KeyService*](microservices.md#keyservice). -The DEKs are in turn derived from the Constellation's key encryption key (KEK), which is directly derived from the [master secret](keys.md#master-secret). -This is the recommended mode of operation, and also requires the least amount of setup by the cluster administrator. 
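A rough sketch of this derivation chain (master secret → KEK → per-volume DEK) using HKDF, as referenced in [keys and cryptography](keys.md). The info labels and key sizes below are assumptions for illustration and don't reflect Constellation's actual parameters.

```go
package keyderivation

import (
	"crypto/sha256"
	"io"

	"golang.org/x/crypto/hkdf"
)

// deriveKey derives a 32-byte key from a secret, salt, and context label via HKDF.
func deriveKey(secret, salt []byte, info string) ([]byte, error) {
	key := make([]byte, 32)
	if _, err := io.ReadFull(hkdf.New(sha256.New, secret, salt, []byte(info)), key); err != nil {
		return nil, err
	}
	return key, nil
}

// keysForVolume derives the KEK from the master secret and a DEK for one
// specific volume from the KEK. The labels are hypothetical.
func keysForVolume(masterSecret, salt []byte, volumeID string) (kek, dek []byte, err error) {
	if kek, err = deriveKey(masterSecret, salt, "kek"); err != nil {
		return nil, nil, err
	}
	if dek, err = deriveKey(kek, nil, "dek/"+volumeID); err != nil {
		return nil, nil, err
	}
	return kek, dek, nil
}
```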
- -Alternatively, the driver can be configured to use a key management system to store and access KEKs and DEKs. - -Refer to [keys and cryptography](keys.md) for more details on key management in Constellation. - -Once deployed and configured, the CSI driver ensures transparent encryption and integrity of all persistent volumes provisioned via its storage class. -Data at rest is secured without any additional actions required by the developer. - -## Cryptographic algorithms - -This section gives an overview of the libraries, cryptographic algorithms, and their configurations, used in Constellation's CSI drivers. - -### dm-crypt - -To interact with the dm-crypt kernel module, Constellation uses [libcryptsetup](https://gitlab.com/cryptsetup/cryptsetup/). -New devices are formatted as [LUKS2](https://gitlab.com/cryptsetup/LUKS2-docs/-/tree/master) partitions with a sector size of 4096 bytes. -The used key derivation function is [Argon2id](https://datatracker.ietf.org/doc/html/rfc9106) with the [recommended parameters for memory-constrained environments](https://datatracker.ietf.org/doc/html/rfc9106#section-7.4) of 3 iterations and 64 MiB of memory, utilizing 4 parallel threads. -For encryption Constellation uses AES in XTS-Plain64. The key size is 512 bit. - -### dm-integrity - -To interact with the dm-integrity kernel module, Constellation uses [libcryptsetup](https://gitlab.com/cryptsetup/cryptsetup/). -When enabled, the used data integrity algorithm is [HMAC](https://datatracker.ietf.org/doc/html/rfc2104) with SHA256 as the hash function. -The tag size is 32 Bytes. - -## Encrypted S3 object storage - -Constellation comes with a service that you can use to transparently retrofit client-side encryption to existing applications that use S3 (AWS or compatible) for storage. -To learn more, check out the [s3proxy documentation](../workflows/s3proxy.md). diff --git a/docs/versioned_docs/version-2.17/architecture/images.md b/docs/versioned_docs/version-2.17/architecture/images.md deleted file mode 100644 index 8a9c51d36..000000000 --- a/docs/versioned_docs/version-2.17/architecture/images.md +++ /dev/null @@ -1,49 +0,0 @@ -# Constellation images - -Constellation uses a minimal version of Fedora as the operating system running inside confidential VMs. This Linux distribution is optimized for containers and designed to be stateless. -The Constellation images provide measured boot and an immutable filesystem. - -## Measured boot - -```mermaid -flowchart LR - Firmware --> Bootloader - Bootloader --> uki - subgraph uki[Unified Kernel Image] - Kernel[Kernel] - initramfs[Initramfs] - cmdline[Kernel Command Line] - end - uki --> rootfs[Root Filesystem] -``` - -Measured boot uses a Trusted Platform Module (TPM) to measure every part of the boot process. This allows for verification of the integrity of a running system at any point in time. To ensure correct measurements of every stage, each stage is responsible to measure the next stage before transitioning. - -### Firmware - -With confidential VMs, the firmware is the root of trust and is measured automatically at boot. After initialization, the firmware will load and measure the bootloader before executing it. - -### Bootloader - -The bootloader is the first modifiable part of the boot chain. The bootloader is tasked with loading the kernel, initramfs and setting the kernel command line. The Constellation bootloader measures these components before starting the kernel. 
- -### initramfs - -The initramfs is a small filesystem loaded to prepare the actual root filesystem. The Constellation initramfs maps the block device containing the root filesystem with [dm-verity](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/verity.html). The initramfs then mounts the root filesystem from the mapped block device. - -dm-verity provides integrity checking using a cryptographic hash tree. When a block is read, its integrity is checked by verifying the tree against a trusted root hash. The initramfs reads this root hash from the previously measured kernel command line. Thus, if any block of the root filesystem's device is modified on disk, trying to read the modified block will result in a kernel panic at runtime. - -After mounting the root filesystem, the initramfs will switch over and start the `init` process of the integrity-protected root filesystem. - -## State disk - -In addition to the read-only root filesystem, each Constellation node has a disk for storing state data. -This disk is mounted readable and writable by the initramfs and contains data that should persist across reboots. -Such data can contain sensitive information and, therefore, must be stored securely. -To that end, the state disk is protected by authenticated encryption. -See the section on [keys and encryption](keys.md#storage-encryption) for more information on the cryptographic primitives in use. - -## Kubernetes components - -During initialization, the [*Bootstrapper*](microservices.md#bootstrapper) downloads and verifies the [Kubernetes components](https://kubernetes.io/docs/concepts/overview/components/) as configured by the user. -They're stored on the state partition and can be updated once new releases need to be installed. diff --git a/docs/versioned_docs/version-2.17/architecture/keys.md b/docs/versioned_docs/version-2.17/architecture/keys.md deleted file mode 100644 index 553d9d4e2..000000000 --- a/docs/versioned_docs/version-2.17/architecture/keys.md +++ /dev/null @@ -1,131 +0,0 @@ -# Key management and cryptographic primitives - -Constellation protects and isolates your cluster and workloads. -To that end, cryptography is the foundation that ensures the confidentiality and integrity of all components. -Evaluating the security and compliance of Constellation requires a precise understanding of the cryptographic primitives and keys used. -The following gives an overview of the architecture and explains the technical details. - -## Confidential VMs - -Confidential VM (CVM) technology comes with hardware and software components for memory encryption, isolation, and remote attestation. -For details on the implementations and cryptographic soundness, refer to the hardware vendors' documentation and advisories. - -## Master secret - -The master secret is the cryptographic material used for deriving the [*clusterID*](#cluster-identity) and the *key encryption key (KEK)* for [storage encryption](#storage-encryption). -It's generated during the bootstrapping of a Constellation cluster. -It can either be managed by [Constellation](#constellation-managed-key-management) or an [external key management system](#user-managed-key-management). -In case of [recovery](#recovery-and-migration), the master secret allows to decrypt the state and recover a Constellation cluster. 
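For illustration, generating such a secret (plus a salt, see [cluster identity](keys.md#cluster-identity)) amounts to sampling cryptographically secure random bytes and Base64-encoding them. The sizes and JSON field names below are assumptions and don't necessarily match `constellation-mastersecret.json`.

```go
package main

import (
	"crypto/rand"
	"encoding/base64"
	"encoding/json"
	"fmt"
)

func main() {
	// 32 bytes each is an assumption for illustration, not a documented size.
	secret := make([]byte, 32)
	salt := make([]byte, 32)
	if _, err := rand.Read(secret); err != nil {
		panic(err)
	}
	if _, err := rand.Read(salt); err != nil {
		panic(err)
	}

	// Persist the material Base64-encoded; field names are hypothetical.
	out, _ := json.MarshalIndent(map[string]string{
		"key":  base64.StdEncoding.EncodeToString(secret),
		"salt": base64.StdEncoding.EncodeToString(salt),
	}, "", "  ")
	fmt.Println(string(out))
}
```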
- -## Cluster identity - -The identity of a Constellation cluster is represented by cryptographic [measurements](attestation.md#runtime-measurements): - -The **base measurements** represent the identity of a valid, uninitialized Constellation node. -They depend on the node image, but are otherwise the same for every Constellation cluster. -On node boot, they're determined using the CVM's attestation mechanism and [measured boot up to the read-only root filesystem](images.md). - -The **clusterID** represents the identity of a single initialized Constellation cluster. -It's derived from the master secret and a cryptographically random salt and unique for every Constellation cluster. -The [Bootstrapper](microservices.md#bootstrapper) measures the *clusterID* into its own PCR before executing any code not measured as part of the *base measurements*. -See [Node attestation](attestation.md#node-attestation) for details. - -The remote attestation statement of a Constellation cluster combines the *base measurements* and the *clusterID* for a verifiable, unspoofable, unique identity. - -## Network encryption - -Constellation encrypts all cluster network communication using the [container network interface (CNI)](https://github.com/containernetworking/cni). -See [network encryption](networking.md) for more details. - -The Cilium agent running on each node establishes a secure [WireGuard](https://www.wireguard.com/) tunnel between it and all other known nodes in the cluster. -Each node creates its own [Curve25519](http://cr.yp.to/ecdh.html) encryption key pair and distributes its public key via Kubernetes. -A node uses another node's public key to decrypt and encrypt traffic from and to Cilium-managed endpoints running on that node. -Connections are always encrypted peer-to-peer using [ChaCha20](http://cr.yp.to/chacha.html) with [Poly1305](http://cr.yp.to/mac.html). -WireGuard implements [forward secrecy with key rotation every 2 minutes](https://lists.zx2c4.com/pipermail/wireguard/2017-December/002141.html). -Cilium supports [key rotation](https://docs.cilium.io/en/stable/security/network/encryption-ipsec/#key-rotation) for the long-term node keys via Kubernetes secrets. - -## Storage encryption - -Constellation supports transparent encryption of persistent storage. -The Linux kernel's device mapper-based encryption features are used to encrypt the data on the block storage level. -Currently, the following primitives are used for block storage encryption: - -* [dm-crypt](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/dm-crypt.html) -* [dm-integrity](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/dm-integrity.html) - -Adding primitives for integrity protection in the CVM attacker model are under active development and will be available in a future version of Constellation. -See [encrypted storage](encrypted-storage.md) for more details. - -As a cluster administrator, when creating a cluster, you can use the Constellation [installation program](orchestration.md) to select one of the following methods for key management: - -* Constellation-managed key management -* User-managed key management - -### Constellation-managed key management - -#### Key material and key derivation - -During the creation of a Constellation cluster, the cluster's master secret is used to derive a KEK. -This means creating two clusters with the same master secret will yield the same KEK. -Any data encryption key (DEK) is derived from the KEK via HKDF. 
-Note that the master secret is recommended to be unique for every cluster and shouldn't be reused (except in case of [recovering](../workflows/recovery.md) a cluster). - -#### State and storage - -The KEK is derived from the master secret during the initialization. -Subsequently, all other key material is derived from the KEK. -Given the same KEK, any DEK can be derived deterministically from a given identifier. -Hence, there is no need to store DEKs. They can be derived on demand. -After the KEK was derived, it's stored in memory only and never leaves the CVM context. - -#### Availability - -Constellation-managed key management has the same availability as the underlying Kubernetes cluster. -Therefore, the KEK is stored in the [distributed Kubernetes etcd storage](https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/) to allow for unexpected but non-fatal (control-plane) node failure. -The etcd storage is backed by the encrypted and integrity protected [state disk](images.md#state-disk) of the nodes. - -#### Recovery - -Constellation clusters can be recovered in the event of a disaster, even when all node machines have been stopped and need to be rebooted. -For details on the process see the [recovery workflow](../workflows/recovery.md). - -### User-managed key management - -User-managed key management is under active development and will be available soon. -In scenarios where constellation-managed key management isn't an option, this mode allows you to keep full control of your keys. -For example, compliance requirements may force you to keep your KEKs in an on-prem key management system (KMS). - -During the creation of a Constellation cluster, you specify a KEK present in a remote KMS. -This follows the common scheme of "bring your own key" (BYOK). -Constellation will support several KMSs for managing the storage and access of your KEK. -Initially, it will support the following KMSs: - -* [AWS KMS](https://aws.amazon.com/kms/) -* [GCP KMS](https://cloud.google.com/security-key-management) -* [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/#product-overview) -* [KMIP-compatible KMS](https://www.oasis-open.org/committees/tc_home.php?wg_abbrev=kmip) - -Storing the keys in Cloud KMS of AWS, Azure, or GCP binds the key usage to the particular cloud identity access management (IAM). -In the future, Constellation will support remote attestation-based access policies for Cloud KMS once available. -Note that using a Cloud KMS limits the isolation and protection to the guarantees of the particular offering. - -KMIP support allows you to use your KMIP-compatible on-prem KMS and keep full control over your keys. -This follows the common scheme of "hold your own key" (HYOK). - -The KEK is used to encrypt per-data "data encryption keys" (DEKs). -DEKs are generated to encrypt your data before storing it on persistent storage. -After being encrypted by the KEK, the DEKs are stored on dedicated cloud storage for persistence. -Currently, Constellation supports the following cloud storage options: - -* [AWS S3](https://aws.amazon.com/s3/) -* [GCP Cloud Storage](https://cloud.google.com/storage) -* [Azure Blob Storage](https://azure.microsoft.com/en-us/services/storage/blobs/#overview) - -The DEKs are only present in plaintext form in the encrypted main memory of the CVMs. -Similarly, the cryptographic operations for encrypting data before writing it to persistent storage are performed in the context of the CVMs. 
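A minimal sketch of the wrapping step described above (encrypting a DEK under the KEK before persisting it on cloud storage), assuming AES-256-GCM purely for illustration; the document doesn't specify the cipher or the exact KMS protocol.

```go
package envelope

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"errors"
)

// wrapDEK encrypts a freshly generated DEK under the KEK so that only the
// wrapped form needs to be stored on untrusted cloud storage.
func wrapDEK(kek, dek []byte) ([]byte, error) {
	block, err := aes.NewCipher(kek) // kek must be 16, 24, or 32 bytes long
	if err != nil {
		return nil, err
	}
	aead, err := cipher.NewGCM(block)
	if err != nil {
		return nil, err
	}
	nonce := make([]byte, aead.NonceSize())
	if _, err := rand.Read(nonce); err != nil {
		return nil, err
	}
	// Prepend the nonce so unwrapDEK can recover it.
	return aead.Seal(nonce, nonce, dek, nil), nil
}

// unwrapDEK reverses wrapDEK inside the CVM before the DEK is used.
func unwrapDEK(kek, wrapped []byte) ([]byte, error) {
	block, err := aes.NewCipher(kek)
	if err != nil {
		return nil, err
	}
	aead, err := cipher.NewGCM(block)
	if err != nil {
		return nil, err
	}
	if len(wrapped) < aead.NonceSize() {
		return nil, errors.New("wrapped DEK too short")
	}
	nonce, ciphertext := wrapped[:aead.NonceSize()], wrapped[aead.NonceSize():]
	return aead.Open(nil, nonce, ciphertext, nil)
}
```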
- -#### Recovery and migration - -In the case of a disaster, the KEK can be used to decrypt the DEKs locally and subsequently use them to decrypt and retrieve the data. -In case of migration, configuring the same KEK will provide seamless migration of data. -Thus, only the DEK storage needs to be transferred to the new cluster alongside the encrypted data for seamless migration. diff --git a/docs/versioned_docs/version-2.17/architecture/microservices.md b/docs/versioned_docs/version-2.17/architecture/microservices.md deleted file mode 100644 index 90bae783b..000000000 --- a/docs/versioned_docs/version-2.17/architecture/microservices.md +++ /dev/null @@ -1,73 +0,0 @@ -# Microservices - -Constellation takes care of bootstrapping and initializing a Confidential Kubernetes cluster. -During the lifetime of the cluster, it handles day 2 operations such as key management, remote attestation, and updates. -These features are provided by several microservices: - -* The [Bootstrapper](microservices.md#bootstrapper) initializes a Constellation node and bootstraps the cluster -* The [JoinService](microservices.md#joinservice) joins new nodes to an existing cluster -* The [VerificationService](microservices.md#verificationservice) provides remote attestation functionality -* The [KeyService](microservices.md#keyservice) manages Constellation-internal keys - -The relations between microservices are shown in the following diagram: - -```mermaid -flowchart LR - subgraph admin [Admin's machine] - A[Constellation CLI] - end - subgraph img [Constellation OS image] - B[Constellation OS] - C[Bootstrapper] - end - subgraph Kubernetes - D[JoinService] - E[KeyService] - F[VerificationService] - end - A -- deploys --> - B -- starts --> C - C -- deploys --> D - C -- deploys --> E - C -- deploys --> F -``` - -## Bootstrapper - -The *Bootstrapper* is the first microservice launched after booting a Constellation node image. -It sets up that machine as a Kubernetes node and integrates that node into the Kubernetes cluster. -To this end, the *Bootstrapper* first downloads and verifies the [Kubernetes components](https://kubernetes.io/docs/concepts/overview/components/) at the configured versions. -The *Bootstrapper* tries to find an existing cluster and if successful, communicates with the [JoinService](microservices.md#joinservice) to join the node. -Otherwise, it waits for an initialization request to create a new Kubernetes cluster. - -## JoinService - -The *JoinService* runs as [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) on each control-plane node. -New nodes (at cluster start, or later through autoscaling) send a request to the service over [attested TLS (aTLS)](attestation.md#attested-tls-atls). -The *JoinService* verifies the new node's certificate and attestation statement. -If attestation is successful, the new node is supplied with an encryption key from the [*KeyService*](microservices.md#keyservice) for its state disk, and a Kubernetes bootstrap token. - - -```mermaid -sequenceDiagram - participant New node - participant JoinService - New node->>JoinService: aTLS handshake (server side verification) - JoinService-->>New node: # - New node->>+JoinService: IssueJoinTicket(DiskUUID, NodeName, IsControlPlane) - JoinService->>+KeyService: GetDataKey(DiskUUID) - KeyService-->>-JoinService: DiskEncryptionKey - JoinService-->>-New node: DiskEncryptionKey, KubernetesJoinToken, ... -``` - -## VerificationService - -The *VerificationService* runs as DaemonSet on each node. 
-It provides user-facing functionality for remote attestation during the cluster's lifetime via an endpoint for [verifying the cluster](attestation.md#cluster-attestation).
-Read more about the hardware-based [attestation feature](attestation.md) of Constellation and how to [verify](../workflows/verify-cluster.md) a cluster on the client side.
-
-## KeyService
-
-The *KeyService* runs as DaemonSet on each control-plane node.
-It implements the key management for the [storage encryption keys](keys.md#storage-encryption) in Constellation. These keys are used for the [state disk](images.md#state-disk) of each node and the [transparently encrypted storage](encrypted-storage.md) for Kubernetes.
-Depending on whether the [constellation-managed](keys.md#constellation-managed-key-management) or [user-managed](keys.md#user-managed-key-management) mode is used, the *KeyService* holds the key encryption key (KEK) directly or calls an external key management service (KMS) for key derivation, respectively.
diff --git a/docs/versioned_docs/version-2.17/architecture/networking.md b/docs/versioned_docs/version-2.17/architecture/networking.md
deleted file mode 100644
index e9cbdf029..000000000
--- a/docs/versioned_docs/version-2.17/architecture/networking.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# Network encryption
-
-Constellation encrypts all pod communication using the [container network interface (CNI)](https://github.com/containernetworking/cni).
-To that end, Constellation deploys, configures, and operates the [Cilium](https://cilium.io/) CNI plugin.
-Cilium provides [transparent encryption](https://docs.cilium.io/en/stable/security/network/encryption) for all cluster traffic using either IPsec or [WireGuard](https://www.wireguard.com/).
-Currently, Constellation only supports WireGuard as the encryption engine.
-You can read more about the cryptographic soundness of WireGuard [in their white paper](https://www.wireguard.com/papers/wireguard.pdf).
-
-Cilium is actively working on implementing a feature called [`host-to-host`](https://github.com/cilium/cilium/pull/19401) encryption mode for WireGuard.
-With `host-to-host`, all traffic between nodes will be tunneled via WireGuard (host-to-host, host-to-pod, pod-to-host, pod-to-pod).
-Until the `host-to-host` feature is released, Constellation enables `pod-to-pod` encryption.
-This mode encrypts all traffic between Kubernetes pods using WireGuard tunnels.
-
-When using Cilium in the default setup but with encryption enabled, there is a [known issue](https://docs.cilium.io/en/v1.12/gettingstarted/encryption/#egress-traffic-to-not-yet-discovered-remote-endpoints-may-be-unencrypted)
-that can cause pod-to-pod traffic to be unencrypted.
-To mitigate this issue, Constellation adds a *strict* mode to Cilium's `pod-to-pod` encryption.
-This mode changes the default behavior: traffic that's destined for an unknown endpoint isn't sent out in plaintext but is dropped instead.
-The strict mode distinguishes traffic that's sent to a pod from traffic that's destined for a cluster-external endpoint by considering the pod's CIDR range.
-
-Traffic originating from hosts isn't encrypted yet.
-This mainly includes health checks from the Kubernetes API server.
-Also, traffic proxied over the API server, e.g. via `kubectl port-forward`, isn't encrypted.
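As background to the WireGuard tunnels described above, each peer holds a Curve25519 key pair (see [keys and cryptography](keys.md#network-encryption)). The following Go sketch only illustrates the primitive, not how Cilium actually provisions or rotates keys.

```go
package wgkeys

import (
	"crypto/rand"

	"golang.org/x/crypto/curve25519"
)

// newKeyPair generates a Curve25519 private key and its corresponding public
// key, the same primitive WireGuard peers exchange.
func newKeyPair() (private, public []byte, err error) {
	private = make([]byte, curve25519.ScalarSize)
	if _, err := rand.Read(private); err != nil {
		return nil, nil, err
	}
	// X25519 derives the public key by multiplying the private scalar with
	// the curve's base point (clamping is handled internally).
	public, err = curve25519.X25519(private, curve25519.Basepoint)
	if err != nil {
		return nil, nil, err
	}
	return private, public, nil
}
```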
diff --git a/docs/versioned_docs/version-2.17/architecture/observability.md b/docs/versioned_docs/version-2.17/architecture/observability.md deleted file mode 100644 index 0f4daffd4..000000000 --- a/docs/versioned_docs/version-2.17/architecture/observability.md +++ /dev/null @@ -1,74 +0,0 @@ -# Observability - -In Kubernetes, observability is the ability to gain insight into the behavior and performance of applications. -It helps identify and resolve issues more effectively, ensuring stability and performance of Kubernetes workloads, reducing downtime and outages, and improving efficiency. -The "three pillars of observability" are logs, metrics, and traces. - -In the context of Confidential Computing, observability is a delicate subject and needs to be applied such that it doesn't leak any sensitive information. -The following gives an overview of where and how you can apply standard observability tools in Constellation. - -## Cloud resource monitoring - -While inaccessible, Constellation's nodes are still visible as black box VMs to the hypervisor. -Resource consumption, such as memory and CPU utilization, can be monitored from the outside and observed via the cloud platforms directly. -Similarly, other resources, such as storage and network and their respective metrics, are visible via the cloud platform. - -## Metrics - -Metrics are numeric representations of data measured over intervals of time. They're essential for understanding system health and gaining insights using telemetry signals. - -By default, Constellation exposes the [metrics for Kubernetes system components](https://kubernetes.io/docs/concepts/cluster-administration/system-metrics/) inside the cluster. -Similarly, the [etcd metrics](https://etcd.io/docs/v3.5/metrics/) endpoints are exposed inside the cluster. -These [metrics endpoints can be disabled](https://kubernetes.io/docs/concepts/cluster-administration/system-metrics/#disabling-metrics). - -You can collect these cluster-internal metrics via tools such as [Prometheus](https://prometheus.io/) or the [Elastic Stack](https://www.elastic.co/de/elastic-stack/). - -Constellation's CNI Cilium also supports [metrics via Prometheus endpoints](https://docs.cilium.io/en/latest/observability/metrics/). -However, in Constellation, they're disabled by default and must be enabled first. - -## Logs - -Logs represent discrete events that usually describe what's happening with your service. -The payload is an actual message emitted from your system along with a metadata section containing a timestamp, labels, and tracking identifiers. - -### System logs - -Detailed system-level logs are accessible via `/var/log` and [journald](https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html) on the nodes directly. -They can be collected from there, for example, via [Filebeat and Logstash](https://www.elastic.co/guide/en/beats/filebeat/current/logstash-output.html), which are tools of the [Elastic Stack](https://www.elastic.co/de/elastic-stack/). - -In case of an error during the initialization, the CLI automatically collects the [Bootstrapper](./microservices.md#bootstrapper) logs and returns these as a file for [troubleshooting](../workflows/troubleshooting.md). Here is an example of such an event: - -```shell-session -Cluster initialization failed. This error is not recoverable. -Terminate your cluster and try again. 
-Fetched bootstrapper logs are stored in "constellation-cluster.log" -``` - -### Kubernetes logs - -Constellation supports the [Kubernetes logging architecture](https://kubernetes.io/docs/concepts/cluster-administration/logging/). -By default, logs are written to the nodes' encrypted state disks. -These include the Pod and container logs and the [system component logs](https://kubernetes.io/docs/concepts/cluster-administration/logging/#system-component-logs). - -[Constellation services](microservices.md) run as Pods inside the `kube-system` namespace and use the standard container logging mechanism. -The same applies for the [Cilium Pods](https://docs.cilium.io/en/latest/operations/troubleshooting/#logs). - -You can collect logs from within the cluster via tools such as [Fluentd](https://github.com/fluent/fluentd), [Loki](https://github.com/grafana/loki), or the [Elastic Stack](https://www.elastic.co/de/elastic-stack/). - -## Traces - -Modern systems are implemented as interconnected complex and distributed microservices. Understanding request flows and system communications is challenging, mainly because all systems in a chain need to be modified to propagate tracing information. Distributed tracing is a new approach to increasing observability and understanding performance bottlenecks. A trace represents consecutive events that reflect an end-to-end request path in a distributed system. - -Constellation supports [traces for Kubernetes system components](https://kubernetes.io/docs/concepts/cluster-administration/system-traces/). -By default, they're disabled and need to be enabled first. - -Similarly, Cilium can be enabled to [export traces](https://cilium.io/use-cases/metrics-export/). - -You can collect these traces via tools such as [Jaeger](https://www.jaegertracing.io/) or [Zipkin](https://zipkin.io/). - -## Integrations - -Platforms and SaaS solutions such as Datadog, logz.io, Dynatrace, or New Relic facilitate the observability challenge for Kubernetes and provide all-in-one SaaS solutions. -They install agents into the cluster that collect metrics, logs, and tracing information and upload them into the data lake of the platform. -Technically, the agent-based approach is compatible with Constellation, and attaching these platforms is straightforward. -However, you need to evaluate if the exported data might violate Constellation's compliance and privacy guarantees by uploading them to a third-party platform. diff --git a/docs/versioned_docs/version-2.17/architecture/orchestration.md b/docs/versioned_docs/version-2.17/architecture/orchestration.md deleted file mode 100644 index 3c8d529e7..000000000 --- a/docs/versioned_docs/version-2.17/architecture/orchestration.md +++ /dev/null @@ -1,83 +0,0 @@ -# Orchestrating Constellation clusters - -You can use the CLI to create a cluster on the supported cloud platforms. -The CLI provisions the resources in your cloud environment and initiates the initialization of your cluster. -It uses a set of parameters and an optional configuration file to manage your cluster installation. -The CLI is also used for updating your cluster. - -## Workspaces - -Each Constellation cluster has an associated *workspace*. -The workspace is where data such as the Constellation state and config files are stored. -Each workspace is associated with a single cluster and configuration. -The CLI stores state in the local filesystem making the current directory the active workspace. -Multiple clusters require multiple workspaces, hence, multiple directories. 
-Note that every operation on a cluster always has to be performed from the directory associated with its workspace. - -You may copy files from the workspace to other locations, -but you shouldn't move or delete them while the cluster is still being used. -The Constellation CLI takes care of managing the workspace. -Only when a cluster was terminated, and you are sure the files aren't needed anymore, should you remove a workspace. - -## Cluster creation process - -To allow for fine-grained configuration of your cluster and cloud environment, Constellation supports an extensive configuration file with strong defaults. [Generating the configuration file](../workflows/config.md) is typically the first thing you do in the workspace. - -Altogether, the following files are generated during the creation of a Constellation cluster and stored in the current workspace: - -* a configuration file -* a state file -* a Base64-encoded master secret -* [Terraform artifacts](../reference/terraform.md), stored in subdirectories -* a Kubernetes `kubeconfig` file. - -After the initialization of your cluster, the CLI will provide you with a Kubernetes `kubeconfig` file. -This file grants you access to your Kubernetes cluster and configures the [kubectl](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) tool. -In addition, the cluster's [identifier](orchestration.md#post-installation-configuration) is returned and stored in the state file. - -### Creation process details - -1. The CLI `apply` command first creates the confidential VM (CVM) resources in your cloud environment and configures the network -2. Each CVM boots the Constellation node image and measures every component in the boot chain -3. The first microservice launched in each node is the [*Bootstrapper*](microservices.md#bootstrapper) -4. The *Bootstrapper* waits until it either receives an initialization request or discovers an initialized cluster -5. The CLI then connects to the *Bootstrapper* of a selected node, sends the configuration, and initiates the initialization of the cluster -6. The *Bootstrapper* of **that** node [initializes the Kubernetes cluster](microservices.md#bootstrapper) and deploys the other Constellation [microservices](microservices.md) including the [*JoinService*](microservices.md#joinservice) -7. Subsequently, the *Bootstrappers* of the other nodes discover the initialized cluster and send join requests to the *JoinService* -8. As part of the join request each node includes an attestation statement of its boot measurements as authentication -9. The *JoinService* verifies the attestation statements and joins the nodes to the Kubernetes cluster -10. This process is repeated for every node joining the cluster later (e.g., through autoscaling) - -## Post-installation configuration - -Post-installation the CLI provides a configuration for [accessing the cluster using the Kubernetes API](https://kubernetes.io/docs/tasks/administer-cluster/access-cluster-api/). -The `kubeconfig` file provides the credentials and configuration for connecting and authenticating to the API server. -Once configured, orchestrate the Kubernetes cluster via `kubectl`. 
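For example, assuming the default file name `constellation-admin.conf` in the current workspace:

```bash
# Point kubectl at the kubeconfig written during initialization
export KUBECONFIG="$PWD/constellation-admin.conf"

# Verify that the API server is reachable and the nodes are ready
kubectl get nodes
```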
- -After the initialization, the CLI will present you with a couple of tokens: - -* The [*master secret*](keys.md#master-secret) (stored in the `constellation-mastersecret.json` file by default) -* The [*clusterID*](keys.md#cluster-identity) of your cluster in Base64 encoding - -You can read more about these values and their meaning in the guide on [cluster identity](keys.md#cluster-identity). - -The *master secret* must be kept secret and can be used to [recover your cluster](../workflows/recovery.md). -Instead of managing this secret manually, you can [use your key management solution of choice](keys.md#user-managed-key-management) with Constellation. - -The *clusterID* uniquely identifies a cluster and can be used to [verify your cluster](../workflows/verify-cluster.md). - -## Upgrades - -Constellation images and microservices may need to be upgraded to new versions during the lifetime of a cluster. -Constellation implements a rolling update mechanism ensuring no downtime of the control or data plane. -You can upgrade a Constellation cluster with a single operation by using the CLI. -For step-by-step instructions on how to do this, refer to [Upgrade your cluster](../workflows/upgrade.md). - -### Attestation of upgrades - -With every new image, corresponding measurements are released. -During an update procedure, the CLI provides new measurements to the [JoinService](microservices.md#joinservice) securely. -New measurements for an updated image are automatically pulled and verified by the CLI following the [supply chain security concept](attestation.md#chain-of-trust) of Constellation. -The [attestation section](attestation.md#cluster-facing-attestation) describes in detail how these measurements are then used by the JoinService for the attestation of nodes. - - diff --git a/docs/versioned_docs/version-2.17/architecture/overview.md b/docs/versioned_docs/version-2.17/architecture/overview.md deleted file mode 100644 index 386f93b2f..000000000 --- a/docs/versioned_docs/version-2.17/architecture/overview.md +++ /dev/null @@ -1,30 +0,0 @@ -# Overview - -Constellation is a cloud-based confidential orchestration platform. -The foundation of Constellation is Kubernetes and therefore shares the same technology stack and architecture principles. -To learn more about Constellation and Kubernetes, see [product overview](../overview/product.md). - -## About orchestration and updates - -As a cluster administrator, you can use the [Constellation CLI](orchestration.md) to install and deploy a cluster. -Updates are provided in accordance with the [support policy](versions.md). - -## About microservices and attestation - -Constellation manages the nodes and network in your cluster. All nodes are bootstrapped by the [*Bootstrapper*](microservices.md#bootstrapper). They're verified and authenticated by the [*JoinService*](microservices.md#joinservice) before being added to the cluster and the network. Finally, the entire cluster can be verified via the [*VerificationService*](microservices.md#verificationservice) using [remote attestation](attestation.md). - -## About node images and verified boot - -Constellation comes with operating system images for Kubernetes control-plane and worker nodes. -They're highly optimized for running containerized workloads and specifically prepared for running inside confidential VMs. -You can learn more about [the images](images.md) and how verified boot ensures their integrity during boot and beyond. 
- -## About key management and cryptographic primitives - -Encryption of data at-rest, in-transit, and in-use is the fundamental building block for confidential computing and Constellation. Learn more about the [keys and cryptographic primitives](keys.md) used in Constellation, [encrypted persistent storage](encrypted-storage.md), and [network encryption](networking.md). - -## About observability - -Observability in Kubernetes refers to the capability to troubleshoot issues using telemetry signals such as logs, metrics, and traces. -In the realm of Confidential Computing, it's crucial that observability aligns with confidentiality, necessitating careful implementation. -Learn more about the [observability capabilities in Constellation](./observability.md). diff --git a/docs/versioned_docs/version-2.17/architecture/versions.md b/docs/versioned_docs/version-2.17/architecture/versions.md deleted file mode 100644 index 85d35e6a9..000000000 --- a/docs/versioned_docs/version-2.17/architecture/versions.md +++ /dev/null @@ -1,21 +0,0 @@ -# Versions and support policy - -All components of Constellation use a three-digit version number of the form `v<MAJOR>.<MINOR>.<PATCH>`. -The components are released in lock step, usually on the first Tuesday of every month. This release primarily introduces new features, but may also include security or performance improvements. The `MINOR` version will be incremented as part of this release. - -Additional `PATCH` releases may be created on demand, to fix security issues or bugs before the next `MINOR` release window. - -New releases are published on [GitHub](https://github.com/edgelesssys/constellation/releases). - -## Kubernetes support policy - -Constellation is aligned to the [version support policy of Kubernetes](https://kubernetes.io/releases/version-skew-policy/#supported-versions), and therefore usually supports the most recent three minor versions. -When a new minor version of Kubernetes is released, support is added to the next Constellation release, and that version then supports four Kubernetes versions. -Subsequent Constellation releases drop support for the oldest (and deprecated) Kubernetes version. -The following Kubernetes versions are currently supported: - - -* v1.28.11 -* v1.29.6 -* v1.30.2 diff --git a/docs/versioned_docs/version-2.17/getting-started/examples.md b/docs/versioned_docs/version-2.17/getting-started/examples.md deleted file mode 100644 index fded84980..000000000 --- a/docs/versioned_docs/version-2.17/getting-started/examples.md +++ /dev/null @@ -1,6 +0,0 @@ -# Examples - -After you [installed the CLI](install.md) and [created your first cluster](first-steps.md), you're ready to deploy applications. Why not start with one of the following examples?
-* [Emojivoto](examples/emojivoto.md): a simple but fun web application -* [Online Boutique](examples/online-boutique.md): an e-commerce demo application by Google consisting of 11 separate microservices -* [Horizontal Pod Autoscaling](examples/horizontal-scaling.md): an example demonstrating Constellation's autoscaling capabilities diff --git a/docs/versioned_docs/version-2.17/getting-started/examples/emojivoto.md b/docs/versioned_docs/version-2.17/getting-started/examples/emojivoto.md deleted file mode 100644 index 2bbe27917..000000000 --- a/docs/versioned_docs/version-2.17/getting-started/examples/emojivoto.md +++ /dev/null @@ -1,22 +0,0 @@ -# Emojivoto -[Emojivoto](https://github.com/BuoyantIO/emojivoto) is a simple and fun application that's well suited to test the basic functionality of your cluster. - - - -emojivoto - Web UI - - - -1. Deploy the application: - ```bash - kubectl apply -k github.com/BuoyantIO/emojivoto/kustomize/deployment - ``` -2. Wait until it becomes available: - ```bash - kubectl wait --for=condition=available --timeout=60s -n emojivoto --all deployments - ``` -3. Forward the web service to your machine: - ```bash - kubectl -n emojivoto port-forward svc/web-svc 8080:80 - ``` -4. Visit [http://localhost:8080](http://localhost:8080) diff --git a/docs/versioned_docs/version-2.17/getting-started/examples/filestash-s3proxy.md b/docs/versioned_docs/version-2.17/getting-started/examples/filestash-s3proxy.md deleted file mode 100644 index b9a394256..000000000 --- a/docs/versioned_docs/version-2.17/getting-started/examples/filestash-s3proxy.md +++ /dev/null @@ -1,107 +0,0 @@ - -# Deploying Filestash - -Filestash is a web frontend for different storage backends, including S3. -It's a useful application to showcase s3proxy in action. - -1. Deploy s3proxy as described in [Deployment](../../workflows/s3proxy.md#deployment). -2. 
Create a deployment file for Filestash with one pod: - -```sh -cat << EOF > "deployment-filestash.yaml" -apiVersion: apps/v1 -kind: Deployment -metadata: - name: filestash -spec: - replicas: 1 - selector: - matchLabels: - app: filestash - template: - metadata: - labels: - app: filestash - spec: - hostAliases: - - ip: $(kubectl get svc s3proxy-service -o=jsonpath='{.spec.clusterIP}') - hostnames: - - "s3.us-east-1.amazonaws.com" - - "s3.us-east-2.amazonaws.com" - - "s3.us-west-1.amazonaws.com" - - "s3.us-west-2.amazonaws.com" - - "s3.eu-north-1.amazonaws.com" - - "s3.eu-south-1.amazonaws.com" - - "s3.eu-south-2.amazonaws.com" - - "s3.eu-west-1.amazonaws.com" - - "s3.eu-west-2.amazonaws.com" - - "s3.eu-west-3.amazonaws.com" - - "s3.eu-central-1.amazonaws.com" - - "s3.eu-central-2.amazonaws.com" - - "s3.ap-northeast-1.amazonaws.com" - - "s3.ap-northeast-2.amazonaws.com" - - "s3.ap-northeast-3.amazonaws.com" - - "s3.ap-east-1.amazonaws.com" - - "s3.ap-southeast-1.amazonaws.com" - - "s3.ap-southeast-2.amazonaws.com" - - "s3.ap-southeast-3.amazonaws.com" - - "s3.ap-southeast-4.amazonaws.com" - - "s3.ap-south-1.amazonaws.com" - - "s3.ap-south-2.amazonaws.com" - - "s3.me-south-1.amazonaws.com" - - "s3.me-central-1.amazonaws.com" - - "s3.il-central-1.amazonaws.com" - - "s3.af-south-1.amazonaws.com" - - "s3.ca-central-1.amazonaws.com" - - "s3.sa-east-1.amazonaws.com" - containers: - - name: filestash - image: machines/filestash:latest - ports: - - containerPort: 8334 - volumeMounts: - - name: ca-cert - mountPath: /etc/ssl/certs/kube-ca.crt - subPath: kube-ca.crt - volumes: - - name: ca-cert - secret: - secretName: s3proxy-tls - items: - - key: ca.crt - path: kube-ca.crt -EOF -``` - -The pod spec includes the `hostAliases` key, which adds an entry to the pod's `/etc/hosts`. -The entry forwards all requests for any of the currently defined AWS regions to the Kubernetes service `s3proxy-service`. -If you followed the s3proxy [Deployment](../../workflows/s3proxy.md#deployment) guide, this service points to a s3proxy pod. - -The deployment specifies all regions explicitly to prevent accidental data leaks. -If one of your buckets were located in a region that's not part of the `hostAliases` key, traffic towards those buckets would not be redirected to s3proxy. -Similarly, if you want to exclude data for specific regions from going through s3proxy you can remove those regions from the deployment. - -The spec also includes a volume mount for the TLS certificate and adds it to the pod's certificate trust store. -The volume is called `ca-cert`. -The key `ca.crt` of that volume is mounted to `/etc/ssl/certs/kube-ca.crt`, which is the default certificate trust store location for that container's OpenSSL library. -Not adding the CA certificate will result in TLS authentication errors. - -3. Apply the file: `kubectl apply -f deployment-filestash.yaml` - -Afterward, you can use a port forward to access the Filestash pod: -`kubectl port-forward pod/$(kubectl get pod --selector='app=filestash' -o=jsonpath='{.items[*].metadata.name}') 8334:8334` - -4. After browsing to `localhost:8443`, Filestash will ask you to set an administrator password. -After setting it, you can directly leave the admin area by clicking the blue cloud symbol in the top left corner. -Subsequently, you can select S3 as storage backend and enter your credentials. -This will bring you to an overview of your buckets. -If you want to deploy Filestash in production, take a look at its [documentation](https://www.filestash.app/docs/). - -5. 
To see the logs of s3proxy intercepting requests made to S3, run: `kubectl logs -f pod/$(kubectl get pod --selector='app=s3proxy' -o=jsonpath='{.items[*].metadata.name}')` -Look out for log messages labeled `intercepting`. -There is one such log message for each message that's encrypted, decrypted, or blocked. - -6. Once you have uploaded a file with Filestash, you should be able to view the file in Filestash. -However, if you go to the AWS S3 [Web UI](https://s3.console.aws.amazon.com/s3/home) and download the file you just uploaded in Filestash, you won't be able to read it. -Another way to spot encrypted files without downloading them is to click on a file, scroll to the Metadata section, and look for the header named `x-amz-meta-constellation-encryption`. -This header holds the encrypted data encryption key of the object and is only present on objects that are encrypted by s3proxy. diff --git a/docs/versioned_docs/version-2.17/getting-started/examples/horizontal-scaling.md b/docs/versioned_docs/version-2.17/getting-started/examples/horizontal-scaling.md deleted file mode 100644 index dfaf9e742..000000000 --- a/docs/versioned_docs/version-2.17/getting-started/examples/horizontal-scaling.md +++ /dev/null @@ -1,98 +0,0 @@ -# Horizontal Pod Autoscaling -This example demonstrates Constellation's autoscaling capabilities. It's based on the Kubernetes [HorizontalPodAutoscaler Walkthrough](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/). During the following steps, Constellation will spawn new VMs on demand, verify them, add them to the cluster, and delete them again when the load has settled down. - -## Requirements -The cluster needs to be initialized with Kubernetes 1.23 or later. In addition, [autoscaling must be enabled](../../workflows/scale.md) to enable Constellation to assign new nodes dynamically. - -Just for this example specifically, the cluster should have as few worker nodes in the beginning as possible. Start with a small cluster with only *one* low-powered node for the control-plane node and *one* low-powered worker node. - -:::info -We tested the example using instances of types `Standard_DC4as_v5` on Azure and `n2d-standard-4` on GCP. -::: - -## Setup - -1. Install the Kubernetes Metrics Server: - ```bash - kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml - ``` - -2. Deploy the HPA example server that's supposed to be scaled under load. - - This manifest is similar to the one from the Kubernetes HPA walkthrough, but with increased CPU limits and requests to facilitate the triggering of node scaling events. - ```bash - cat < - -Online Boutique - Web UI - - - -1. Create a namespace: - ```bash - kubectl create ns boutique - ``` -2. Deploy the application: - ```bash - kubectl apply -n boutique -f https://github.com/GoogleCloudPlatform/microservices-demo/raw/main/release/kubernetes-manifests.yaml - ``` -3. Wait for all services to become available: - ```bash - kubectl wait --for=condition=available --timeout=300s -n boutique --all deployments - ``` -4. Get the frontend's external IP address: - ```shell-session - $ kubectl get service frontend-external -n boutique | awk '{print $4}' - EXTERNAL-IP - - ``` - (`` is a placeholder for the IP assigned by your CSP.) -5. Enter the IP from the result in your browser to browse the online shop. 
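As a quick command-line check, you could also query the frontend directly. The following sketch assumes the load balancer exposes an IP address; on some CSPs the ingress is a hostname instead, in which case use the `hostname` field:

```bash
# Read the external IP of the frontend service and request the shop's landing page
FRONTEND_IP=$(kubectl get service frontend-external -n boutique -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
curl -sI "http://${FRONTEND_IP}" | head -n 1
```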
diff --git a/docs/versioned_docs/version-2.17/getting-started/first-steps-local.md b/docs/versioned_docs/version-2.17/getting-started/first-steps-local.md deleted file mode 100644 index 98f0302de..000000000 --- a/docs/versioned_docs/version-2.17/getting-started/first-steps-local.md +++ /dev/null @@ -1,277 +0,0 @@ -# First steps with a local cluster - -A local cluster lets you deploy and test Constellation without a cloud subscription. -You have two options: - -* Use MiniConstellation to automatically deploy a two-node cluster. -* For more fine-grained control, create the cluster using the QEMU provider. - -Both options use virtualization to create a local cluster with control-plane nodes and worker nodes. They **don't** require hardware with Confidential VM (CVM) support. For attestation, they currently use a software-based vTPM provided by KVM/QEMU. - -You need an x64 machine with a Linux OS. -You can use a VM, but it needs nested virtualization. - -## Prerequisites - -* Machine requirements: - * An x86-64 CPU with at least 4 cores (6 cores are recommended) - * At least 4 GB RAM (6 GB are recommended) - * 20 GB of free disk space - * Hardware virtualization enabled in the BIOS/UEFI (often referred to as Intel VT-x or AMD-V/SVM) / nested-virtualization support when using a VM -* Software requirements: - * Linux OS with [KVM kernel module](https://www.linux-kvm.org/page/Main_Page) - * Recommended: Ubuntu 22.04 LTS - * [Docker](https://docs.docker.com/engine/install/) - * [xsltproc](https://gitlab.gnome.org/GNOME/libxslt/-/wikis/home) - * (Optional) [virsh](https://www.libvirt.org/manpages/virsh.html) to observe and access your nodes - -### Software installation on Ubuntu - -```bash -# install Docker -curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg -echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null -sudo apt update -sudo apt install docker-ce -# install other dependencies -sudo apt install xsltproc -sudo snap install kubectl --classic -# install Constellation CLI -curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-linux-amd64 -sudo install constellation-linux-amd64 /usr/local/bin/constellation -# do not drop forwarded packages -sudo iptables -P FORWARD ACCEPT -``` - -## Create a cluster - - - - - -With the `constellation mini` command, you can deploy and test Constellation locally. This mode is called MiniConstellation. Conceptually, MiniConstellation is similar to [MicroK8s](https://microk8s.io/), [K3s](https://k3s.io/), and [minikube](https://minikube.sigs.k8s.io/docs/). - - -:::caution - -MiniConstellation has specific soft- and hardware requirements such as a Linux OS running on an x86-64 CPU. Pay attention to all [prerequisites](#prerequisites) when setting up. - -::: - -:::note - -Since MiniConstellation runs on your local system, cloud features such as load balancing, -attaching persistent storage, or autoscaling aren't available. - -::: - -The following creates your MiniConstellation cluster (may take up to 10 minutes to complete): - -```bash -constellation mini up -``` - -This will configure your current directory as the [workspace](../architecture/orchestration.md#workspaces) for this cluster. -All `constellation` commands concerning this cluster need to be issued from this directory. 
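Optionally, you can observe the created VMs with `virsh` from another terminal. The connection URI below is an assumption (MiniConstellation runs libvirt inside a local container); adjust it to wherever your libvirt daemon is reachable:

```bash
# List the MiniConstellation VMs (connection URI is an assumption, adjust as needed)
virsh -c qemu+tcp://localhost:16599/system list --all
```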
- - - - -With the QEMU provider, you can create a local Constellation cluster as if it were in the cloud. The provider uses [QEMU](https://www.qemu.org/) to create multiple VMs for the cluster nodes, which interact with each other. - -:::caution - -Constellation on QEMU has specific soft- and hardware requirements such as a Linux OS running on an x86-64 CPU. Pay attention to all [prerequisites](#prerequisites) when setting up. - -::: - -:::note - -Since Constellation on QEMU runs on your local system, cloud features such as load balancing, -attaching persistent storage, or autoscaling aren't available. - -::: - -1. To set up your local cluster, you need to create a configuration file for Constellation first. - - ```bash - constellation config generate qemu - ``` - - This creates a [configuration file](../workflows/config.md) for QEMU called `constellation-conf.yaml`. After that, your current folder also becomes your [workspace](../architecture/orchestration.md#workspaces). All `constellation` commands for your cluster need to be executed from this directory. - -2. Now you can create your cluster and its nodes. `constellation apply` uses the options set in `constellation-conf.yaml`. - - ```bash - constellation apply -y - ``` - - The Output should look like the following: - - ```shell-session - $ constellation apply -y - Checking for infrastructure changes - The following Constellation cluster will be created: - 3 control-plane nodes of type 2-vCPUs will be created. - 1 worker node of type 2-vCPUs will be created. - Creating - Cloud infrastructure created successfully. - Your Constellation master secret was successfully written to ./constellation-mastersecret.json - Connecting - Initializing cluster - Installing Kubernetes components - Your Constellation cluster was successfully initialized. - - Constellation cluster identifier g6iMP5wRU1b7mpOz2WEISlIYSfdAhB0oNaOg6XEwKFY= - Kubernetes configuration constellation-admin.conf - - You can now connect to your cluster by executing: - export KUBECONFIG="$PWD/constellation-admin.conf" - ``` - - The cluster's identifier will be different in your output. - Keep `constellation-mastersecret.json` somewhere safe. - This will allow you to [recover your cluster](../workflows/recovery.md) in case of a disaster. - - :::info - - Depending on your setup, `constellation apply` may take 10+ minutes to complete. - - ::: - -3. Configure kubectl - - ```bash - export KUBECONFIG="$PWD/constellation-admin.conf" - ``` - - - - -## Connect to the cluster - -Your cluster initially consists of a single control-plane node: - -```shell-session -$ kubectl get nodes -NAME STATUS ROLES AGE VERSION -control-plane-0 Ready control-plane 66s v1.24.6 -``` - -Additional nodes will request to join the cluster shortly. Before each additional node is allowed to join the cluster, its state is verified using remote attestation by the [JoinService](../architecture/microservices.md#joinservice). -If verification passes successfully, the new node receives keys and certificates to join the cluster. - -You can follow this process by viewing the logs of the JoinService: - -```shell-session -$ kubectl logs -n kube-system daemonsets/join-service -f -{"level":"INFO","ts":"2022-10-14T09:32:20Z","caller":"cmd/main.go:48","msg":"Constellation Node Join Service","version":"2.1.0","cloudProvider":"qemu"} -{"level":"INFO","ts":"2022-10-14T09:32:20Z","logger":"validator","caller":"watcher/validator.go:96","msg":"Updating expected measurements"} -... 
-``` - -Once all nodes have joined your cluster, it may take a couple of minutes for all resources to become available. -You can check on the state of your cluster by running the following: - -```shell-session -$ kubectl get nodes -NAME STATUS ROLES AGE VERSION -control-plane-0 Ready control-plane 2m59s v1.24.6 -worker-0 Ready 32s v1.24.6 -``` - -## Deploy a sample application - -1. Deploy the [emojivoto app](https://github.com/BuoyantIO/emojivoto) - - ```bash - kubectl apply -k github.com/BuoyantIO/emojivoto/kustomize/deployment - ``` - -2. Expose the frontend service locally - - ```bash - kubectl wait --for=condition=available --timeout=60s -n emojivoto --all deployments - kubectl -n emojivoto port-forward svc/web-svc 8080:80 & - curl http://localhost:8080 - kill %1 - ``` - -## Terminate your cluster - - - - -Once you are done, you can clean up the created resources using the following command: - -```bash -constellation mini down -``` - -This will destroy your cluster and clean up your workspace. -The VM image and cluster configuration file (`constellation-conf.yaml`) will be kept and may be reused to create new clusters. - - - - -Once you are done, you can clean up the created resources using the following command: - -```bash -constellation terminate -``` - -This should give the following output: - -```shell-session -$ constellation terminate -You are about to terminate a Constellation cluster. -All of its associated resources will be DESTROYED. -This action is irreversible and ALL DATA WILL BE LOST. -Do you want to continue? [y/n]: -``` - -Confirm with `y` to terminate the cluster: - -```shell-session -Terminating ... -Your Constellation cluster was terminated successfully. -``` - -This will destroy your cluster and clean up your workspace. -The VM image and cluster configuration file (`constellation-conf.yaml`) will be kept and may be reused to create new clusters. - - - - -## Troubleshooting - -Make sure to use the [latest release](https://github.com/edgelesssys/constellation/releases/latest) and check out the [known issues](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22). - -### VMs have no internet access / CLI remains in "Initializing cluster" state - -`iptables` rules may prevent your VMs from accessing the internet. -Make sure your rules aren't dropping forwarded packages. - -List your rules: - -```bash -sudo iptables -S -``` - -The output may look similar to the following: - -```shell-session --P INPUT ACCEPT --P FORWARD DROP --P OUTPUT ACCEPT --N DOCKER --N DOCKER-ISOLATION-STAGE-1 --N DOCKER-ISOLATION-STAGE-2 --N DOCKER-USER -``` - -If your `FORWARD` chain is set to `DROP`, you need to update your rules: - -```bash -sudo iptables -P FORWARD ACCEPT -``` diff --git a/docs/versioned_docs/version-2.17/getting-started/first-steps.md b/docs/versioned_docs/version-2.17/getting-started/first-steps.md deleted file mode 100644 index c31263ed3..000000000 --- a/docs/versioned_docs/version-2.17/getting-started/first-steps.md +++ /dev/null @@ -1,229 +0,0 @@ -# First steps with Constellation - -The following steps guide you through the process of creating a cluster and deploying a sample app. This example assumes that you have successfully [installed and set up Constellation](install.md), -and have access to a cloud subscription. - -:::tip -If you don't have a cloud subscription, you can also set up a [local Constellation cluster using virtualization](../getting-started/first-steps-local.md) for testing. 
-::: - -:::note -If you encounter any problem with the following steps, make sure to use the [latest release](https://github.com/edgelesssys/constellation/releases/latest) and check out the [known issues](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22). -::: - -## Create a cluster - -1. Create the [configuration file](../workflows/config.md) and state file for your cloud provider. If you are following the steps of this guide, there is no need to edit the file. - - - - - ```bash - constellation config generate aws - ``` - - - - - ```bash - constellation config generate azure - ``` - - - - - ```bash - constellation config generate gcp - ``` - - - - - ```bash - constellation config generate stackit - ``` - - - - -2. Create your [IAM configuration](../workflows/config.md#creating-an-iam-configuration). - - - - - ```bash - constellation iam create aws --zone=us-east-2a --prefix=constellTest --update-config - ``` - - This command creates IAM configuration for the AWS zone `us-east-2a` using the prefix `constellTest` for all named resources being created. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. - - Depending on the attestation variant selected on config generation, different regions are available. - AMD SEV-SNP machines (requires the default attestation variant `awsSEVSNP`) are currently available in the following regions: - * `eu-west-1` - * `us-east-2` - - You can find a list of regions that support AMD SEV-SNP in [AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/snp-requirements.html). - - NitroTPM machines (requires the attestation variant `awsNitroTPM`) are available in all regions. - Constellation OS images are currently replicated to the following regions: - * `eu-central-1` - * `eu-west-1` - * `eu-west-3` - * `us-east-2` - * `ap-south-1` - - If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+AWS+image+region:+xx-xxxx-x). - - You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). - - - - - ```bash - constellation iam create azure --region=westus --resourceGroup=constellTest --servicePrincipal=spTest --update-config - ``` - - This command creates IAM configuration on the Azure region `westus` creating a new resource group `constellTest` and a new service principal `spTest`. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. - - CVMs are available in several Azure regions. Constellation OS images are currently replicated to the following: - - * `germanywestcentral` - * `westus` - * `eastus` - * `northeurope` - * `westeurope` - * `southeastasia` - - If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+Azure+image+region:+xx-xxxx-x). - - You can find a list of all [regions in Azure's documentation](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=virtual-machines®ions=all). 
- - - - - ```bash - constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --serviceAccountID=constell-test --update-config - ``` - - This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. - - Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `C2D` or `N2D`. - - - - - To use Constellation on STACKIT, the cluster will use the User Access Token (UAT) that's generated [during the install step](./install.md). - After creating the accounts, fill in the STACKIT details in `constellation-conf.yaml` under `provider.openstack`: - - * `stackitProjectID`: STACKIT project id (can be found after login on the [STACKIT portal](https://portal.stackit.cloud)) - - - - - :::tip - To learn about all options you have for managing IAM resources and Constellation configuration, see the [Configuration workflow](../workflows/config.md). - ::: - - - -3. Create the cluster. `constellation apply` uses options set in `constellation-conf.yaml`. - If you want to manually manage your cloud resources, for example by using [Terraform](../reference/terraform.md), follow the corresponding instructions in the [Create workflow](../workflows/create.md). - - :::tip - - On Azure, you may need to wait 15+ minutes at this point for role assignments to propagate. - - ::: - - ```bash - constellation apply -y - ``` - - This should look similar to the following: - - ```shell-session - $ constellation apply -y - Checking for infrastructure changes - The following Constellation cluster will be created: - 3 control-plane nodes of type n2d-standard-4 will be created. - 1 worker node of type n2d-standard-4 will be created. - Creating - Cloud infrastructure created successfully - Your Constellation master secret was successfully written to ./constellation-mastersecret.json - Connecting - Initializing cluster - Installing Kubernetes components - Your Constellation cluster was successfully initialized. - - Constellation cluster identifier g6iMP5wRU1b7mpOz2WEISlIYSfdAhB0oNaOg6XEwKFY= - Kubernetes configuration constellation-admin.conf - - You can now connect to your cluster by executing: - export KUBECONFIG="$PWD/constellation-admin.conf" - ``` - - The cluster's identifier will be different in your output. - Keep `constellation-mastersecret.json` somewhere safe. - This will allow you to [recover your cluster](../workflows/recovery.md) in case of a disaster. - - :::info - - Depending on your CSP and region, `constellation apply` may take 10+ minutes to complete. - - ::: - -4. Configure kubectl. - - ```bash - export KUBECONFIG="$PWD/constellation-admin.conf" - ``` - -## Deploy a sample application - -1. Deploy the [emojivoto app](https://github.com/BuoyantIO/emojivoto) - - ```bash - kubectl apply -k github.com/BuoyantIO/emojivoto/kustomize/deployment - ``` - -2. Expose the frontend service locally - - ```bash - kubectl wait --for=condition=available --timeout=60s -n emojivoto --all deployments - kubectl -n emojivoto port-forward svc/web-svc 8080:80 & - curl http://localhost:8080 - kill %1 - ``` - -## Terminate your cluster - -Use the CLI to terminate your cluster. 
If you manually used [Terraform](../reference/terraform.md) to manage your cloud resources, follow the corresponding instructions in the [Terminate workflow](../workflows/terminate.md). - -```bash -constellation terminate -``` - -This should give the following output: - -```shell-session -$ constellation terminate -You are about to terminate a Constellation cluster. -All of its associated resources will be DESTROYED. -This action is irreversible and ALL DATA WILL BE LOST. -Do you want to continue? [y/n]: -``` - -Confirm with `y` to terminate the cluster: - -```shell-session -Terminating ... -Your Constellation cluster was terminated successfully. -``` - -Optionally, you can also [delete your IAM resources](../workflows/config.md#deleting-an-iam-configuration). diff --git a/docs/versioned_docs/version-2.17/getting-started/install.md b/docs/versioned_docs/version-2.17/getting-started/install.md deleted file mode 100644 index d52e43476..000000000 --- a/docs/versioned_docs/version-2.17/getting-started/install.md +++ /dev/null @@ -1,429 +0,0 @@ -# Installation and setup - -Constellation runs entirely in your cloud environment and can be controlled via a dedicated [command-line interface (CLI)](../reference/cli.md) or a [Terraform provider](../workflows/terraform-provider.md). - -## Prerequisites - -Make sure the following requirements are met: - -* Your machine is running Linux, macOS, or Windows -* You have admin rights on your machine -* [kubectl](https://kubernetes.io/docs/tasks/tools/) is installed -* Your CSP is Amazon Web Services (AWS), Microsoft Azure, Google Cloud Platform (GCP), or STACKIT - -## Install the Constellation CLI - -:::tip - -If you prefer to use Terraform, you can alternatively use the [Terraform provider](../workflows/terraform-provider.md) to manage the cluster's lifecycle. - -::: - -The CLI executable is available at [GitHub](https://github.com/edgelesssys/constellation/releases). -Install it with the following commands: - - - - -1. Download the CLI: - -```bash -curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-linux-amd64 -``` - -2. [Verify the signature](../workflows/verify-cli.md) (optional) - -3. Install the CLI to your PATH: - -```bash -sudo install constellation-linux-amd64 /usr/local/bin/constellation -``` - - - - -1. Download the CLI: - -```bash -curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-linux-arm64 -``` - -2. [Verify the signature](../workflows/verify-cli.md) (optional) - -3. Install the CLI to your PATH: - -```bash -sudo install constellation-linux-arm64 /usr/local/bin/constellation -``` - - - - - -1. Download the CLI: - -```bash -curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-darwin-arm64 -``` - -2. [Verify the signature](../workflows/verify-cli.md) (optional) - -3. Install the CLI to your PATH: - -```bash -sudo install constellation-darwin-arm64 /usr/local/bin/constellation -``` - - - - - -1. Download the CLI: - -```bash -curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-darwin-amd64 -``` - -2. [Verify the signature](../workflows/verify-cli.md) (optional) - -3. Install the CLI to your PATH: - -```bash -sudo install constellation-darwin-amd64 /usr/local/bin/constellation -``` - - - - - -1. 
Download the CLI: - -```bash -Invoke-WebRequest -OutFile ./constellation.exe -Uri 'https://github.com/edgelesssys/constellation/releases/latest/download/constellation-windows-amd64.exe' -``` - -2. [Verify the signature](../workflows/verify-cli.md) (optional) - -3. Install the CLI under `C:\Program Files\Constellation\bin\constellation.exe` - -3. Add the CLI to your PATH: - - 1. Open `Advanced system settings` by searching for the App in the Windows search - 2. Go to the `Advanced` tab - 3. Click `Environment Variables…` - 4. Click variable called `Path` and click `Edit…` - 5. Click `New` - 6. Enter the path to the folder containing the binary you want on your PATH: `C:\Program Files\Constellation\bin` - - - - -:::tip -The CLI supports autocompletion for various shells. To set it up, run `constellation completion` and follow the given steps. -::: - -## Set up cloud credentials - -Constellation makes authenticated calls to the CSP API. Therefore, you need to set up Constellation with the credentials for your CSP. - -:::tip -If you don't have a cloud subscription, you can also set up a [local Constellation cluster using virtualization](../getting-started/first-steps-local.md) for testing. -::: - -### Required permissions - - - - -To set up a Constellation cluster, you need to perform two tasks that require permissions: create the infrastructure and create roles for cluster nodes. Both of these actions can be performed by different users, e.g., an administrator to create roles and a DevOps engineer to create the infrastructure. - -To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "ec2:DescribeAccountAttributes", - "iam:AddRoleToInstanceProfile", - "iam:AttachRolePolicy", - "iam:CreateInstanceProfile", - "iam:CreatePolicy", - "iam:CreateRole", - "iam:DeleteInstanceProfile", - "iam:DeletePolicy", - "iam:DeletePolicyVersion", - "iam:DeleteRole", - "iam:DetachRolePolicy", - "iam:GetInstanceProfile", - "iam:GetPolicy", - "iam:GetPolicyVersion", - "iam:GetRole", - "iam:ListAttachedRolePolicies", - "iam:ListInstanceProfilesForRole", - "iam:ListPolicyVersions", - "iam:ListRolePolicies", - "iam:PassRole", - "iam:RemoveRoleFromInstanceProfile", - "sts:GetCallerIdentity" - ], - "Resource": "*" - } - ] -} -``` - -The built-in `AdministratorAccess` policy is a superset of these permissions. - -To [create a Constellation cluster](../workflows/create.md), see the permissions of [main.tf](https://github.com/edgelesssys/constellation/blob/main/terraform/infrastructure/iam/aws/main.tf). - -The built-in `PowerUserAccess` policy is a superset of these permissions. - -Follow Amazon's guide on [understanding](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) and [managing policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html). - - - - -The following [resource providers need to be registered](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/resource-providers-and-types#register-resource-provider) in your subscription: - -* `Microsoft.Attestation` -* `Microsoft.Compute` -* `Microsoft.Insights` -* `Microsoft.ManagedIdentity` -* `Microsoft.Network` - -By default, Constellation tries to register these automatically if they haven't been registered before. 
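If automatic registration isn't possible with your permissions, you can register the providers yourself, for example with the Azure CLI (repeat for each provider listed above):

```bash
# Register a resource provider and check its registration state
az provider register --namespace Microsoft.Attestation
az provider show --namespace Microsoft.Attestation --query registrationState -o tsv
```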
- -To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: - -* `*/register/action` \[1] -* `Microsoft.Authorization/roleAssignments/*` -* `Microsoft.Authorization/roleDefinitions/*` -* `Microsoft.ManagedIdentity/userAssignedIdentities/*` -* `Microsoft.Resources/subscriptions/resourcegroups/*` - -The built-in `Owner` role is a superset of these permissions. - -To [create a Constellation cluster](../workflows/create.md), you need the following permissions: - -* `Microsoft.Attestation/attestationProviders/*` -* `Microsoft.Compute/virtualMachineScaleSets/*` -* `Microsoft.Insights/components/*` -* `Microsoft.ManagedIdentity/userAssignedIdentities/*` -* `Microsoft.Network/loadBalancers/*` -* `Microsoft.Network/loadBalancers/backendAddressPools/*` -* `Microsoft.Network/networkSecurityGroups/*` -* `Microsoft.Network/publicIPAddresses/*` -* `Microsoft.Network/virtualNetworks/*` -* `Microsoft.Network/virtualNetworks/subnets/*` -* `Microsoft.Network/natGateways/*` - -The built-in `Contributor` role is a superset of these permissions. - -Follow Microsoft's guide on [understanding](https://learn.microsoft.com/en-us/azure/role-based-access-control/role-definitions) and [assigning roles](https://learn.microsoft.com/en-us/azure/role-based-access-control/role-assignments). - -1: You can omit `*/register/Action` if the resource providers mentioned above are already registered and the `ARM_SKIP_PROVIDER_REGISTRATION` environment variable is set to `true` when creating the IAM configuration. - - - - -Create a new project for Constellation or use an existing one. -Enable the [Compute Engine API](https://console.cloud.google.com/apis/library/compute.googleapis.com) on it. - -To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: - -* `iam.serviceAccountKeys.create` -* `iam.serviceAccountKeys.delete` -* `iam.serviceAccountKeys.get` -* `iam.serviceAccounts.create` -* `iam.serviceAccounts.delete` -* `iam.serviceAccounts.get` -* `resourcemanager.projects.getIamPolicy` -* `resourcemanager.projects.setIamPolicy` - -Together, the built-in roles `roles/editor` and `roles/resourcemanager.projectIamAdmin` form a superset of these permissions. 
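As a sketch, assigning these two roles with the `gcloud` CLI could look like the following; the project ID and member are placeholders:

```bash
# Grant the roles required for creating the IAM configuration (example values)
gcloud projects add-iam-policy-binding yourproject-12345 \
  --member="user:devops@example.com" \
  --role="roles/editor"
gcloud projects add-iam-policy-binding yourproject-12345 \
  --member="user:devops@example.com" \
  --role="roles/resourcemanager.projectIamAdmin"
```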
- -To [create a Constellation cluster](../workflows/create.md), you need the following permissions: - -* `compute.addresses.createInternal` -* `compute.addresses.deleteInternal` -* `compute.addresses.get` -* `compute.addresses.useInternal` -* `compute.backendServices.create` -* `compute.backendServices.delete` -* `compute.backendServices.get` -* `compute.backendServices.use` -* `compute.disks.create` -* `compute.firewalls.create` -* `compute.firewalls.delete` -* `compute.firewalls.get` -* `compute.firewalls.update` -* `compute.globalAddresses.create` -* `compute.globalAddresses.delete` -* `compute.globalAddresses.get` -* `compute.globalAddresses.use` -* `compute.globalForwardingRules.create` -* `compute.globalForwardingRules.delete` -* `compute.globalForwardingRules.get` -* `compute.globalForwardingRules.setLabels` -* `compute.globalOperations.get` -* `compute.healthChecks.create` -* `compute.healthChecks.delete` -* `compute.healthChecks.get` -* `compute.healthChecks.useReadOnly` -* `compute.instanceGroupManagers.create` -* `compute.instanceGroupManagers.delete` -* `compute.instanceGroupManagers.get` -* `compute.instanceGroupManagers.update` -* `compute.instanceGroups.create` -* `compute.instanceGroups.delete` -* `compute.instanceGroups.get` -* `compute.instanceGroups.update` -* `compute.instanceGroups.use` -* `compute.instances.create` -* `compute.instances.setLabels` -* `compute.instances.setMetadata` -* `compute.instances.setTags` -* `compute.instanceTemplates.create` -* `compute.instanceTemplates.delete` -* `compute.instanceTemplates.get` -* `compute.instanceTemplates.useReadOnly` -* `compute.networks.create` -* `compute.networks.delete` -* `compute.networks.get` -* `compute.networks.updatePolicy` -* `compute.routers.create` -* `compute.routers.delete` -* `compute.routers.get` -* `compute.routers.update` -* `compute.subnetworks.create` -* `compute.subnetworks.delete` -* `compute.subnetworks.get` -* `compute.subnetworks.use` -* `compute.targetTcpProxies.create` -* `compute.targetTcpProxies.delete` -* `compute.targetTcpProxies.get` -* `compute.targetTcpProxies.use` -* `iam.serviceAccounts.actAs` - -Together, the built-in roles `roles/editor`, `roles/compute.instanceAdmin` and `roles/resourcemanager.projectIamAdmin` form a superset of these permissions. - -Follow Google's guide on [understanding](https://cloud.google.com/iam/docs/understanding-roles) and [assigning roles](https://cloud.google.com/iam/docs/granting-changing-revoking-access). - - - - -Constellation on STACKIT requires a User Access Token (UAT) for the OpenStack API and a STACKIT service account. -The UAT already has all required permissions by default. -The STACKIT service account needs the `editor` role to create STACKIT LoadBalancers. -Look at the [STACKIT documentation](https://docs.stackit.cloud/stackit/en/getting-started-in-service-accounts-134415831.html) on how to create the service account and assign the role. - - - - -### Authentication - -You need to authenticate with your CSP. The following lists the required steps for *testing* and *production* environments. - -:::note -The steps for a *testing* environment are simpler. However, they may expose secrets to the CSP. If in doubt, follow the *production* steps. -::: - - - - -**Testing** - -You can use the [AWS CloudShell](https://console.aws.amazon.com/cloudshell/home). Make sure you are [authorized to use it](https://docs.aws.amazon.com/cloudshell/latest/userguide/sec-auth-with-identities.html). 
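A quick way to confirm which identity your session uses (works in CloudShell as well as with a locally configured CLI):

```bash
# Print the account and IAM identity that the current AWS credentials resolve to
aws sts get-caller-identity
```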
- -**Production** - -Use the latest version of the [AWS CLI](https://aws.amazon.com/cli/) on a trusted machine: - -```bash -aws configure -``` - -Options and first steps are described in the [AWS CLI documentation](https://docs.aws.amazon.com/cli/index.html). - - - - -**Testing** - -Simply open the [Azure Cloud Shell](https://docs.microsoft.com/en-us/azure/cloud-shell/overview). - -**Production** - -Use the latest version of the [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/) on a trusted machine: - -```bash -az login -``` - -Other options are described in Azure's [authentication guide](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli). - - - - -**Testing** - -You can use the [Google Cloud Shell](https://cloud.google.com/shell). Make sure your [session is authorized](https://cloud.google.com/shell/docs/auth). For example, execute `gsutil` and accept the authorization prompt. - -**Production** - -Use one of the following options on a trusted machine: - -* Use the [`gcloud` CLI](https://cloud.google.com/sdk/gcloud) - - ```bash - gcloud auth application-default login - ``` - - This will ask you to log-in to your Google account and create your credentials. - The Constellation CLI will automatically load these credentials when needed. - -* Set up a service account and pass the credentials manually - - Follow [Google's guide](https://cloud.google.com/docs/authentication/production#manually) for setting up your credentials. - - - - -You need to authenticate with the infrastructure API (OpenStack) and create a service account (STACKIT API). - -1. [Follow the STACKIT documentation](https://docs.stackit.cloud/stackit/en/step-1-generating-of-user-access-token-11763726.html) for obtaining a User Access Token (UAT) to use the infrastructure API -2. Create a configuration file under `~/.config/openstack/clouds.yaml` (`%AppData%\openstack\clouds.yaml` on Windows) with the credentials from the User Access Token - - ```yaml - clouds: - stackit: - auth: - auth_url: https://keystone.api.iaas.eu01.stackit.cloud/v3 - username: REPLACE_WITH_UAT_USERNAME - password: REPLACE_WITH_UAT_PASSWORD - project_id: REPLACE_WITH_STACKIT_PROJECT_ID - project_name: REPLACE_WITH_STACKIT_PROJECT_NAME - user_domain_name: portal_mvp - project_domain_name: portal_mvp - region_name: RegionOne - identity_api_version: 3 - ``` - -3. [Follow the STACKIT documentation](https://docs.stackit.cloud/stackit/en/getting-started-in-service-accounts-134415831.html) for creating a service account and an access token -4. Assign the `editor` role to the service account by [following the documentation](https://docs.stackit.cloud/stackit/en/getting-started-in-service-accounts-134415831.html) -5. Create a configuration file under `~/.stackit/credentials.json` (`%USERPROFILE%\.stackit\credentials.json` on Windows) - - ```json - {"STACKIT_SERVICE_ACCOUNT_TOKEN":"REPLACE_WITH_TOKEN"} - ``` - - - - - -## Next steps - -You are now ready to [deploy your first confidential Kubernetes cluster and application](first-steps.md). diff --git a/docs/versioned_docs/version-2.17/getting-started/marketplaces.md b/docs/versioned_docs/version-2.17/getting-started/marketplaces.md deleted file mode 100644 index a6763a42a..000000000 --- a/docs/versioned_docs/version-2.17/getting-started/marketplaces.md +++ /dev/null @@ -1,56 +0,0 @@ -# Using Constellation via Cloud Marketplaces - -Constellation is available through the Marketplaces of AWS, Azure, GCP, and STACKIT. 
This allows you to create self-managed Constellation clusters that are billed on a pay-per-use basis (hourly, per vCPU) with your CSP account. You can still get direct support by Edgeless Systems. For more information, please [contact us](https://www.edgeless.systems/enterprise-support/). - -This document explains how to run Constellation with the dynamically billed cloud marketplace images. - - - - -To use Constellation's marketplace images, ensure that you are subscribed to the [marketplace offering](https://aws.amazon.com/marketplace/pp/prodview-2mbn65nv57oys) through the web portal. - -Then, enable the use of marketplace images in your Constellation `constellation-conf.yaml` [config file](../workflows/config.md): - -```bash -yq eval -i ".provider.aws.useMarketplaceImage = true" constellation-conf.yaml -``` - - - - -Constellation has a private marketplace plan. Please [contact us](https://www.edgeless.systems/enterprise-support/) to gain access. - -To use a marketplace image, you need to accept the marketplace image's terms once for your subscription with the [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/vm/image/terms?view=azure-cli-latest): - -```bash -az vm image terms accept --publisher edgelesssystems --offer constellation --plan constellation -``` - -Then, enable the use of marketplace images in your Constellation `constellation-conf.yaml` [config file](../workflows/config.md): - -```bash -yq eval -i ".provider.azure.useMarketplaceImage = true" constellation-conf.yaml -``` - - - - -To use a marketplace image, ensure that the account is entitled to use marketplace images by Edgeless Systems by accepting the terms through the [web portal](https://console.cloud.google.com/marketplace/vm/config/edgeless-systems-public/constellation). - -Then, enable the use of marketplace images in your Constellation `constellation-conf.yaml` [config file](../workflows/config.md): - -```bash -yq eval -i ".provider.gcp.useMarketplaceImage = true" constellation-conf.yaml -``` - - - - -On STACKIT, the selected Constellation image is always a marketplace image. You can find more information on the STACKIT portal. - - - - -Ensure that the cluster uses an official release image version (i.e., `.image=vX.Y.Z` in the `constellation-conf.yaml` file). - -From there, you can proceed with the [cluster creation](../workflows/create.md) as usual. diff --git a/docs/versioned_docs/version-2.17/intro.md b/docs/versioned_docs/version-2.17/intro.md deleted file mode 100644 index 0bfe86da9..000000000 --- a/docs/versioned_docs/version-2.17/intro.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -slug: / -id: intro ---- -# Introduction - -Welcome to the documentation of Constellation! Constellation is a Kubernetes engine that aims to provide the best possible data security. - -![Constellation concept](/img/concept.svg) - - Constellation shields your entire Kubernetes cluster from the underlying cloud infrastructure. Everything inside is always encrypted, including at runtime in memory. For this, Constellation leverages a technology called *confidential computing* and more specifically Confidential VMs. - -:::tip -See the 📄[whitepaper](https://content.edgeless.systems/hubfs/Confidential%20Computing%20Whitepaper.pdf) for more information on confidential computing. -::: - -## Goals - -From a security perspective, Constellation is designed to keep all data always encrypted and to prevent any access from the underlying (cloud) infrastructure. 
This includes access from datacenter employees, privileged cloud admins, and attackers coming through the infrastructure. Such attackers could be malicious co-tenants escalating their privileges or hackers who managed to compromise a cloud server. - -From a DevOps perspective, Constellation is designed to work just like what you would expect from a modern Kubernetes engine. - -## Use cases - -Constellation provides unique security [features](overview/confidential-kubernetes.md) and [benefits](overview/security-benefits.md). The core use cases are: - -* Increasing the overall security of your clusters -* Increasing the trustworthiness of your SaaS offerings -* Moving sensitive workloads from on-prem to the cloud -* Meeting regulatory requirements - -## Next steps - -You can learn more about the concept of Confidential Kubernetes, features, security benefits, and performance of Constellation in the *Basics* section. To jump right into the action head to *Getting started*. diff --git a/docs/versioned_docs/version-2.17/overview/clouds.md b/docs/versioned_docs/version-2.17/overview/clouds.md deleted file mode 100644 index b2695d28e..000000000 --- a/docs/versioned_docs/version-2.17/overview/clouds.md +++ /dev/null @@ -1,66 +0,0 @@ -# Feature status of clouds - -What works on which cloud? Currently, Confidential VMs (CVMs) are available in varying quality on the different clouds and software stacks. - -For Constellation, the ideal environment provides the following: - -1. Ability to run arbitrary software and images inside CVMs -2. CVMs based on AMD SEV-SNP (available in EPYC CPUs since the Milan generation) or Intel TDX (available in Xeon CPUs since the Sapphire Rapids generation) -3. Ability for CVM guests to obtain raw hardware attestation statements -4. Reviewable, open-source firmware inside CVMs -5. Capability of the firmware to attest the integrity of the code it passes control to, e.g., with an embedded virtual TPM (vTPM) - -(1) is a functional must-have. (2)--(5) are required for remote attestation that fully keeps the infrastructure/cloud out. Constellation can work without them or with approximations, but won't protect against certain privileged attackers anymore. - -The following table summarizes the state of features for different infrastructures. - -| **Feature** | **AWS** | **Azure** | **GCP** | **STACKIT** | **OpenStack (Yoga)** | -|-----------------------------------|---------|-----------|---------|--------------|----------------------| -| **1. Custom images** | Yes | Yes | Yes | Yes | Yes | -| **2. SEV-SNP or TDX** | Yes | Yes | Yes | No | Depends on kernel/HV | -| **3. Raw guest attestation** | Yes | Yes | Yes | No | Depends on kernel/HV | -| **4. Reviewable firmware** | Yes | No* | No | No | Depends on kernel/HV | -| **5. Confidential measured boot** | No | Yes | No | No | Depends on kernel/HV | - -## Amazon Web Services (AWS) - -Amazon EC2 [supports AMD SEV-SNP](https://aws.amazon.com/de/about-aws/whats-new/2023/04/amazon-ec2-amd-sev-snp/). -Regarding (3), AWS provides direct access to attestation statements. -However, regarding (5), attestation is partially based on the [NitroTPM](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html) for [measured boot](../architecture/attestation.md#measured-boot), which is a vTPM managed by the Nitro hypervisor. -Hence, the hypervisor is currently part of Constellation's TCB. -Regarding (4), the [firmware is open source](https://github.com/aws/uefi) and can be reproducibly built. 
- -## Microsoft Azure - -With its [CVM offering](https://docs.microsoft.com/en-us/azure/confidential-computing/confidential-vm-overview), Azure provides the best foundations for Constellation. -Regarding (3), Azure provides direct access to attestation statements. -The firmware runs in an isolated domain inside the CVM and exposes a vTPM (5), but it's closed source (4). -On SEV-SNP, Azure uses VM Privilege Level (VMPL) isolation for the separation of firmware and the rest of the VM; on TDX, they use TD partitioning. -This firmware is signed by Azure. -The signature is reflected in the attestation statements of CVMs. -Thus, the Azure closed-source firmware becomes part of Constellation's trusted computing base (TCB). - -\* Recently, [Azure announced the open source paravisor OpenHCL](https://techcommunity.microsoft.com/blog/windowsosplatform/openhcl-the-new-open-source-paravisor/4273172). It's the foundation for fully open source and verifiable CVM firmware. Once Azure provides their CVM firmware with reproducible builds based on OpenHCL, (4) switches from *No* to *Yes*. Constellation will support OpenHCL based firmware on Azure in the future. - -## Google Cloud Platform (GCP) - -The [CVMs Generally Available in GCP](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview#technologies) are based on AMD SEV-ES or SEV-SNP. -Regarding (3), with their SEV-SNP offering Google provides direct access to attestation statements. -However, regarding (5), attestation is partially based on the [Shielded VM vTPM](https://cloud.google.com/compute/shielded-vm/docs/shielded-vm#vtpm) for [measured boot](../architecture/attestation.md#measured-boot), which is a vTPM managed by Google's hypervisor. -Hence, the hypervisor is currently part of Constellation's TCB. -Regarding (4), the CVMs still include closed-source firmware. - -[TDX on Google](https://cloud.google.com/blog/products/identity-security/confidential-vms-on-intel-cpus-your-datas-new-intelligent-defense) is in public preview. -With it, Constellation would have a similar TCB and attestation flow as with the current SEV-SNP offering. - -## STACKIT - -[STACKIT Compute Engine](https://www.stackit.de/en/product/stackit-compute-engine/) supports AMD SEV-ES. A vTPM is used for measured boot, which is a vTPM managed by STACKIT's hypervisor. Hence, the hypervisor is currently part of Constellation's TCB. - -## OpenStack - -OpenStack is an open-source cloud and infrastructure management software. It's used by many smaller CSPs and datacenters. In the latest *Yoga* version, OpenStack has basic support for CVMs. However, much depends on the employed kernel and hypervisor. Features (2)--(4) are likely to be a *Yes* with Linux kernel version 6.2. Thus, going forward, OpenStack on corresponding AMD or Intel hardware will be a viable underpinning for Constellation. - -## Conclusion - -The different clouds and software like the Linux kernel and OpenStack are in the process of building out their support for state-of-the-art CVMs. Azure has already most features in place. For Constellation, the status quo means that the TCB has different shapes on different infrastructures. With broad SEV-SNP support coming to the Linux kernel, we soon expect a normalization of features across infrastructures. 
diff --git a/docs/versioned_docs/version-2.17/overview/confidential-kubernetes.md b/docs/versioned_docs/version-2.17/overview/confidential-kubernetes.md deleted file mode 100644 index bff8c3322..000000000 --- a/docs/versioned_docs/version-2.17/overview/confidential-kubernetes.md +++ /dev/null @@ -1,42 +0,0 @@ -# Confidential Kubernetes - -We use the term *Confidential Kubernetes* to refer to the concept of using confidential-computing technology to shield entire Kubernetes clusters from the infrastructure. The three defining properties of this concept are: - -1. **Workload shielding**: the confidentiality and integrity of all workload-related data and code are enforced. -2. **Control plane shielding**: the confidentiality and integrity of the cluster's control plane, state, and workload configuration are enforced. -3. **Attestation and verifiability**: the two properties above can be verified remotely based on hardware-rooted cryptographic certificates. - -Each of the above properties is equally important. Only with all three in conjunction, an entire cluster can be shielded without gaps. - -## Constellation security features - -Constellation implements the Confidential Kubernetes concept with the following security features. - -* **Runtime encryption**: Constellation runs all Kubernetes nodes inside Confidential VMs (CVMs). This gives runtime encryption for the entire cluster. -* **Network and storage encryption**: Constellation augments this with transparent encryption of the [network](../architecture/networking.md), [persistent storage](../architecture/encrypted-storage.md), and other managed storage like [AWS S3](../architecture/encrypted-storage.md#encrypted-s3-object-storage). Thus, workloads and control plane are truly end-to-end encrypted: at rest, in transit, and at runtime. -* **Transparent key management**: Constellation manages the corresponding [cryptographic keys](../architecture/keys.md) inside CVMs. -* **Node attestation and verification**: Constellation verifies the integrity of each new CVM-based node using [remote attestation](../architecture/attestation.md). Only "good" nodes receive the cryptographic keys required to access the network and storage of a cluster. -* **Confidential computing-optimized images**: A node is "good" if it's running a signed Constellation [node image](../architecture/images.md) inside a CVM and is in the expected state. (Node images are hardware-measured during boot. The measurements are reflected in the attestation statements that are produced by nodes and verified by Constellation.) -* **"Whole cluster" attestation**: Towards the DevOps engineer, Constellation provides a single hardware-rooted certificate from which all of the above can be verified. - -With the above, Constellation wraps an entire cluster into one coherent and verifiable *confidential context*. The concept is depicted in the following. - -![Confidential Kubernetes](../_media/concept-constellation.svg) - -## Comparison: Managed Kubernetes with CVMs - -In comparison, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. 
The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. Consequently, this approach leaves a large attack surface, as is depicted in the following. - -![Concept: Managed Kubernetes plus CVMs](../_media/concept-managed.svg) - -The following table highlights the key differences in terms of features. - -| | Managed Kubernetes with CVMs | Confidential Kubernetes (Constellation✨) | -|-------------------------------------|------------------------------|--------------------------------------------| -| Runtime encryption | Partial (data plane only)| **Yes** | -| Node image verification | No | **Yes** | -| Full cluster attestation | No | **Yes** | -| Transparent network encryption | No | **Yes** | -| Transparent storage encryption | No | **Yes** | -| Confidential key management | No | **Yes** | -| Cloud agnostic / multi-cloud | No | **Yes** | diff --git a/docs/versioned_docs/version-2.17/overview/license.md b/docs/versioned_docs/version-2.17/overview/license.md deleted file mode 100644 index 34122c025..000000000 --- a/docs/versioned_docs/version-2.17/overview/license.md +++ /dev/null @@ -1,33 +0,0 @@ -# License - -## Source code - -Constellation's source code is available on [GitHub](https://github.com/edgelesssys/constellation) under the [GNU Affero General Public License v3.0](https://github.com/edgelesssys/constellation/blob/main/LICENSE). - -## Binaries - -Edgeless Systems provides ready-to-use and [signed](../architecture/attestation.md#chain-of-trust) binaries of Constellation. This includes the CLI and the [node images](../architecture/images.md). - -These binaries may be used free of charge within the bounds of Constellation's [**Community License**](#community-license). An [**Enterprise License**](#enterprise-license) can be purchased from Edgeless Systems. - -The Constellation CLI displays relevant license information when you initialize your cluster. You are responsible for staying within the bounds of your respective license. Constellation doesn't enforce any limits so as not to endanger your cluster's availability. - -## Terraform provider - -Edgeless Systems provides a [Terraform provider](https://github.com/edgelesssys/terraform-provider-constellation/releases), which may be used free of charge within the bounds of Constellation's [**Community License**](#community-license). An [**Enterprise License**](#enterprise-license) can be purchased from Edgeless Systems. - -You are responsible for staying within the bounds of your respective license. Constellation doesn't enforce any limits so as not to endanger your cluster's availability. - -## Community License - -You are free to use the Constellation binaries provided by Edgeless Systems to create services for internal consumption, evaluation purposes, or non-commercial use. You must not use the Constellation binaries to provide commercial hosted services to third parties. Edgeless Systems gives no warranties and offers no support. - -## Enterprise License - -Enterprise Licenses don't have the above limitations and come with support and additional features. Find out more at the [product website](https://www.edgeless.systems/products/constellation/). 
- -Once you have received your Enterprise License file, place it in your [Constellation workspace](../architecture/orchestration.md#workspaces) in a file named `constellation.license`. - -## CSP Marketplaces - -Constellation is available through the Marketplaces of AWS, Azure, GCP, and STACKIT. This allows you to create self-managed Constellation clusters that are billed on a pay-per-use basis (hourly, per vCPU) with your CSP account. You can still get direct support by Edgeless Systems. For more information, please [contact us](https://www.edgeless.systems/enterprise-support/). diff --git a/docs/versioned_docs/version-2.17/overview/performance/application.md b/docs/versioned_docs/version-2.17/overview/performance/application.md deleted file mode 100644 index c67d59644..000000000 --- a/docs/versioned_docs/version-2.17/overview/performance/application.md +++ /dev/null @@ -1,102 +0,0 @@ -# Application benchmarks - -## HashiCorp Vault - -[HashiCorp Vault](https://www.vaultproject.io/) is a distributed secrets management software that can be deployed to Kubernetes. -HashiCorp maintains a benchmarking tool for vault, [vault-benchmark](https://github.com/hashicorp/vault-benchmark/). -Vault-benchmark generates load on a Vault deployment and measures response times. - -This article describes the results from running vault-benchmark on Constellation, AKS, and GKE. -You can find the setup for producing the data discussed in this article in the [vault-benchmarks](https://github.com/edgelesssys/vault-benchmarks) repository. - -The Vault API used during benchmarking is the [transits secret engine](https://developer.hashicorp.com/vault/docs/secrets/transit). -This allows services to send data to Vault for encryption, decryption, signing, and verification. - -## Results - -On each run, vault-benchmark sends requests and measures the latencies. -The measured latencies are aggregated through various statistical features. -After running the benchmark n times, the arithmetic mean over a subset of the reported statistics is calculated. -The selected features are arithmetic mean, 99th percentile, minimum, and maximum. - -Arithmetic mean gives a general sense of the latency on each target. -The 99th percentile shows performance in (most likely) erroneous states. -Minimum and maximum mark the range within which latency varies each run. - -The benchmark was configured with 1300 workers and 10 seconds per run. -Those numbers were chosen empirically. -The latency was stabilizing at 10 seconds runtime, not changing with further increase. -Increasing the number of workers beyond 1300 leads to request failures, marking the limit Vault was able to handle in this setup. -All results are based on 100 runs. - -The following data was generated while running five replicas, one primary, and four standby nodes. -All numbers are in seconds if not indicated otherwise. 
-``` -========== Results AKS ========== -Mean: mean: 1.632200, variance: 0.002057 -P99: mean: 5.480679, variance: 2.263700 -Max: mean: 6.651001, variance: 2.808401 -Min: mean: 0.011415, variance: 0.000133 -========== Results GKE ========== -Mean: mean: 1.656435, variance: 0.003615 -P99: mean: 6.030807, variance: 3.955051 -Max: mean: 7.164843, variance: 3.300004 -Min: mean: 0.010233, variance: 0.000111 -========== Results C11n ========== -Mean: mean: 1.651549, variance: 0.001610 -P99: mean: 5.780422, variance: 3.016106 -Max: mean: 6.942997, variance: 3.075796 -Min: mean: 0.013774, variance: 0.000228 -========== AKS vs C11n ========== -Mean: +1.171577 % (AKS is faster) -P99: +5.185495 % (AKS is faster) -Max: +4.205618 % (AKS is faster) -Min: +17.128781 % (AKS is faster) -========== GKE vs C11n ========== -Mean: -0.295851 % (GKE is slower) -P99: -4.331603 % (GKE is slower) -Max: -3.195248 % (GKE is slower) -Min: +25.710886 % (GKE is faster) -``` - -**Interpretation**: Latencies are all within ~5% of each other. -AKS performs slightly better than GKE and Constellation (C11n) in all cases except minimum latency. -Minimum latency is the lowest for GKE. -Compared to GKE, Constellation had slightly lower peak latencies (99th percentile and maximum), indicating that Constellation could have handled slightly more concurrent accesses than GKE. -Overall, performance is at comparable levels across all three distributions. -Based on these numbers, you can use a similarly sized Constellation cluster to run your existing Vault deployment. - -### Visualization - -The following plots visualize the data presented above as [box plots](https://en.wikipedia.org/wiki/Box_plot). -The whiskers denote the minimum and maximum. -The box stretches from the 25th to the 75th percentile, with the dividing bar marking the 50th percentile. -The circles outside the whiskers denote outliers. - -
-Mean Latency - -![Mean Latency](../../_media/benchmark_vault/5replicas/mean_latency.png) - -
- -
-99th Percentile Latency - -![99th Percentile Latency](../../_media/benchmark_vault/5replicas/p99_latency.png) - -
- -
-Maximum Latency - -![Maximum Latency](../../_media/benchmark_vault/5replicas/max_latency.png) - -
- -
-Minimum Latency - -![Minimum Latency](../../_media/benchmark_vault/5replicas/min_latency.png) - -
diff --git a/docs/versioned_docs/version-2.17/overview/performance/compute.md b/docs/versioned_docs/version-2.17/overview/performance/compute.md deleted file mode 100644 index 88dd4b1b2..000000000 --- a/docs/versioned_docs/version-2.17/overview/performance/compute.md +++ /dev/null @@ -1,11 +0,0 @@ -# Impact of runtime encryption on compute performance - -All nodes in a Constellation cluster are executed inside Confidential VMs (CVMs). Consequently, the performance of Constellation is inherently linked to the performance of these CVMs. - -## AMD and Azure benchmarking - -AMD and Azure have collectively released a [performance benchmark](https://community.amd.com/t5/business/microsoft-azure-confidential-computing-powered-by-3rd-gen-epyc/ba-p/497796) for CVMs that utilize 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. This benchmark, which included a variety of mostly compute-intensive tests such as SPEC CPU 2017 and CoreMark, demonstrated that CVMs experience only minor performance degradation (ranging from 2% to 8%) when compared to standard VMs. Such results are indicative of the performance that can be expected from compute-intensive workloads running with Constellation on Azure. - -## AMD and Google benchmarking - -Similarly, AMD and Google have jointly released a [performance benchmark](https://www.amd.com/system/files/documents/3rd-gen-epyc-gcp-c2d-conf-compute-perf-brief.pdf) for CVMs employing 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. With high-performance computing workloads such as WRF, NAMD, Ansys CFS, and Ansys LS_DYNA, they observed analogous findings, with only minor performance degradation (between 2% and 4%) compared to standard VMs. These outcomes are reflective of the performance that can be expected for compute-intensive workloads running with Constellation on GCP. diff --git a/docs/versioned_docs/version-2.17/overview/performance/io.md b/docs/versioned_docs/version-2.17/overview/performance/io.md deleted file mode 100644 index 3ae796f8a..000000000 --- a/docs/versioned_docs/version-2.17/overview/performance/io.md +++ /dev/null @@ -1,204 +0,0 @@ -# I/O performance benchmarks - -To assess the overall performance of Constellation, this benchmark evaluates Constellation v2.6.0 in terms of storage I/O using [`fio`](https://fio.readthedocs.io/en/latest/fio_doc.html) and network performance using the [Kubernetes Network Benchmark](https://github.com/InfraBuilder/k8s-bench-suite#knb--kubernetes-network-be). - -This benchmark tested Constellation on Azure and GCP and compared the results against the managed Kubernetes offerings AKS and GKE. - -## Configurations - -### Constellation - -The benchmark was conducted with Constellation v2.6.0, Kubernetes v1.25.7, and Cilium v1.12. -It ran on the following infrastructure configurations. - -Constellation on Azure: - -- Nodes: 3 (1 Control-plane, 2 Worker) -- Machines: `DC4as_v5`: 3rd Generation AMD EPYC 7763v (Milan) processor with 4 Cores, 16 GiB memory -- CVM: `true` -- Region: `West US` -- Zone: `2` - -Constellation on GCP: - -- Nodes: 3 (1 Control-plane, 2 Worker) -- Machines: `n2d-standard-4`: 2nd Generation AMD EPYC (Rome) processor with 4 Cores, 16 GiB of memory -- CVM: `true` -- Zone: `europe-west3-b` - -### AKS - -On AKS, the benchmark used Kubernetes `v1.24.9` and nodes with version `AKSUbuntu-1804gen2containerd-2023.02.15`. 
-
-AKS ran with the [`kubenet`](https://learn.microsoft.com/en-us/azure/aks/concepts-network#kubenet-basic-networking) CNI and the [default CSI driver](https://learn.microsoft.com/en-us/azure/aks/azure-disk-csi) for Azure Disk.
-
-The following infrastructure configuration was used:
-
-- Nodes: 2 (2 Worker)
-- Machines: `D4as_v5`: 3rd Generation AMD EPYC 7763v (Milan) processor with 4 Cores, 16 GiB memory
-- CVM: `false`
-- Region: `West US`
-- Zone: `2`
-
-### GKE
-
-On GKE, the benchmark used Kubernetes `v1.24.9` and nodes with version `1.24.9-gke.3200`.
-GKE ran with the [`kubenet`](https://cloud.google.com/kubernetes-engine/docs/concepts/network-overview) CNI and the [default CSI driver](https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/gce-pd-csi-driver) for Compute Engine persistent disk.
-
-The following infrastructure configuration was used:
-
-- Nodes: 2 (2 Worker)
-- Machines: `n2d-standard-4`: 2nd Generation AMD EPYC (Rome) processor with 4 Cores, 16 GiB of memory
-- CVM: `false`
-- Zone: `europe-west3-b`
-
-## Results
-
-### Network
-
-This section gives a thorough analysis of the network performance of Constellation, specifically focusing on measuring TCP and UDP bandwidth.
-The benchmark measured the bandwidth of pod-to-pod and pod-to-service connections between two different nodes using [`iperf`](https://iperf.fr/).
-
-GKE and Constellation on GCP had a maximum network bandwidth of [10 Gbps](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines).
-AKS with `Standard_D4as_v5` machines has a maximum network bandwidth of [12.5 Gbps](https://learn.microsoft.com/en-us/azure/virtual-machines/dasv5-dadsv5-series#dasv5-series).
-The Confidential VM equivalent `Standard_DC4as_v5` currently has a network bandwidth of [1.25 Gbps](https://learn.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series#dcasv5-series-products).
-Therefore, to make the test comparable, both AKS and Constellation on Azure were running with `Standard_DC4as_v5` machines and 1.25 Gbps bandwidth.
-
-Constellation on Azure and AKS used an MTU of 1500.
-Constellation on GCP used an MTU of 8896. GKE used an MTU of 1450.
-
-The difference in network bandwidth can largely be attributed to two factors:
-
-- Constellation's [network encryption](../../architecture/networking.md) via Cilium and WireGuard, which protects data in-transit.
-- [AMD SEV using SWIOTLB bounce buffers](https://lore.kernel.org/all/20200204193500.GA15564@ashkalra_ubuntu_server/T/) for all DMA including network I/O.
-
-#### Pod-to-Pod
-
-In this scenario, the client Pod connects directly to the server Pod via its IP address.
-
-```mermaid
-flowchart LR
-  subgraph Node A
-    Client[Client]
-  end
-  subgraph Node B
-    Server[Server]
-  end
-  Client ==>|traffic| Server
-```
-
-The results for "Pod-to-Pod" on Azure are as follows:
-
-![Network Pod2Pod Azure benchmark graph](../../_media/benchmark_net_p2p_azure.png)
-
-The results for "Pod-to-Pod" on GCP are as follows:
-
-![Network Pod2Pod GCP benchmark graph](../../_media/benchmark_net_p2p_gcp.png)
-
-#### Pod-to-Service
-
-In this scenario, the client Pod connects to the server Pod via a ClusterIP service. This is more relevant to real-world use cases.
-
-```mermaid
-flowchart LR
-  subgraph Node A
-    Client[Client] ==>|traffic| Service[Service]
-  end
-  subgraph Node B
-    Server[Server]
-  end
-  Service ==>|traffic| Server
-```
-
-The results for "Pod-to-Service" on Azure are as follows:
-
-![Network Pod2SVC Azure benchmark graph](../../_media/benchmark_net_p2svc_azure.png)
-
-The results for "Pod-to-Service" on GCP are as follows:
-
-![Network Pod2SVC GCP benchmark graph](../../_media/benchmark_net_p2svc_gcp.png)
-
-In our recent comparison of Constellation on GCP with GKE, Constellation had 58% less TCP bandwidth. However, UDP bandwidth was slightly better with Constellation, thanks to its higher MTU.
-
-Similarly, when comparing Constellation on Azure with AKS using CVMs, Constellation achieved approximately 10% less TCP and 40% less UDP bandwidth.
-
-### Storage I/O
-
-Azure and GCP offer persistent storage for their Kubernetes services AKS and GKE via the Container Storage Interface (CSI). CSI storage in Kubernetes is available via `PersistentVolumes` (PV) and consumed via `PersistentVolumeClaims` (PVC).
-Upon requesting persistent storage through a PVC, GKE and AKS will provision a PV as defined by a default [storage class](https://kubernetes.io/docs/concepts/storage/storage-classes/).
-Constellation provides persistent storage on Azure and GCP [that's encrypted on the CSI layer](../../architecture/encrypted-storage.md).
-Similarly, upon a PVC request, Constellation will provision a PV via a default storage class.
-
-For Constellation on Azure and AKS, the benchmark ran with Azure Disk storage [Standard SSD](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#standard-ssds) of 400 GiB size.
-The [DC4as machine type](https://learn.microsoft.com/en-us/azure/virtual-machines/dasv5-dadsv5-series#dasv5-series) with four cores provides the following maximum performance:
-
-- 6400 (20000 burst) IOPS
-- 144 MB/s (600 MB/s burst) throughput
-
-However, the performance is bound by the capabilities of the [512 GiB Standard SSD size](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#standard-ssds) (the size class of 400 GiB volumes):
-
-- 500 (600 burst) IOPS
-- 60 MB/s (150 MB/s burst) throughput
-
-For Constellation on GCP and GKE, the benchmark ran with Compute Engine Persistent Disk Storage [pd-balanced](https://cloud.google.com/compute/docs/disks) of 400 GiB size.
-The N2D machine type with four cores and pd-balanced provides the following [maximum performance](https://cloud.google.com/compute/docs/disks/performance#n2d_vms):
-
-- 3,000 read IOPS
-- 15,000 write IOPS
-- 240 MB/s read throughput
-- 240 MB/s write throughput
-
-However, the performance is bound by the capabilities of a [`Zonal balanced PD`](https://cloud.google.com/compute/docs/disks/performance#zonal-persistent-disks) with 400 GiB size:
-
-- 2400 read IOPS
-- 2400 write IOPS
-- 112 MB/s read throughput
-- 112 MB/s write throughput
-
-The [`fio`](https://fio.readthedocs.io/en/latest/fio_doc.html) benchmark consists of several tests.
-The benchmark used [`Kubestr`](https://github.com/kastenhq/kubestr) to run `fio` in Kubernetes.
-The default test performs randomized access patterns that accurately depict worst-case I/O scenarios for most applications.
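-
-As a rough, hedged illustration of how such a default test can be reproduced by hand, Kubestr's `fio` subcommand takes a storage class and a volume size. This is only a sketch: the storage-class name is a placeholder, and flag names may differ between Kubestr versions.
-
-```bash
-# Sketch: run Kubestr's default fio test against a 400Gi volume of the given storage class.
-# <storage-class> is a placeholder; use your cluster's default storage class.
-kubestr fio --storageclass <storage-class> --size 400Gi
-```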
-
-The following `fio` settings were used:
-
-- No cloud caching
-- No OS caching
-- Single CPU
-- 60 seconds runtime
-- 10 seconds ramp-up time
-- 10 GiB file
-- IOPS: 4 KB blocks and 128 iodepth
-- Bandwidth: 1024 KB blocks and 128 iodepth
-
-For more details, see the [`fio` test configuration](https://github.com/edgelesssys/constellation/blob/main/.github/actions/e2e_benchmark/fio.ini).
-
-The results for IOPS on Azure are as follows:
-
-![I/O IOPS Azure benchmark graph](../../_media/benchmark_fio_azure_iops.png)
-
-The results for IOPS on GCP are as follows:
-
-![I/O IOPS GCP benchmark graph](../../_media/benchmark_fio_gcp_iops.png)
-
-The results for bandwidth on Azure are as follows:
-
-![I/O bandwidth Azure benchmark graph](../../_media/benchmark_fio_azure_bw.png)
-
-The results for bandwidth on GCP are as follows:
-
-![I/O bandwidth GCP benchmark graph](../../_media/benchmark_fio_gcp_bw.png)
-
-On GCP, the results exceed the maximum performance guarantees of the chosen disk type. There are two possible explanations for this. The first is that there may be cloud caching in place that isn't configurable. Alternatively, the underlying provisioned disk size may be larger than what was requested, resulting in higher performance boundaries.
-
-When comparing Constellation on GCP with GKE, Constellation has similar bandwidth but about 10% less IOPS performance. On Azure, Constellation has similar IOPS performance compared to AKS, where both likely hit the maximum storage performance. However, Constellation has approximately 15% less read and write bandwidth.
-
-## Conclusion
-
-Despite the added [security benefits](../security-benefits.md) that Constellation provides, it only incurs a slight performance overhead when compared to managed Kubernetes offerings such as AKS and GKE. In most compute benchmarks, Constellation is on par with its alternatives.
-While it may be slightly slower in certain I/O scenarios due to network and storage encryption, there is ongoing work to reduce this overhead to single digits.
-
-For instance, storage encryption only adds between 10% and 15% overhead in terms of bandwidth and IOPS.
-Meanwhile, the biggest performance impact that Constellation currently faces is network encryption, which can incur up to 58% overhead on a 10 Gbps network.
-However, the Cilium team has conducted [benchmarks with Cilium using WireGuard encryption](https://docs.cilium.io/en/latest/operations/performance/benchmark/#encryption-wireguard-ipsec) on a 100 Gbps network that yielded over 15 Gbps.
-We're confident that Constellation will provide a similar level of performance with an upcoming release.
-
-Overall, Constellation strikes a great balance between security and performance, and we're continuously working to improve its performance capabilities while maintaining its high level of security.
diff --git a/docs/versioned_docs/version-2.17/overview/performance/performance.md b/docs/versioned_docs/version-2.17/overview/performance/performance.md
deleted file mode 100644
index 59bf86602..000000000
--- a/docs/versioned_docs/version-2.17/overview/performance/performance.md
+++ /dev/null
@@ -1,17 +0,0 @@
-# Performance analysis of Constellation
-
-This section provides a comprehensive examination of the performance characteristics of Constellation.
-
-## Runtime encryption
-
-Runtime encryption affects compute performance.
[Benchmarks by Azure and Google](compute.md) show that the performance degradation of Confidential VMs (CVMs) is small, ranging from 2% to 8% for compute-intensive workloads. - -## I/O performance benchmarks - -We evaluated the [I/O performance](io.md) of Constellation, utilizing a collection of synthetic benchmarks targeting networking and storage. -We further compared this performance to native managed Kubernetes offerings from various cloud providers, to better understand how Constellation stands in relation to standard practices. - -## Application benchmarking - -To gauge Constellation's applicability to well-known applications, we performed a [benchmark of HashiCorp Vault](application.md) running on Constellation. -The results were then compared to deployments on the managed Kubernetes offerings from different cloud providers, providing a tangible perspective on Constellation's performance in actual deployment scenarios. diff --git a/docs/versioned_docs/version-2.17/overview/product.md b/docs/versioned_docs/version-2.17/overview/product.md deleted file mode 100644 index 4b5d90706..000000000 --- a/docs/versioned_docs/version-2.17/overview/product.md +++ /dev/null @@ -1,12 +0,0 @@ -# Product features - -Constellation is a Kubernetes engine that aims to provide the best possible data security in combination with enterprise-grade scalability and reliability features---and a smooth user experience. - -From a security perspective, Constellation implements the [Confidential Kubernetes](confidential-kubernetes.md) concept and corresponding security features, which shield your entire cluster from the underlying infrastructure. - -From an operational perspective, Constellation provides the following key features: - -* **Native support for different clouds**: Constellation works on Amazon Web Services (AWS), Microsoft Azure, Google Cloud Platform (GCP), and STACKIT. Support for OpenStack-based environments is coming with a future release. Constellation securely interfaces with the cloud infrastructure to provide [cluster autoscaling](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), [dynamic persistent volumes](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/), and [service load balancing](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). -* **High availability**: Constellation uses a [multi-master architecture](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/) with a [stacked etcd topology](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/ha-topology/#stacked-etcd-topology) to ensure high availability. -* **Integrated Day-2 operations**: Constellation lets you securely [upgrade](../workflows/upgrade.md) your cluster to a new release. It also lets you securely [recover](../workflows/recovery.md) a failed cluster. Both with a single command. -* **Support for Terraform**: Constellation includes a [Terraform provider](../workflows/terraform-provider.md) that lets you manage the full lifecycle of your cluster via Terraform. 
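-
-As a hedged illustration of the "single command" Day-2 operations mentioned above, the corresponding invocations are documented in the [CLI reference](../reference/cli.md); the endpoint value below is a placeholder:
-
-```bash
-# Sketch: typical Day-2 operations, each a single CLI command.
-constellation upgrade check          # find available upgrades for the cluster
-constellation upgrade apply          # apply them to the running cluster
-constellation recover -e <node-ip>   # send the recovery key to a node of a stopped cluster
-```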
diff --git a/docs/versioned_docs/version-2.17/overview/security-benefits.md b/docs/versioned_docs/version-2.17/overview/security-benefits.md deleted file mode 100644 index 51a8b64f5..000000000 --- a/docs/versioned_docs/version-2.17/overview/security-benefits.md +++ /dev/null @@ -1,22 +0,0 @@ -# Security benefits and threat model - -Constellation implements the [Confidential Kubernetes](confidential-kubernetes.md) concept and shields entire Kubernetes deployments from the infrastructure. More concretely, Constellation decreases the size of the trusted computing base (TCB) of a Kubernetes deployment. The TCB is the totality of elements in a computing environment that must be trusted not to be compromised. A smaller TCB results in a smaller attack surface. The following diagram shows how Constellation removes the *cloud & datacenter infrastructure* and the *physical hosts*, including the hypervisor, the host OS, and other components, from the TCB (red). Inside the confidential context (green), Kubernetes remains part of the TCB, but its integrity is attested and can be [verified](../workflows/verify-cluster.md). - -![TCB comparison](../_media/tcb.svg) - -Given this background, the following describes the concrete threat classes that Constellation addresses. - -## Insider access - -Employees and third-party contractors of cloud service providers (CSPs) have access to different layers of the cloud infrastructure. -This opens up a large attack surface where workloads and data can be read, copied, or manipulated. With Constellation, Kubernetes deployments are shielded from the infrastructure and thus such accesses are prevented. - -## Infrastructure-based attacks - -Malicious cloud users ("hackers") may break out of their tenancy and access other tenants' data. Advanced attackers may even be able to establish a permanent foothold within the infrastructure and access data over a longer period. Analogously to the *insider access* scenario, Constellation also prevents access to a deployment's data in this scenario. - -## Supply chain attacks - -Supply chain security is receiving lots of attention recently due to an [increasing number of recorded attacks](https://www.enisa.europa.eu/news/enisa-news/understanding-the-increase-in-supply-chain-security-attacks). For instance, a malicious actor could attempt to tamper Constellation node images (including Kubernetes and other software) before they're loaded in the confidential VMs of a cluster. Constellation uses [remote attestation](../architecture/attestation.md) in conjunction with public [transparency logs](../workflows/verify-cli.md) to prevent this. - -In the future, Constellation will extend this feature to customer workloads. This will enable cluster owners to create auditable policies that precisely define which containers can run in a given deployment. diff --git a/docs/versioned_docs/version-2.17/reference/cli.md b/docs/versioned_docs/version-2.17/reference/cli.md deleted file mode 100644 index a728474e7..000000000 --- a/docs/versioned_docs/version-2.17/reference/cli.md +++ /dev/null @@ -1,843 +0,0 @@ -# CLI reference - - - -Use the Constellation CLI to create and manage your clusters. 
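-
-The following is an illustrative sketch of a first-time flow using the commands documented on this page; it isn't generated CLI output, and the GCP project ID, service account ID, and zone are placeholders:
-
-```bash
-# Sketch: bring up a cluster on GCP with the commands documented below.
-constellation config generate gcp
-constellation iam create gcp --projectID my-project --serviceAccountID my-constellation --zone europe-west3-b --update-config
-constellation apply   # creates the infrastructure and initializes the cluster
-```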
- -Usage: - -``` -constellation [command] -``` -Commands: - -* [config](#constellation-config): Work with the Constellation configuration file - * [generate](#constellation-config-generate): Generate a default configuration and state file - * [fetch-measurements](#constellation-config-fetch-measurements): Fetch measurements for configured cloud provider and image - * [instance-types](#constellation-config-instance-types): Print the supported instance types for all cloud providers - * [kubernetes-versions](#constellation-config-kubernetes-versions): Print the Kubernetes versions supported by this CLI - * [migrate](#constellation-config-migrate): Migrate a configuration file to a new version -* [create](#constellation-create): Create instances on a cloud platform for your Constellation cluster -* [apply](#constellation-apply): Apply a configuration to a Constellation cluster -* [mini](#constellation-mini): Manage MiniConstellation clusters - * [up](#constellation-mini-up): Create and initialize a new MiniConstellation cluster - * [down](#constellation-mini-down): Destroy a MiniConstellation cluster -* [status](#constellation-status): Show status of a Constellation cluster -* [verify](#constellation-verify): Verify the confidential properties of a Constellation cluster -* [upgrade](#constellation-upgrade): Find and apply upgrades to your Constellation cluster - * [check](#constellation-upgrade-check): Check for possible upgrades - * [apply](#constellation-upgrade-apply): Apply an upgrade to a Constellation cluster -* [recover](#constellation-recover): Recover a completely stopped Constellation cluster -* [terminate](#constellation-terminate): Terminate a Constellation cluster -* [iam](#constellation-iam): Work with the IAM configuration on your cloud provider - * [create](#constellation-iam-create): Create IAM configuration on a cloud platform for your Constellation cluster - * [aws](#constellation-iam-create-aws): Create IAM configuration on AWS for your Constellation cluster - * [azure](#constellation-iam-create-azure): Create IAM configuration on Microsoft Azure for your Constellation cluster - * [gcp](#constellation-iam-create-gcp): Create IAM configuration on GCP for your Constellation cluster - * [destroy](#constellation-iam-destroy): Destroy an IAM configuration and delete local Terraform files - * [upgrade](#constellation-iam-upgrade): Find and apply upgrades to your IAM profile - * [apply](#constellation-iam-upgrade-apply): Apply an upgrade to an IAM profile -* [version](#constellation-version): Display version of this CLI -* [init](#constellation-init): Initialize the Constellation cluster - -## constellation config - -Work with the Constellation configuration file - -### Synopsis - -Work with the Constellation configuration file. - -### Options - -``` - -h, --help help for config -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation config generate - -Generate a default configuration and state file - -### Synopsis - -Generate a default configuration and state file for your selected cloud provider. 
- -``` -constellation config generate {aws|azure|gcp|openstack|qemu|stackit} [flags] -``` - -### Options - -``` - -a, --attestation string attestation variant to use {aws-sev-snp|aws-nitro-tpm|azure-sev-snp|azure-tdx|azure-trustedlaunch|gcp-sev-es|gcp-sev-snp|qemu-vtpm}. If not specified, the default for the cloud provider is used - -h, --help help for generate - -k, --kubernetes string Kubernetes version to use in format MAJOR.MINOR (default "v1.29") - -t, --tags strings additional tags for created resources given a list of key=value -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation config fetch-measurements - -Fetch measurements for configured cloud provider and image - -### Synopsis - -Fetch measurements for configured cloud provider and image. - -A config needs to be generated first. - -``` -constellation config fetch-measurements [flags] -``` - -### Options - -``` - -h, --help help for fetch-measurements - -s, --signature-url string alternative URL to fetch measurements' signature from - -u, --url string alternative URL to fetch measurements from -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation config instance-types - -Print the supported instance types for all cloud providers - -### Synopsis - -Print the supported instance types for all cloud providers. - -``` -constellation config instance-types [flags] -``` - -### Options - -``` - -h, --help help for instance-types -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation config kubernetes-versions - -Print the Kubernetes versions supported by this CLI - -### Synopsis - -Print the Kubernetes versions supported by this CLI. - -``` -constellation config kubernetes-versions [flags] -``` - -### Options - -``` - -h, --help help for kubernetes-versions -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation config migrate - -Migrate a configuration file to a new version - -### Synopsis - -Migrate a configuration file to a new version. - -``` -constellation config migrate [flags] -``` - -### Options - -``` - -h, --help help for migrate -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation create - -Create instances on a cloud platform for your Constellation cluster - -### Synopsis - -Create instances on a cloud platform for your Constellation cluster. 
- -``` -constellation create [flags] -``` - -### Options - -``` - -h, --help help for create - -y, --yes create the cluster without further confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation apply - -Apply a configuration to a Constellation cluster - -### Synopsis - -Apply a configuration to a Constellation cluster to initialize or upgrade the cluster. - -``` -constellation apply [flags] -``` - -### Options - -``` - --conformance enable conformance mode - -h, --help help for apply - --merge-kubeconfig merge Constellation kubeconfig file with default kubeconfig file in $HOME/.kube/config - --skip-helm-wait install helm charts without waiting for deployments to be ready - --skip-phases strings comma-separated list of upgrade phases to skip - one or multiple of { infrastructure | init | attestationconfig | certsans | helm | image | k8s } - -y, --yes run command without further confirmation - WARNING: the command might delete or update existing resources without additional checks. Please read the docs. - -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation mini - -Manage MiniConstellation clusters - -### Synopsis - -Manage MiniConstellation clusters. - -### Options - -``` - -h, --help help for mini -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation mini up - -Create and initialize a new MiniConstellation cluster - -### Synopsis - -Create and initialize a new MiniConstellation cluster. - -A mini cluster consists of a single control-plane and worker node, hosted using QEMU/KVM. - -``` -constellation mini up [flags] -``` - -### Options - -``` - -h, --help help for up - --merge-kubeconfig merge Constellation kubeconfig file with default kubeconfig file in $HOME/.kube/config (default true) -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation mini down - -Destroy a MiniConstellation cluster - -### Synopsis - -Destroy a MiniConstellation cluster. - -``` -constellation mini down [flags] -``` - -### Options - -``` - -h, --help help for down - -y, --yes terminate the cluster without further confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation status - -Show status of a Constellation cluster - -### Synopsis - -Show the status of a constellation cluster. 
- -Shows microservice, image, and Kubernetes versions installed in the cluster. Also shows status of current version upgrades. - -``` -constellation status [flags] -``` - -### Options - -``` - -h, --help help for status -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation verify - -Verify the confidential properties of a Constellation cluster - -### Synopsis - -Verify the confidential properties of a Constellation cluster. -If arguments aren't specified, values are read from `constellation-state.yaml`. - -``` -constellation verify [flags] -``` - -### Options - -``` - --cluster-id string expected cluster identifier - -h, --help help for verify - -e, --node-endpoint string endpoint of the node to verify, passed as HOST[:PORT] - -o, --output string print the attestation document in the output format {json|raw} -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation upgrade - -Find and apply upgrades to your Constellation cluster - -### Synopsis - -Find and apply upgrades to your Constellation cluster. - -### Options - -``` - -h, --help help for upgrade -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation upgrade check - -Check for possible upgrades - -### Synopsis - -Check which upgrades can be applied to your Constellation Cluster. - -``` -constellation upgrade check [flags] -``` - -### Options - -``` - -h, --help help for check - --ref string the reference to use for querying new versions (default "-") - --stream string the stream to use for querying new versions (default "stable") - -u, --update-config update the specified config file with the suggested versions -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation upgrade apply - -Apply an upgrade to a Constellation cluster - -### Synopsis - -Apply an upgrade to a Constellation cluster by applying the chosen configuration. - -``` -constellation upgrade apply [flags] -``` - -### Options - -``` - --conformance enable conformance mode - -h, --help help for apply - --skip-helm-wait install helm charts without waiting for deployments to be ready - --skip-phases strings comma-separated list of upgrade phases to skip - one or multiple of { infrastructure | helm | image | k8s } - -y, --yes run upgrades without further confirmation - WARNING: might delete your resources in case you are using cert-manager in your cluster. Please read the docs. - WARNING: might unintentionally overwrite measurements in the running cluster. 
-``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation recover - -Recover a completely stopped Constellation cluster - -### Synopsis - -Recover a Constellation cluster by sending a recovery key to an instance in the boot stage. - -This is only required if instances restart without other instances available for bootstrapping. - -``` -constellation recover [flags] -``` - -### Options - -``` - -e, --endpoint string endpoint of the instance, passed as HOST[:PORT] - -h, --help help for recover -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation terminate - -Terminate a Constellation cluster - -### Synopsis - -Terminate a Constellation cluster. - -The cluster can't be started again, and all persistent storage will be lost. - -``` -constellation terminate [flags] -``` - -### Options - -``` - -h, --help help for terminate - -y, --yes terminate the cluster without further confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation iam - -Work with the IAM configuration on your cloud provider - -### Synopsis - -Work with the IAM configuration on your cloud provider. - -### Options - -``` - -h, --help help for iam -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation iam create - -Create IAM configuration on a cloud platform for your Constellation cluster - -### Synopsis - -Create IAM configuration on a cloud platform for your Constellation cluster. - -### Options - -``` - -h, --help help for create - --update-config update the config file with the specific IAM information - -y, --yes create the IAM configuration without further confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation iam create aws - -Create IAM configuration on AWS for your Constellation cluster - -### Synopsis - -Create IAM configuration on AWS for your Constellation cluster. - -``` -constellation iam create aws [flags] -``` - -### Options - -``` - -h, --help help for aws - --prefix string name prefix for all resources (required) - --zone string AWS availability zone the resources will be created in, e.g., us-east-2a (required) - See the Constellation docs for a list of currently supported regions. 
-``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - --update-config update the config file with the specific IAM information - -C, --workspace string path to the Constellation workspace - -y, --yes create the IAM configuration without further confirmation -``` - -## constellation iam create azure - -Create IAM configuration on Microsoft Azure for your Constellation cluster - -### Synopsis - -Create IAM configuration on Microsoft Azure for your Constellation cluster. - -``` -constellation iam create azure [flags] -``` - -### Options - -``` - -h, --help help for azure - --region string region the resources will be created in, e.g., westus (required) - --resourceGroup string name prefix of the two resource groups your cluster / IAM resources will be created in (required) - --servicePrincipal string name of the service principal that will be created (required) -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - --update-config update the config file with the specific IAM information - -C, --workspace string path to the Constellation workspace - -y, --yes create the IAM configuration without further confirmation -``` - -## constellation iam create gcp - -Create IAM configuration on GCP for your Constellation cluster - -### Synopsis - -Create IAM configuration on GCP for your Constellation cluster. - -``` -constellation iam create gcp [flags] -``` - -### Options - -``` - -h, --help help for gcp - --projectID string ID of the GCP project the configuration will be created in (required) - Find it on the welcome screen of your project: https://console.cloud.google.com/welcome - --serviceAccountID string ID for the service account that will be created (required) - Must be 6 to 30 lowercase letters, digits, or hyphens. - --zone string GCP zone the cluster will be deployed in (required) - Find a list of available zones here: https://cloud.google.com/compute/docs/regions-zones#available -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - --update-config update the config file with the specific IAM information - -C, --workspace string path to the Constellation workspace - -y, --yes create the IAM configuration without further confirmation -``` - -## constellation iam destroy - -Destroy an IAM configuration and delete local Terraform files - -### Synopsis - -Destroy an IAM configuration and delete local Terraform files. - -``` -constellation iam destroy [flags] -``` - -### Options - -``` - -h, --help help for destroy - -y, --yes destroy the IAM configuration without asking for confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation iam upgrade - -Find and apply upgrades to your IAM profile - -### Synopsis - -Find and apply upgrades to your IAM profile. 
- -### Options - -``` - -h, --help help for upgrade -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation iam upgrade apply - -Apply an upgrade to an IAM profile - -### Synopsis - -Apply an upgrade to an IAM profile. - -``` -constellation iam upgrade apply [flags] -``` - -### Options - -``` - -h, --help help for apply - -y, --yes run upgrades without further confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation version - -Display version of this CLI - -### Synopsis - -Display version of this CLI. - -``` -constellation version [flags] -``` - -### Options - -``` - -h, --help help for version -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation init - -Initialize the Constellation cluster - -### Synopsis - -Initialize the Constellation cluster. - -Start your confidential Kubernetes. - -``` -constellation init [flags] -``` - -### Options - -``` - --conformance enable conformance mode - -h, --help help for init - --merge-kubeconfig merge Constellation kubeconfig file with default kubeconfig file in $HOME/.kube/config - --skip-helm-wait install helm charts without waiting for deployments to be ready -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - diff --git a/docs/versioned_docs/version-2.17/reference/migration.md b/docs/versioned_docs/version-2.17/reference/migration.md deleted file mode 100644 index 36680eef6..000000000 --- a/docs/versioned_docs/version-2.17/reference/migration.md +++ /dev/null @@ -1,85 +0,0 @@ -# Migrations - -This document describes breaking changes and migrations between Constellation releases. -Use [`constellation config migrate`](./cli.md#constellation-config-migrate) to automatically update an old config file to a new format. - -## Migrating from Azure's service principal authentication to managed identity authentication - -- The `provider.azure.appClientID` and `provider.azure.appClientSecret` fields are no longer supported and should be removed. -- To keep using an existing UAMI, add the `Owner` permission with the scope of your `resourceGroup`. -- Otherwise, simply [create new Constellation IAM credentials](../workflows/config.md#creating-an-iam-configuration) and use the created UAMI. -- To migrate the authentication for an existing cluster on Azure to an UAMI with the necessary permissions: - 1. Remove the `aadClientId` and `aadClientSecret` from the azureconfig secret. - 2. Set `useManagedIdentityExtension` to `true` and use the `userAssignedIdentity` from the Constellation config for the value of `userAssignedIdentityID`. - 3. 
Restart the CSI driver, cloud controller manager, cluster autoscaler, and Constellation operator pods. - - -## Migrating from CLI versions before 2.10 - -- AWS cluster upgrades require additional IAM permissions for the newly introduced `aws-load-balancer-controller`. Please upgrade your IAM roles using `iam upgrade apply`. This will show necessary changes and apply them, if desired. -- The global `nodeGroups` field was added. -- The fields `instanceType`, `stateDiskSizeGB`, and `stateDiskType` for each cloud provider are now part of the configuration of individual node groups. -- The `constellation create` command no longer uses the flags `--control-plane-count` and `--worker-count`. Instead, the initial node count is configured per node group in the `nodeGroups` field. - -## Migrating from CLI versions before 2.9 - -- The `provider.azure.appClientID` and `provider.azure.clientSecretValue` fields were removed to enforce migration to managed identity authentication - -## Migrating from CLI versions before 2.8 - -- The `measurements` field for each cloud service provider was replaced with a global `attestation` field. -- The `confidentialVM`, `idKeyDigest`, and `enforceIdKeyDigest` fields for the Azure cloud service provider were removed in favor of using the global `attestation` field. -- The optional global field `attestationVariant` was replaced by the now required `attestation` field. - -## Migrating from CLI versions before 2.3 - -- The `sshUsers` field was deprecated in v2.2 and has been removed from the configuration in v2.3. - As an alternative for SSH, check the workflow section [Connect to nodes](../workflows/troubleshooting.md#node-shell-access). -- The `image` field for each cloud service provider has been replaced with a global `image` field. Use the following mapping to migrate your configuration: -
- Show all - - | CSP | old image | new image | - | ----- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- | - | AWS | `ami-06b8cbf4837a0a57c` | `v2.2.2` | - | AWS | `ami-02e96dc04a9e438cd` | `v2.2.2` | - | AWS | `ami-028ead928a9034b2f` | `v2.2.2` | - | AWS | `ami-032ac10dd8d8266e3` | `v2.2.1` | - | AWS | `ami-032e0d57cc4395088` | `v2.2.1` | - | AWS | `ami-053c3e49e19b96bdd` | `v2.2.1` | - | AWS | `ami-0e27ebcefc38f648b` | `v2.2.0` | - | AWS | `ami-098cd37f66523b7c3` | `v2.2.0` | - | AWS | `ami-04a87d302e2509aad` | `v2.2.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.2.2` | `v2.2.2` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.2.2` | `v2.2.2` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.2.1` | `v2.2.1` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.2.1` | `v2.2.1` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.2.0` | `v2.2.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.2.0` | `v2.2.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.1.0` | `v2.1.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.1.0` | `v2.1.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.0.0` | `v2.0.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.0.0` | `v2.0.0` | - | GCP | `projects/constellation-images/global/images/constellation-v2-2-2` | `v2.2.2` | - | GCP | `projects/constellation-images/global/images/constellation-v2-2-1` | `v2.2.1` | - | GCP | `projects/constellation-images/global/images/constellation-v2-2-0` | `v2.2.0` | - | GCP | `projects/constellation-images/global/images/constellation-v2-1-0` | `v2.1.0` | - | GCP | `projects/constellation-images/global/images/constellation-v2-0-0` | `v2.0.0` | -
-- The `enforcedMeasurements` field has been removed and merged with the `measurements` field. - - To migrate your config containing a new image (`v2.3` or greater), remove the old `measurements` and `enforcedMeasurements` entries from your config and run `constellation fetch-measurements` - - To migrate your config containing an image older than `v2.3`, remove the `enforcedMeasurements` entry and replace the entries in `measurements` as shown in the example below: - - ```diff - measurements: - - 0: DzXCFGCNk8em5ornNZtKi+Wg6Z7qkQfs5CfE3qTkOc8= - + 0: - + expected: DzXCFGCNk8em5ornNZtKi+Wg6Z7qkQfs5CfE3qTkOc8= - + warnOnly: true - - 8: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= - + 8: - + expected: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= - + warnOnly: false - -enforcedMeasurements: - - - 8 - ``` diff --git a/docs/versioned_docs/version-2.17/reference/slsa.md b/docs/versioned_docs/version-2.17/reference/slsa.md deleted file mode 100644 index 21f4e713c..000000000 --- a/docs/versioned_docs/version-2.17/reference/slsa.md +++ /dev/null @@ -1,73 +0,0 @@ -# Supply chain levels for software artifacts (SLSA) adoption - -[Supply chain Levels for Software Artifacts, or SLSA (salsa)](https://slsa.dev/) is a framework for improving and grading a project's build system and engineering processes. SLSA focuses on security improvements for source code storage as well as build system definition, execution, and observation. SLSA is structured in [four levels](https://slsa.dev/spec/v0.1/levels). This page describes the adoption of SLSA for Constellation. - -:::info -SLSA is still in alpha status. The presented levels and their requirements might change in the future. We will adopt any changes into our engineering processes, as they get defined. -::: - -## Level 1 - Adopted - -**[Build - Scripted](https://slsa.dev/spec/v0.1/requirements#scripted-build)** - -All build steps are automated via [Bazel](https://github.com/edgelesssys/constellation/tree/main/bazel/ci) and [GitHub Actions](https://github.com/edgelesssys/constellation/tree/main/.github). - -**[Provenance - Available](https://slsa.dev/spec/v0.1/requirements#available)** - -Provenance for the CLI is generated using the [slsa-github-generator](https://github.com/slsa-framework/slsa-github-generator). - -## Level 2 - Adopted - -**[Source - Version Controlled](https://slsa.dev/spec/v0.1/requirements#version-controlled)** - -Constellation is hosted on GitHub using git. - -**[Build - Build Service](https://slsa.dev/spec/v0.1/requirements#build-service)** - -All builds are carried out by [GitHub Actions](https://github.com/edgelesssys/constellation/tree/main/.github). - -**[Provenance - Authenticated](https://slsa.dev/spec/v0.1/requirements#authenticated)** - -Provenance for the CLI is signed using the [slsa-github-generator](https://github.com/slsa-framework/slsa-github-generator). Learn [how to verify the CLI](../workflows/verify-cli.md) using the signed provenance, before using it for the first time. - -**[Provenance - Service Generated](https://slsa.dev/spec/v0.1/requirements#service-generated)** - -Provenance for the CLI is generated using the [slsa-github-generator](https://github.com/slsa-framework/slsa-github-generator) in GitHub Actions. 
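If you want to see what such a provenance check can look like in practice, the following is a minimal sketch using the generic [slsa-verifier](https://github.com/slsa-framework/slsa-verifier) tool. The binary and provenance file names are assumptions for illustration; the CLI verification guide linked above remains the authoritative procedure.

```bash
# Hypothetical file names: download the CLI binary and its provenance from the release page first.
slsa-verifier verify-artifact constellation-linux-amd64 \
  --provenance-path constellation.intoto.jsonl \
  --source-uri github.com/edgelesssys/constellation
```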
- -## Level 3 - Adopted - -**[Source - Verified History](https://slsa.dev/spec/v0.1/requirements#verified-history)** - -The [Edgeless Systems](https://github.com/edgelesssys) GitHub organization [requires two-factor authentication](https://docs.github.com/en/organizations/keeping-your-organization-secure/managing-two-factor-authentication-for-your-organization/requiring-two-factor-authentication-in-your-organization) for all members. - -**[Source - Retained Indefinitely](https://slsa.dev/spec/v0.1/requirements#retained-indefinitely)** - -Since we use GitHub to host the repository, an external person can't modify or delete the history. Before a pull request can be merged, an explicit approval from an [Edgeless Systems](https://github.com/edgelesssys) team member is required. - -The same holds true for changes proposed by team members. Each change to `main` needs to be proposed via a pull request and requires at least one approval. - -The [Edgeless Systems](https://github.com/edgelesssys) GitHub organization admins control these settings and are able to make changes to the repository's history should legal requirements necessitate it. These changes require two-party approval following the obliterate policy. - -**[Build - Build as Code](https://slsa.dev/spec/v0.1/requirements#build-as-code)** - -All build files for Constellation are stored in [the same repository](https://github.com/edgelesssys/constellation/tree/main/.github). - -**[Build - Ephemeral Environment](https://slsa.dev/spec/v0.1/requirements#ephemeral-environment)** - -All GitHub Action workflows are executed on [GitHub-hosted runners](https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners). These runners are only available during workflow execution. - -We currently don't use [self-hosted runners](https://docs.github.com/en/actions/hosting-your-own-runners/about-self-hosted-runners). - -**[Build - Isolated](https://slsa.dev/spec/v0.1/requirements#isolated)** - -As outlined in the previous section, we use GitHub-hosted runners, which provide a new, isolated and ephemeral environment for each build. - -Additionally, the [SLSA GitHub generator](https://github.com/slsa-framework/slsa-github-generator#generation-of-provenance) itself is run in an isolated workflow with the artifact hash as a defined input. - -**[Provenance - Non-falsifiable](https://slsa.dev/spec/v0.1/requirements#non-falsifiable)** - -As outlined by the [SLSA GitHub generator](https://github.com/slsa-framework/slsa-github-generator), it already fulfills the non-falsifiable requirements for SLSA Level 3. The generated provenance is signed using [sigstore](https://sigstore.dev/) with an OIDC-based proof of identity. - -## Level 4 - In Progress - -We strive to adopt certain aspects of SLSA Level 4 that support our engineering process. At the same time, SLSA is still in alpha status and the biggest changes to SLSA are expected to be around Level 4. diff --git a/docs/versioned_docs/version-2.17/reference/terraform.md b/docs/versioned_docs/version-2.17/reference/terraform.md deleted file mode 100644 index 9825a8bb8..000000000 --- a/docs/versioned_docs/version-2.17/reference/terraform.md +++ /dev/null @@ -1,37 +0,0 @@ -# Terraform usage - -[Terraform](https://www.terraform.io/) is an Infrastructure as Code (IaC) framework to manage cloud resources. This page explains how Constellation uses it internally and how advanced users may manually use it to have more control over the resource creation. 
- -:::info -Information on this page is intended for users who are familiar with Terraform. -It's not required for common usage of Constellation. -See the [Terraform documentation](https://developer.hashicorp.com/terraform/docs) if you want to learn more about it. -::: - -## Terraform state files - -Constellation keeps Terraform state files in subdirectories of the workspace together with the corresponding Terraform configuration files and metadata. -The subdirectories are created on the first Constellation CLI action that uses Terraform internally. - -Currently, these subdirectories are: - -* `constellation-terraform` - Terraform state files for the resources of the Constellation cluster -* `constellation-iam-terraform` - Terraform state files for IAM configuration - -As with all commands, commands that work with these files (e.g., `apply`, `terminate`, `iam`) have to be executed from the root of the cluster's [workspace directory](../architecture/orchestration.md#workspaces). You usually don't need and shouldn't manipulate or delete the subdirectories manually. - -## Interacting with Terraform manually - -Manual interaction with Terraform state created by Constellation (i.e., via the Terraform CLI) should only be performed by experienced users. It may lead to unrecoverable loss of cloud resources. For the majority of users and use cases, the interaction done by the [Constellation CLI](cli.md) is sufficient. - -## Terraform debugging - -To debug Terraform issues, the Constellation CLI offers the `tf-log` flag. You can set it to any of [Terraform's log levels](https://developer.hashicorp.com/terraform/internals/debugging): -* `JSON` (JSON-formatted logs at `TRACE` level) -* `TRACE` -* `DEBUG` -* `INFO` -* `WARN` -* `ERROR` - -The log output is written to the `terraform.log` file in the workspace directory. The output is appended to the file on each run. diff --git a/docs/versioned_docs/version-2.17/workflows/cert-manager.md b/docs/versioned_docs/version-2.17/workflows/cert-manager.md deleted file mode 100644 index 1d847e8bf..000000000 --- a/docs/versioned_docs/version-2.17/workflows/cert-manager.md +++ /dev/null @@ -1,13 +0,0 @@ -# Install cert-manager - -:::caution -If you want to use cert-manager with Constellation, pay attention to the following to avoid potential pitfalls. -::: - -Constellation ships with cert-manager preinstalled. -The default installation is part of the `kube-system` namespace, as all other Constellation-managed microservices. -You are free to install more instances of cert-manager into other namespaces. -However, be aware that any new installation needs to use the same version as the one installed with Constellation or rely on the same CRD versions. -Also remember to set the `installCRDs` value to `false` when installing new cert-manager instances. -It will create problems if you have two installations of cert-manager depending on different versions of the installed CRDs. -CRDs are cluster-wide resources and cert-manager depends on specific versions of those CRDs for each release. diff --git a/docs/versioned_docs/version-2.17/workflows/config.md b/docs/versioned_docs/version-2.17/workflows/config.md deleted file mode 100644 index 11cf31cbd..000000000 --- a/docs/versioned_docs/version-2.17/workflows/config.md +++ /dev/null @@ -1,353 +0,0 @@ -# Configure your cluster - -:::info -This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. 
-::: - - - ---- - -Before you can create your cluster, you need to configure the identity and access management (IAM) for your cloud service provider (CSP) and choose machine types for the nodes. - -## Creating the configuration file - -You can generate a configuration file for your CSP by using the following CLI command: - - - - -```bash -constellation config generate aws -``` - - - - -```bash -constellation config generate azure -``` - - - - -```bash -constellation config generate gcp -``` - - - - -```bash -constellation config generate stackit -``` - - - - -This creates the file `constellation-conf.yaml` in the current directory. - -## Choosing a VM type - -Constellation supports the following VM types: - - - -By default, Constellation uses `m6a.xlarge` VMs (4 vCPUs, 16 GB RAM) to create your cluster. -Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file. -If you are using the default attestation variant `awsSEVSNP`, you can use the instance types described in [AWS's AMD SEV-SNP docs](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/snp-requirements.html). -Please mind the region restrictions mentioned in the [Getting started](../getting-started/first-steps.md#create-a-cluster) section. - -If you are using the attestation variant `awsNitroTPM`, you can choose any of the [nitroTPM-enabled instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enable-nitrotpm-prerequisites.html). - -The Constellation CLI can also print the supported instance types with: `constellation config instance-types`. - - - - -By default, Constellation uses `Standard_DC4as_v5` CVMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file. For CVMs, any VM type with a minimum of 4 vCPUs from the [DCasv5 & DCadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series) or [ECasv5 & ECadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/ecasv5-ecadsv5-series) families is supported. - -You can also run `constellation config instance-types` to get the list of all supported options. - - - - -By default, Constellation uses `n2d-standard-4` VMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file. Supported are all machines with a minimum of 4 vCPUs from the [C2D](https://cloud.google.com/compute/docs/compute-optimized-machines#c2d_machine_types) or [N2D](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines) family. You can run `constellation config instance-types` to get the list of all supported options. - - - - -By default, Constellation uses `m1a.4cd` VMs (4 vCPUs, 30 GB RAM) to create your cluster. -Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file. - -The following instance types are known to be supported: - -| name | vCPUs | GB RAM | -|----------|-------|--------| -| m1a.4cd | 4 | 30 | -| m1a.8cd | 8 | 60 | -| m1a.16cd | 16 | 120 | -| m1a.30cd | 30 | 230 | - -You can choose any of the SEV-enabled instance types. You can find a list of all supported instance types in the [STACKIT documentation](https://docs.stackit.cloud/stackit/en/virtual-machine-flavors-75137231.html). - -The Constellation CLI can also print the supported instance types with: `constellation config instance-types`. 
- - - - -Fill the desired VM type into the `instanceType` fields in the `constellation-conf.yml` file. - -## Creating additional node groups - -By default, Constellation creates the node groups `control_plane_default` and `worker_default` for control-plane nodes and workers, respectively. -If you require additional control-plane or worker groups with different instance types, zone placements, or disk sizes, you can add additional node groups to the `constellation-conf.yml` file. -Each node group can be scaled individually. - -Consider the following example for AWS: - -```yaml -nodeGroups: - control_plane_default: - role: control-plane - instanceType: c6a.xlarge - stateDiskSizeGB: 30 - stateDiskType: gp3 - zone: eu-west-1c - initialCount: 3 - worker_default: - role: worker - instanceType: c6a.xlarge - stateDiskSizeGB: 30 - stateDiskType: gp3 - zone: eu-west-1c - initialCount: 2 - high_cpu: - role: worker - instanceType: c6a.24xlarge - stateDiskSizeGB: 128 - stateDiskType: gp3 - zone: eu-west-1c - initialCount: 1 -``` - -This configuration creates an additional node group `high_cpu` with a larger instance type and disk. - -You can use the field `zone` to specify what availability zone nodes of the group are placed in. -On Azure, this field is empty by default and nodes are automatically spread across availability zones. -STACKIT currently offers SEV-enabled CPUs in the `eu01-1`, `eu01-2`, and `eu01-3` zones. -Consult the documentation of your cloud provider for more information: - -* [AWS](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/) -* [Azure](https://azure.microsoft.com/en-us/explore/global-infrastructure/availability-zones) -* [GCP](https://cloud.google.com/compute/docs/regions-zones) -* [STACKIT](https://docs.stackit.cloud/stackit/en/regions-and-availability-zones-75137212.html) - -## Choosing a Kubernetes version - -To learn which Kubernetes versions can be installed with your current CLI, you can run `constellation config kubernetes-versions`. -See also Constellation's [Kubernetes support policy](../architecture/versions.md#kubernetes-support-policy). - -## Creating an IAM configuration - -You can create an IAM configuration for your cluster automatically using the `constellation iam create` command. -If you already have a Constellation configuration file, you can add the `--update-config` flag to the command. This writes the needed IAM fields into your configuration. Furthermore, the flag updates the zone/region of the configuration if it hasn't been set yet. - - - - -You must be authenticated with the [AWS CLI](https://aws.amazon.com/en/cli/) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). - -```bash -constellation iam create aws --zone=us-east-2a --prefix=constellTest -``` - -This command creates IAM configuration for the AWS zone `us-east-2a` using the prefix `constellTest` for all named resources being created. - -Constellation OS images are currently replicated to the following regions: - -* `eu-central-1` -* `eu-west-1` -* `eu-west-3` -* `us-east-2` -* `ap-south-1` - -If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+AWS+image+region:+xx-xxxx-x). 
- -You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). - -Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - - - -You must be authenticated with the [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). - -```bash -constellation iam create azure --region=westus --resourceGroup=constellTest --servicePrincipal=spTest -``` - -This command creates IAM configuration on the Azure region `westus` creating a new resource group `constellTest` and a new service principal `spTest`. - -CVMs are available in several Azure regions. Constellation OS images are currently replicated to the following: - -* `germanywestcentral` -* `westus` -* `eastus` -* `northeurope` -* `westeurope` -* `southeastasia` - -If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+Azure+image+region:+xx-xxxx-x). - -You can find a list of all [regions in Azure's documentation](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=virtual-machines®ions=all). - -Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - - - -You must be authenticated with the [GCP CLI](https://cloud.google.com/sdk/gcloud) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). - -```bash -constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --serviceAccountID=constell-test -``` - -This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. - -Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `N2D`. - -Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - - - -STACKIT requires manual creation and configuration of service accounts. Look at the [first steps](../getting-started/first-steps.md) for more information. - - - - -
-Alternatively, you can manually create the IAM configuration on your CSP. - -The following describes the configuration fields and how you obtain the required information or create the required resources. - - - - -* **region**: The name of your chosen AWS data center region, e.g., `us-east-2`. - - Constellation OS images are currently replicated to the following regions: - * `eu-central-1` - * `eu-west-1` - * `eu-west-3` - * `us-east-2` - * `ap-south-1` - - If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+AWS+image+region:+xx-xxxx-x). - - You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). - -* **zone**: The name of your chosen AWS data center availability zone, e.g., `us-east-2a`. - - Learn more about [availability zones in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-availability-zones). - -* **iamProfileControlPlane**: The name of an IAM instance profile attached to all control-plane nodes. - - You can create the resource with [Terraform](https://www.terraform.io/). For that, use the [provided Terraform script](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam) to generate the necessary profile. The profile name will be provided as Terraform output value: `control_plane_instance_profile_name`. - - Alternatively, you can create the AWS profile with a tool of your choice. Use the JSON policy in [main.tf](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam/main.tf) in the resource `aws_iam_policy.control_plane_policy`. - -* **iamProfileWorkerNodes**: The name of an IAM instance profile attached to all worker nodes. - - You can create the resource with [Terraform](https://www.terraform.io/). For that, use the [provided Terraform script](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam) to generate the necessary profile. The profile name will be provided as Terraform output value: `worker_nodes_instance_profile_name`. - - Alternatively, you can create the AWS profile with a tool of your choice. Use the JSON policy in [main.tf](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam/main.tf) in the resource `aws_iam_policy.worker_node_policy`. - - - - -* **subscription**: The UUID of your Azure subscription, e.g., `8b8bd01f-efd9-4113-9bd1-c82137c32da7`. - - You can view your subscription UUID via `az account show` and read the `id` field. For more information refer to [Azure's documentation](https://docs.microsoft.com/en-us/azure/azure-portal/get-subscription-tenant-id#find-your-azure-subscription). - -* **tenant**: The UUID of your Azure tenant, e.g., `3400e5a2-8fe2-492a-886c-38cb66170f25`. - - You can view your tenant UUID via `az account show` and read the `tenant` field. For more information refer to [Azure's documentation](https://docs.microsoft.com/en-us/azure/azure-portal/get-subscription-tenant-id#find-your-azure-ad-tenant). - -* **location**: The Azure datacenter location you want to deploy your cluster in, e.g., `westus`. - - CVMs are available in several Azure regions. 
Constellation OS images are currently replicated to the following: - - * `germanywestcentral` - * `westus` - * `eastus` - * `northeurope` - * `westeurope` - * `southeastasia` - - If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+Azure+image+region:+xx-xxxx-x). - - You can find a list of all [regions in Azure's documentation](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=virtual-machines®ions=all). - -* **resourceGroup**: [Create a new resource group in Azure](https://learn.microsoft.com/azure/azure-resource-manager/management/manage-resource-groups-portal) for your Constellation cluster. Set this configuration field to the name of the created resource group. - -* **userAssignedIdentity**: [Create a new managed identity in Azure](https://learn.microsoft.com/azure/active-directory/managed-identities-azure-resources/how-manage-user-assigned-managed-identities). You should create the identity in a different resource group as all resources within the cluster resource group will be deleted on cluster termination. - - Add three role assignments to the identity: `Owner`, `Virtual Machine Contributor`, and `Application Insights Component Contributor`. The `scope` of all three should refer to the previously created cluster resource group. - - Set the configuration value to the full ID of the created identity, e.g., `/subscriptions/8b8bd01f-efd9-4113-9bd1-c82137c32da7/resourcegroups/constellation-identity/providers/Microsoft.ManagedIdentity/userAssignedIdentities/constellation-identity`. You can get it by opening the `JSON View` from the `Overview` section of the identity. - - The user-assigned identity is used by instances of the cluster to access other cloud resources. - For more information about managed identities refer to [Azure's documentation](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/how-manage-user-assigned-managed-identities). - - - - -* **project**: The ID of your GCP project, e.g., `constellation-129857`. - - You can find it on the [welcome screen of your GCP project](https://console.cloud.google.com/welcome). For more information refer to [Google's documentation](https://support.google.com/googleapi/answer/7014113). - -* **region**: The GCP region you want to deploy your cluster in, e.g., `us-central1`. - - You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). - -* **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-central1-a`. - - You can find a [list of all zones in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). - -* **serviceAccountKeyPath**: To configure this, you need to create a GCP [service account](https://cloud.google.com/iam/docs/service-accounts) with the following permissions: - - * `Compute Instance Admin (v1) (roles/compute.instanceAdmin.v1)` - * `Compute Network Admin (roles/compute.networkAdmin)` - * `Compute Security Admin (roles/compute.securityAdmin)` - * `Compute Storage Admin (roles/compute.storageAdmin)` - * `Service Account User (roles/iam.serviceAccountUser)` - - Afterward, create and download a new JSON key for this service account. Place the downloaded file in your Constellation workspace, and set the config parameter to the filename, e.g., `constellation-129857-15343dba46cb.json`. 
- - - - -STACKIT requires manual creation and configuration of service accounts. Look at the [first steps](../getting-started/first-steps.md) for more information. - - - -
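As a rough illustration of the GCP steps above, the service account, role bindings, and key file can also be created with the `gcloud` CLI. This is a minimal sketch under assumptions: the service account name `constellation-sa` and the key file name are placeholders, and `yourproject-12345` stands in for your actual project ID.

```bash
PROJECT_ID=yourproject-12345      # placeholder project ID
SA_NAME=constellation-sa          # placeholder service account name
SA_EMAIL="${SA_NAME}@${PROJECT_ID}.iam.gserviceaccount.com"

# Create the service account.
gcloud iam service-accounts create "${SA_NAME}" --project="${PROJECT_ID}"

# Grant the roles listed in the GCP section above.
for role in roles/compute.instanceAdmin.v1 roles/compute.networkAdmin \
            roles/compute.securityAdmin roles/compute.storageAdmin \
            roles/iam.serviceAccountUser; do
  gcloud projects add-iam-policy-binding "${PROJECT_ID}" \
    --member="serviceAccount:${SA_EMAIL}" --role="${role}"
done

# Create and download a JSON key; set serviceAccountKeyPath in constellation-conf.yaml to this file.
gcloud iam service-accounts keys create constellation-sa-key.json \
  --iam-account="${SA_EMAIL}"
```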
- -Now that you've configured your CSP, you can [create your cluster](./create.md). - -## Deleting an IAM configuration - -You can keep a created IAM configuration and reuse it for new clusters. Alternatively, you can also delete it if you don't want to use it anymore. - -Delete the IAM configuration by executing the following command in the same directory where you executed `constellation iam create` (the directory that contains [`constellation-iam-terraform`](../reference/terraform.md) as a subdirectory): - -```bash -constellation iam destroy -``` - -:::caution -For Azure, deleting the IAM configuration by executing `constellation iam destroy` will delete the whole resource group created by `constellation iam create`. -This also includes any additional resources in the resource group that weren't created by Constellation. -::: diff --git a/docs/versioned_docs/version-2.17/workflows/create.md b/docs/versioned_docs/version-2.17/workflows/create.md deleted file mode 100644 index 6074ebb16..000000000 --- a/docs/versioned_docs/version-2.17/workflows/create.md +++ /dev/null @@ -1,93 +0,0 @@ -# Create your cluster - -:::info -This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. -::: - - - ---- - -Creating your cluster happens through multiple phases. -The most significant ones are: - -1. Creating the necessary resources in your cloud environment -2. Bootstrapping the Constellation cluster and setting up a connection -3. Installing the necessary Kubernetes components - -`constellation apply` handles all this in a single command. -You can use the `--skip-phases` flag to skip specific phases of the process. -For example, if you created the infrastructure manually, you can skip the cloud resource creation phase. - -See the [architecture](../architecture/orchestration.md) section for details on the inner workings of this process. - -:::tip -If you don't have a cloud subscription, you can also set up a [local Constellation cluster using virtualization](../getting-started/first-steps-local.md) for testing. -::: - -Before you create the cluster, make sure to have a [valid configuration file](./config.md). - - - - -```bash -constellation apply -``` - -`apply` stores the state of your cluster's cloud resources in a [`constellation-terraform`](../architecture/orchestration.md#cluster-creation-process) directory in your workspace. - - - - -Self-managed infrastructure allows for more flexibility in the setup, by separating the infrastructure setup from the Constellation cluster management. -This provides flexibility in DevOps and can meet potential regulatory requirements. -It's recommended to use Terraform for infrastructure management, but you can use any tool of your choice. - -:::info - - When using Terraform, you can use the [Constellation Terraform provider](./terraform-provider.md) to manage the entire Constellation cluster lifecycle. - -::: - -You can refer to the Terraform files for the selected CSP from the [Constellation GitHub repository](https://github.com/edgelesssys/constellation/tree/main/terraform/infrastructure) for a minimum Constellation cluster configuration. From this base, you can now add, edit, or substitute resources per your own requirements with the infrastructure -management tooling of your choice. You need to keep the essential functionality of the base configuration in order for your cluster to function correctly. - - - -:::info - - On Azure, a manual update to the MAA provider's policy is necessary. 
You can apply the update with the following command after creating the infrastructure, with `` being the URL of the MAA provider (i.e., `$(terraform output attestation_url | jq -r)`, when using the minimal Terraform configuration). - - ```bash - constellation maa-patch - ``` - -::: - - - -Make sure all necessary resources are created, e.g., by checking your CSP's portal, and retrieve the necessary values, aligned with the outputs (specified in `outputs.tf`) of the base configuration. - -Fill these outputs into the corresponding fields of the `Infrastructure` block inside the `constellation-state.yaml` file. For example, fill the IP or DNS name your cluster can be reached at into the `.Infrastructure.ClusterEndpoint` field. - -With the required cloud resources set up, continue with initializing your cluster. - -```bash -constellation apply --skip-phases=infrastructure -``` - - - - -Finally, configure `kubectl` for your cluster: - -```bash -export KUBECONFIG="$PWD/constellation-admin.conf" -``` - -🏁 That's it. You've successfully created a Constellation cluster. - -### Troubleshooting - -In case `apply` fails, the CLI collects logs from the bootstrapping instance and stores them inside `constellation-cluster.log`. diff --git a/docs/versioned_docs/version-2.17/workflows/lb.md b/docs/versioned_docs/version-2.17/workflows/lb.md deleted file mode 100644 index 868e61076..000000000 --- a/docs/versioned_docs/version-2.17/workflows/lb.md +++ /dev/null @@ -1,28 +0,0 @@ -# Expose a service - -Constellation integrates the native load balancers of each CSP. Therefore, to expose a service, simply [create a service of type `LoadBalancer`](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). - -## Internet-facing LB service on AWS - -To expose your application service externally, you might want to use a Kubernetes Service of type `LoadBalancer`. On AWS, load-balancing is achieved through the [AWS Load Balancer Controller](https://kubernetes-sigs.github.io/aws-load-balancer-controller) as in managed EKS. - -Since recent versions, the controller deploys an internal LB by default, requiring you to set the annotation `service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing` to get an internet-facing LB. For more details, see the [official docs](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.7/guide/service/nlb/). - -For general information on LB with AWS, see [Network load balancing on Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/network-load-balancing.html). - -:::caution -Before terminating the cluster, all LB-backed services should be deleted so that the controller can clean up the related resources. -::: - -## Ingress on AWS - -The AWS Load Balancer Controller also provisions `Ingress` resources of class `alb`. -AWS Application Load Balancers (ALBs) can be configured with a [`target-type`](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.7/guide/ingress/annotations/#target-type). -The target type `ip` requires using the EKS container network solution, which makes it incompatible with Constellation. -If a service can be exposed on a `NodePort`, the target type `instance` can be used. - -See [Application load balancing on Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html) for more information. - -:::caution -Ingress handlers backed by AWS ALBs reside outside the Constellation cluster, so they shouldn't be handling sensitive traffic! 
-::: diff --git a/docs/versioned_docs/version-2.17/workflows/recovery.md b/docs/versioned_docs/version-2.17/workflows/recovery.md deleted file mode 100644 index 592ae247b..000000000 --- a/docs/versioned_docs/version-2.17/workflows/recovery.md +++ /dev/null @@ -1,179 +0,0 @@ -# Recover your cluster - -Recovery of a Constellation cluster means getting it back into a healthy state after too many concurrent node failures in the control plane. -Reasons for an unhealthy cluster can vary from a power outage, or planned reboot, to migration of nodes and regions. -Recovery events are rare, because Constellation is built for high availability and automatically and securely replaces failed nodes. When a node is replaced, Constellation's control plane first verifies the new node before it sends the node the cryptographic keys required to decrypt its [state disk](../architecture/images.md#state-disk). - -Constellation provides a recovery mechanism for cases where the control plane has failed and is unable to replace nodes. -The `constellation recover` command securely connects to all nodes in need of recovery using [attested TLS](../architecture/attestation.md#attested-tls-atls) and provides them with the keys to decrypt their state disks and continue booting. - -## Identify unhealthy clusters - -The first step to recovery is identifying when a cluster becomes unhealthy. -Usually, this can be first observed when the Kubernetes API server becomes unresponsive. - -You can check the health status of the nodes via the cloud service provider (CSP). -Constellation provides logging information on the boot process and status via serial console output. -In the following, you'll find detailed descriptions for identifying clusters stuck in recovery for each CSP. - - - - -First, open the AWS console to view all Auto Scaling Groups (ASGs) in the region of your cluster. Select the ASG of the control plane `--control-plane` and check that enough members are in a *Running* state. - -Second, check the boot logs of these *Instances*. In the ASG's *Instance management* view, select each desired instance. In the upper right corner, select **Action > Monitor and troubleshoot > Get system log**. - -In the serial console output, search for `Waiting for decryption key`. -Similar output to the following means your node was restarted and needs to decrypt the [state disk](../architecture/images.md#state-disk): - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","caller":"cmd/main.go:55","msg":"Starting disk-mapper","version":"2.0.0","cloudProvider":"gcp"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"setupManager","caller":"setup/setup.go:72","msg":"Preparing existing state disk"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:65","msg":"Starting RejoinClient"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"recoveryServer","caller":"recoveryserver/server.go:59","msg":"Starting RecoveryServer"} -``` - -The node will then try to connect to the [*JoinService*](../architecture/microservices.md#joinservice) and obtain the decryption key. 
-If this fails due to an unhealthy control plane, you will see log messages similar to the following: - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:77","msg":"Received list with JoinService endpoints","endpoints":["192.168.178.4:30090","192.168.178.2:30090"]} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.4:30090"} -{"level":"WARN","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.4:30090: connect: connection refused\"","endpoint":"192.168.178.4:30090"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.2:30090"} -{"level":"WARN","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.2:30090: i/o timeout\"","endpoint":"192.168.178.2:30090"} -{"level":"ERROR","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:110","msg":"Failed to rejoin on all endpoints"} -``` - -This means that you have to recover the node manually. - - - - -In the Azure portal, find the cluster's resource group. -Inside the resource group, open the control plane *Virtual machine scale set* `constellation-scale-set-controlplanes-`. -On the left, go to **Settings** > **Instances** and check that enough members are in a *Running* state. - -Second, check the boot logs of these *Instances*. -In the scale set's *Instances* view, open the details page of the desired instance. -On the left, go to **Support + troubleshooting** > **Serial console**. - -In the serial console output, search for `Waiting for decryption key`. -Similar output to the following means your node was restarted and needs to decrypt the [state disk](../architecture/images.md#state-disk): - -```json -{"level":"INFO","ts":"2022-09-08T09:56:41Z","caller":"cmd/main.go:55","msg":"Starting disk-mapper","version":"2.0.0","cloudProvider":"azure"} -{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"setupManager","caller":"setup/setup.go:72","msg":"Preparing existing state disk"} -{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"recoveryServer","caller":"recoveryserver/server.go:59","msg":"Starting RecoveryServer"} -{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"rejoinClient","caller":"rejoinclient/client.go:65","msg":"Starting RejoinClient"} -``` - -The node will then try to connect to the [*JoinService*](../architecture/microservices.md#joinservice) and obtain the decryption key. 
-If this fails due to an unhealthy control plane, you will see log messages similar to the following: - -```json -{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"rejoinClient","caller":"rejoinclient/client.go:77","msg":"Received list with JoinService endpoints","endpoints":["10.9.0.5:30090","10.9.0.6:30090"]} -{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"10.9.0.5:30090"} -{"level":"WARN","ts":"2022-09-08T09:57:03Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 10.9.0.5:30090: i/o timeout\"","endpoint":"10.9.0.5:30090"} -{"level":"INFO","ts":"2022-09-08T09:57:03Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"10.9.0.6:30090"} -{"level":"WARN","ts":"2022-09-08T09:57:23Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 10.9.0.6:30090: i/o timeout\"","endpoint":"10.9.0.6:30090"} -{"level":"ERROR","ts":"2022-09-08T09:57:23Z","logger":"rejoinClient","caller":"rejoinclient/client.go:110","msg":"Failed to rejoin on all endpoints"} -``` - -This means that you have to recover the node manually. - - - - -First, check that the control plane *Instance Group* has enough members in a *Ready* state. -In the GCP Console, go to **Instance Groups** and check the group for the cluster's control plane `-control-plane-`. - -Second, check the status of the *VM Instances*. -Go to **VM Instances** and open the details of the desired instance. -Check the serial console output of that instance by opening the **Logs** > **Serial port 1 (console)** page: - -![GCP portal serial console link](../_media/recovery-gcp-serial-console-link.png) - -In the serial console output, search for `Waiting for decryption key`. -Similar output to the following means your node was restarted and needs to decrypt the [state disk](../architecture/images.md#state-disk): - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","caller":"cmd/main.go:55","msg":"Starting disk-mapper","version":"2.0.0","cloudProvider":"gcp"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"setupManager","caller":"setup/setup.go:72","msg":"Preparing existing state disk"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:65","msg":"Starting RejoinClient"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"recoveryServer","caller":"recoveryserver/server.go:59","msg":"Starting RecoveryServer"} -``` - -The node will then try to connect to the [*JoinService*](../architecture/microservices.md#joinservice) and obtain the decryption key. 
-If this fails due to an unhealthy control plane, you will see log messages similar to the following: - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:77","msg":"Received list with JoinService endpoints","endpoints":["192.168.178.4:30090","192.168.178.2:30090"]} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.4:30090"} -{"level":"WARN","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.4:30090: connect: connection refused\"","endpoint":"192.168.178.4:30090"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.2:30090"} -{"level":"WARN","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.2:30090: i/o timeout\"","endpoint":"192.168.178.2:30090"} -{"level":"ERROR","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:110","msg":"Failed to rejoin on all endpoints"} -``` - -This means that you have to recover the node manually. - - - - -First, open the STACKIT portal to view all servers in your project. Select individual control plane nodes `--control-plane--` and check that enough members are in a *Running* state. - -Second, check the boot logs of these servers. Click on a server name and select **Overview**. Find the **Machine Setup** section and click on **Web console** > **Open console**. - -In the serial console output, search for `Waiting for decryption key`. -Similar output to the following means your node was restarted and needs to decrypt the [state disk](../architecture/images.md#state-disk): - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","caller":"cmd/main.go:55","msg":"Starting disk-mapper","version":"2.0.0","cloudProvider":"gcp"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"setupManager","caller":"setup/setup.go:72","msg":"Preparing existing state disk"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:65","msg":"Starting RejoinClient"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"recoveryServer","caller":"recoveryserver/server.go:59","msg":"Starting RecoveryServer"} -``` - -The node will then try to connect to the [*JoinService*](../architecture/microservices.md#joinservice) and obtain the decryption key. 
-If this fails due to an unhealthy control plane, you will see log messages similar to the following: - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:77","msg":"Received list with JoinService endpoints","endpoints":["192.168.178.4:30090","192.168.178.2:30090"]} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.4:30090"} -{"level":"WARN","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.4:30090: connect: connection refused\"","endpoint":"192.168.178.4:30090"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.2:30090"} -{"level":"WARN","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.2:30090: i/o timeout\"","endpoint":"192.168.178.2:30090"} -{"level":"ERROR","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:110","msg":"Failed to rejoin on all endpoints"} -``` - -This means that you have to recover the node manually. - - - - -## Recover a cluster - -Recovering a cluster requires the following parameters: - -* The `constellation-state.yaml` file in your working directory or the cluster's endpoint -* The master secret of the cluster - -A cluster can be recovered like this: - -```bash -$ constellation recover -Pushed recovery key. -Pushed recovery key. -Pushed recovery key. -Recovered 3 control-plane nodes. -``` - -In the serial console output of the node you'll see a similar output to the following: - -```json -{"level":"INFO","ts":"2022-09-08T10:26:59Z","logger":"recoveryServer","caller":"recoveryserver/server.go:93","msg":"Received recover call"} -{"level":"INFO","ts":"2022-09-08T10:26:59Z","logger":"recoveryServer","caller":"recoveryserver/server.go:125","msg":"Received state disk key and measurement secret, shutting down server"} -{"level":"INFO","ts":"2022-09-08T10:26:59Z","logger":"recoveryServer.gRPC","caller":"zap/server_interceptors.go:61","msg":"finished streaming call with code OK","grpc.start_time":"2022-09-08T10:26:59Z","system":"grpc","span.kind":"server","grpc.service":"recoverproto.API","grpc.method":"Recover","peer.address":"192.0.2.3:41752","grpc.code":"OK","grpc.time_ms":15.701} -{"level":"INFO","ts":"2022-09-08T10:27:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:87","msg":"RejoinClient stopped"} -``` diff --git a/docs/versioned_docs/version-2.17/workflows/s3proxy.md b/docs/versioned_docs/version-2.17/workflows/s3proxy.md deleted file mode 100644 index 121e8a461..000000000 --- a/docs/versioned_docs/version-2.17/workflows/s3proxy.md +++ /dev/null @@ -1,58 +0,0 @@ -# Install s3proxy - -Constellation includes a transparent client-side encryption proxy for [AWS S3](https://aws.amazon.com/de/s3/) and compatible stores. -s3proxy encrypts objects before sending them to S3 and automatically decrypts them on retrieval, without requiring changes to your application. 
-With s3proxy, you can use S3 for storage in a confidential way without having to trust the storage provider. - -## Limitations - -Currently, s3proxy has the following limitations: -- Only `PutObject` and `GetObject` requests are encrypted/decrypted by s3proxy. -By default, s3proxy will block requests that may expose unencrypted data to S3 (e.g. UploadPart). -The `allow-multipart` flag disables request blocking for evaluation purposes. -- Using the [Range](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html#API_GetObject_RequestSyntax) header on `GetObject` is currently not supported and will result in an error. - -These limitations will be removed with future iterations of s3proxy. -If you want to use s3proxy but these limitations stop you from doing so, consider [opening an issue](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&projects=&template=feature_request.yml). - -## Deployment - -You can add the s3proxy to your Constellation cluster as follows: -1. Add the Edgeless Systems chart repository: - ```bash - helm repo add edgeless https://helm.edgeless.systems/stable - helm repo update - ``` -2. Set ACCESS_KEY and ACCESS_SECRET to valid credentials you want s3proxy to use to interact with S3. -3. Deploy s3proxy: - ```bash - helm install s3proxy edgeless/s3proxy --set awsAccessKeyID="$ACCESS_KEY" --set awsSecretAccessKey="$ACCESS_SECRET" - ``` - -If you want to run a demo application, check out the [Filestash with s3proxy](../getting-started/examples/filestash-s3proxy.md) example. - - -## Technical details - -### Encryption - -s3proxy relies on Google's [Tink Cryptographic Library](https://developers.google.com/tink) to implement cryptographic operations securely. -The used cryptographic primitives are [NIST SP 800 38f](https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-38F.pdf) for key wrapping and [AES](https://en.wikipedia.org/wiki/Advanced_Encryption_Standard)-[GCM](https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Galois/counter_(GCM)) with 256 bit keys for data encryption. - -s3proxy uses [envelope encryption](https://cloud.google.com/kms/docs/envelope-encryption) to encrypt objects. -This means s3proxy uses a key encryption key (KEK) issued by the [KeyService](../architecture/microservices.md#keyservice) to encrypt data encryption keys (DEKs). -Each S3 object is encrypted with its own DEK. -The encrypted DEK is then saved as metadata of the encrypted object. -This enables key rotation of the KEK without re-encrypting the data in S3. -The approach also allows access to objects from different locations, as long as each location has access to the KEK. - -### Traffic interception - -To use s3proxy, you have to redirect your outbound S3 traffic to s3proxy. -This can either be done by modifying your client application or by changing the deployment of your application. - -The necessary deployment modifications are to add DNS redirection and a trusted TLS certificate to the client's trust store. -DNS redirection can be defined for each pod, allowing you to use s3proxy for one application without changing other applications in the same cluster. -Adding a trusted TLS certificate is necessary as clients communicate with s3proxy via HTTPS. -To have your client application trust s3proxy's TLS certificate, the certificate has to be added to the client's certificate trust store. -The [Filestash with s3proxy](../getting-started/examples/filestash-s3proxy.md) example shows how to do this. 
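As a rough sketch of the deployment-side redirection described above, you could add a `hostAliases` entry to a single deployment so that its pods resolve the S3 endpoint hostname to the s3proxy service. The service name and namespace (`s3proxy` in `kube-system`), the deployment name `my-app`, and the S3 hostname are assumptions; check the s3proxy chart and the linked Filestash example for the exact setup, including how to make the client trust s3proxy's TLS certificate.

```bash
# Look up the ClusterIP of the s3proxy service (service name and namespace are assumptions).
S3PROXY_IP=$(kubectl get svc s3proxy -n kube-system -o jsonpath='{.spec.clusterIP}')

# Make the pods of one deployment resolve the S3 endpoint hostname to s3proxy.
kubectl patch deployment my-app --type=json -p "[{
  \"op\": \"add\",
  \"path\": \"/spec/template/spec/hostAliases\",
  \"value\": [{\"ip\": \"${S3PROXY_IP}\", \"hostnames\": [\"s3.eu-central-1.amazonaws.com\"]}]
}]"
```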
diff --git a/docs/versioned_docs/version-2.17/workflows/sbom.md b/docs/versioned_docs/version-2.17/workflows/sbom.md deleted file mode 100644 index 6c1702dee..000000000 --- a/docs/versioned_docs/version-2.17/workflows/sbom.md +++ /dev/null @@ -1,93 +0,0 @@ -# Consume software bill of materials (SBOMs) - - - ---- - -Constellation builds produce a [software bill of materials (SBOM)](https://www.ntia.gov/SBOM) for each generated [artifact](../architecture/microservices.md). -You can use SBOMs to make informed decisions about dependencies and vulnerabilities in a given application. Enterprises rely on SBOMs to maintain an inventory of used applications, which allows them to take data-driven approaches to managing risks related to vulnerabilities. - -SBOMs for Constellation are generated using [Syft](https://github.com/anchore/syft), signed using [Cosign](https://github.com/sigstore/cosign), and stored with the produced artifact. - -:::note -The public key for Edgeless Systems' long-term code-signing key is: - -``` ------BEGIN PUBLIC KEY----- -MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEf8F1hpmwE+YCFXzjGtaQcrL6XZVT -JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== ------END PUBLIC KEY----- -``` - -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). - -Make sure the key is available in a file named `cosign.pub` to execute the following examples. -::: - -## Verify and download SBOMs - -The following sections detail how to work with each type of artifact to verify and extract the SBOM. - -### Constellation CLI - -The SBOM for Constellation CLI is made available on the [GitHub release page](https://github.com/edgelesssys/constellation/releases). The SBOM (`constellation.spdx.sbom`) and corresponding signature (`constellation.spdx.sbom.sig`) are valid for each Constellation CLI for a given version, regardless of architecture and operating system. - -```bash -curl -LO https://github.com/edgelesssys/constellation/releases/download/v2.2.0/constellation.spdx.sbom -curl -LO https://github.com/edgelesssys/constellation/releases/download/v2.2.0/constellation.spdx.sbom.sig -cosign verify-blob --key cosign.pub --signature constellation.spdx.sbom.sig constellation.spdx.sbom -``` - -### Container Images - -SBOMs for container images are [attached to the image using Cosign](https://docs.sigstore.dev/cosign/signing/other_types/#sboms-software-bill-of-materials) and uploaded to the same registry. 
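Before verifying anything, you can optionally list which signatures and attestations are attached to an image with `cosign tree`. This is just a sketch; the image reference below uses ordinary tag syntax and is illustrative:

```bash
# List supply chain artifacts (signatures, attestations, SBOMs) attached to an image
cosign tree ghcr.io/edgelesssys/constellation/verification-service:v2.2.0
```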
- -As a consumer, use cosign to download and verify the SBOM: - -```bash -# Verify and download the attestation statement -cosign verify-attestation ghcr.io/edgelesssys/constellation/verification-service@v2.2.0 --type 'https://cyclonedx.org/bom' --key cosign.pub --output-file verification-service.att.json -# Extract SBOM from attestation statement -jq -r .payload verification-service.att.json | base64 -d > verification-service.cyclonedx.sbom -``` - -A successful verification should result in similar output: - -```shell-session -$ cosign verify-attestation ghcr.io/edgelesssys/constellation/verification-service@v2.2.0 --type 'https://cyclonedx.org/bom' --key cosign.pub --output-file verification-service.sbom - -Verification for ghcr.io/edgelesssys/constellation/verification-service@v2.2.0 -- -The following checks were performed on each of these signatures: - - The cosign claims were validated - - The signatures were verified against the specified public key -$ jq -r .payload verification-service.sbom | base64 -d > verification-service.cyclonedx.sbom -``` - -:::note - -This example considers only the `verification-service`. The same approach works for all containers in the [Constellation container registry](https://github.com/orgs/edgelesssys/packages?repo_name=constellation). - -::: - - - -## Vulnerability scanning - -You can use a plethora of tools to consume SBOMs. This section provides suggestions for tools that are popular and known to produce reliable results, but any tool that consumes [SPDX](https://spdx.dev/) or [CycloneDX](https://cyclonedx.org/) files should work. - -Syft is able to [convert between the two formats](https://github.com/anchore/syft#format-conversion-experimental) in case you require a specific type. - -### Grype - -[Grype](https://github.com/anchore/grype) is a CLI tool that lends itself well for integration into CI/CD systems or local developer machines. It's also able to consume the signed attestation statement directly and does the verification in one go. - -```bash -grype att:verification-service.sbom --key cosign.pub --add-cpes-if-none -q -``` - -### Dependency Track - -[Dependency Track](https://dependencytrack.org/) is one of the oldest and most mature solutions when it comes to managing software inventory and vulnerabilities. Once imported, it continuously scans SBOMs for new vulnerabilities. It supports the CycloneDX format and provides direct guidance on how to comply with [U.S. Executive Order 14028](https://docs.dependencytrack.org/usage/executive-order-14028/). diff --git a/docs/versioned_docs/version-2.17/workflows/scale.md b/docs/versioned_docs/version-2.17/workflows/scale.md deleted file mode 100644 index 28f19e3f1..000000000 --- a/docs/versioned_docs/version-2.17/workflows/scale.md +++ /dev/null @@ -1,122 +0,0 @@ -# Scale your cluster - -Constellation provides all features of a Kubernetes cluster including scaling and autoscaling. - -## Worker node scaling - -### Autoscaling - -Constellation comes with autoscaling disabled by default. To enable autoscaling, find the scaling group of -worker nodes: - -```bash -kubectl get scalinggroups -o json | yq '.items | .[] | select(.spec.role == "Worker") | [{"name": .metadata.name, "nodeGoupName": .spec.nodeGroupName}]' -``` - -This will output a list of scaling groups with the corresponding cloud provider name (`name`) and the cloud provider agnostic name of the node group (`nodeGroupName`). 
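If you prefer not to copy the name by hand, a small sketch like the following (assuming the jq-style `yq` v4 used above) stores the first worker scaling group's name in a shell variable for the next steps:

```bash
# Sketch: pick the first worker scaling group and keep its name for later commands
worker_group=$(kubectl get scalinggroups -o json | yq '[.items[] | select(.spec.role == "Worker")] | .[0].metadata.name')
echo "$worker_group"
```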
- -Then, patch the `autoscaling` field of the scaling group resource with the desired `name` to `true`: - -```bash -# Replace with the name of the scaling group you want to enable autoscaling for -worker_group= -kubectl patch scalinggroups $worker_group --patch '{"spec":{"autoscaling": true}}' --type='merge' -kubectl get scalinggroup $worker_group -o jsonpath='{.spec}' | yq -P -``` - -The cluster autoscaler now automatically provisions additional worker nodes so that all pods have a place to run. -You can configure the minimum and maximum number of worker nodes in the scaling group by patching the `min` or -`max` fields of the scaling group resource: - -```bash -kubectl patch scalinggroups $worker_group --patch '{"spec":{"max": 5}}' --type='merge' -kubectl get scalinggroup $worker_group -o jsonpath='{.spec}' | yq -P -``` - -The cluster autoscaler will now never provision more than 5 worker nodes. - -If you want to see the autoscaling in action, try to add a deployment with a lot of replicas, like the -following Nginx deployment. The number of replicas needed to trigger the autoscaling depends on the size of -and count of your worker nodes. Wait for the rollout of the deployment to finish and compare the number of -worker nodes before and after the deployment: - -```bash -kubectl create deployment nginx --image=nginx --replicas 150 -kubectl -n kube-system get nodes -kubectl rollout status deployment nginx -kubectl -n kube-system get nodes -``` - -### Manual scaling - -Alternatively, you can manually scale your cluster up or down: - - - - -1. Go to Auto Scaling Groups and select the worker ASG to scale up. -2. Click **Edit** -3. Set the new (increased) **Desired capacity** and **Update**. - - - - -1. Find your Constellation resource group. -2. Select the `scale-set-workers`. -3. Go to **settings** and **scaling**. -4. Set the new **instance count** and **save**. - - - - -1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). -2. **Edit** the **worker** instance group. -3. Set the new **number of instances** and **save**. - - - - -Dynamic cluster scaling isn't yet supported for STACKIT. -Support will be introduced in one of the upcoming releases. - - - - -## Control-plane node scaling - -Control-plane nodes can **only be scaled manually and only scaled up**! - -To increase the number of control-plane nodes, follow these steps: - - - - -1. Go to Auto Scaling Groups and select the control-plane ASG to scale up. -2. Click **Edit** -3. Set the new (increased) **Desired capacity** and **Update**. - - - - -1. Find your Constellation resource group. -2. Select the `scale-set-controlplanes`. -3. Go to **settings** and **scaling**. -4. Set the new (increased) **instance count** and **save**. - - - - -1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). -2. **Edit** the **control-plane** instance group. -3. Set the new (increased) **number of instances** and **save**. - - - - -Dynamic cluster scaling isn't yet supported for STACKIT. -Support will be introduced in one of the upcoming releases. - - - - -If you scale down the number of control-planes nodes, the removed nodes won't be able to exit the `etcd` cluster correctly. This will endanger the quorum that's required to run a stable Kubernetes control plane. 
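If you're unsure about the current state of the control plane, a quick check like the following (a sketch; label selectors may differ depending on your Kubernetes setup) shows the control-plane nodes and the etcd pods backing the quorum:

```bash
# Sketch: list control-plane nodes and the etcd members backing the quorum
kubectl get nodes -l node-role.kubernetes.io/control-plane
kubectl -n kube-system get pods -l component=etcd -o wide
```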
diff --git a/docs/versioned_docs/version-2.17/workflows/storage.md b/docs/versioned_docs/version-2.17/workflows/storage.md deleted file mode 100644 index a5c52be90..000000000 --- a/docs/versioned_docs/version-2.17/workflows/storage.md +++ /dev/null @@ -1,281 +0,0 @@ -# Use persistent storage - -Persistent storage in Kubernetes requires cloud-specific configuration. -For abstraction of container storage, Kubernetes offers [volumes](https://kubernetes.io/docs/concepts/storage/volumes/), -allowing users to mount storage solutions directly into containers. -The [Container Storage Interface (CSI)](https://kubernetes-csi.github.io/docs/) is the standard interface for exposing arbitrary block and file storage systems into containers in Kubernetes. -Cloud service providers (CSPs) offer their own CSI-based solutions for cloud storage. - -## Confidential storage - -Most cloud storage solutions support encryption, such as [GCE Persistent Disks (PD)](https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek). -Constellation supports the available CSI-based storage options for Kubernetes engines in AWS, Azure, GCP, and STACKIT. -However, their encryption takes place in the storage backend and is managed by the CSP. -Thus, using the default CSI drivers for these storage types means trusting the CSP with your persistent data. - -To address this, Constellation provides CSI drivers for AWS EBS, Azure Disk, GCE PD, and OpenStack Cinder, offering [encryption on the node level](../architecture/keys.md#storage-encryption). They enable transparent encryption for persistent volumes without needing to trust the cloud backend. Plaintext data never leaves the confidential VM context, offering you confidential storage. - -For more details see [encrypted persistent storage](../architecture/encrypted-storage.md). - -## CSI drivers - -Constellation supports the following drivers, which offer node-level encryption and optional integrity protection. - - - - -**Constellation CSI driver for AWS Elastic Block Store** -Mount [Elastic Block Store](https://aws.amazon.com/ebs/) storage volumes into your Constellation cluster. -Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-aws-ebs-csi-driver) for more information. - - - - -**Constellation CSI driver for Azure Disk**: -Mount Azure [Disk Storage](https://azure.microsoft.com/en-us/services/storage/disks/#overview) into your Constellation cluster. -See the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-azuredisk-csi-driver) for more information. -Since Azure Disks are mounted as `ReadWriteOnce`, they're only available to a single pod. - - - - -**Constellation CSI driver for GCP Persistent Disk**: -Mount [Persistent Disk](https://cloud.google.com/persistent-disk) block storage into your Constellation cluster. -Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-gcp-compute-persistent-disk-csi-driver) for more information. - - - - -**Constellation CSI driver for STACKIT / OpenStack Cinder** -Mount [Cinder](https://docs.openstack.org/cinder/latest/) block storage volumes into your Constellation cluster. 
-Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-cloud-provider-openstack) for more information. - - - - -Note that in case the options above aren't a suitable solution for you, Constellation is compatible with all other CSI-based storage options. For example, you can use [AWS EFS](https://docs.aws.amazon.com/en_en/eks/latest/userguide/efs-csi.html), [Azure Files](https://docs.microsoft.com/en-us/azure/storage/files/storage-files-introduction), or [GCP Filestore](https://cloud.google.com/filestore) with Constellation out of the box. Constellation is just not providing transparent encryption on the node level for these storage types yet. - -## Installation - -The Constellation CLI automatically installs Constellation's CSI driver for the selected CSP in your cluster. -If you don't need a CSI driver or wish to deploy your own, you can disable the automatic installation by setting `deployCSIDriver` to `false` in your Constellation config file. - - - - -AWS comes with two storage classes by default. - -* `encrypted-rwo` - * Uses [SSDs of `gp3` type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html) - * ext-4 filesystem - * Encryption of all data written to disk -* `integrity-encrypted-rwo` - * Uses [SSDs of `gp3` type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html) - * ext-4 filesystem - * Encryption of all data written to disk - * Integrity protection of data written to disk - -For more information on encryption algorithms and key sizes, refer to [cryptographic algorithms](../architecture/encrypted-storage.md#cryptographic-algorithms). - -:::info - -The default storage class is set to `encrypted-rwo` for performance reasons. -If you want integrity-protected storage, set the `storageClassName` parameter of your persistent volume claim to `integrity-encrypted-rwo`. - -Alternatively, you can create your own storage class with integrity protection enabled by adding `csi.storage.k8s.io/fstype: ext4-integrity` to the class `parameters`. -Or use another filesystem by specifying another file system type with the suffix `-integrity`, e.g., `csi.storage.k8s.io/fstype: xfs-integrity`. - -Note that volume expansion isn't supported for integrity-protected disks. - -::: - - - - -Azure comes with two storage classes by default. - -* `encrypted-rwo` - * Uses [Standard SSDs](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#standard-ssds) - * ext-4 filesystem - * Encryption of all data written to disk -* `integrity-encrypted-rwo` - * Uses [Premium SSDs](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#premium-ssds) - * ext-4 filesystem - * Encryption of all data written to disk - * Integrity protection of data written to disk - -For more information on encryption algorithms and key sizes, refer to [cryptographic algorithms](../architecture/encrypted-storage.md#cryptographic-algorithms). - -:::info - -The default storage class is set to `encrypted-rwo` for performance reasons. -If you want integrity-protected storage, set the `storageClassName` parameter of your persistent volume claim to `integrity-encrypted-rwo`. - -Alternatively, you can create your own storage class with integrity protection enabled by adding `csi.storage.k8s.io/fstype: ext4-integrity` to the class `parameters`. 
-Or use another filesystem by specifying another file system type with the suffix `-integrity`, e.g., `csi.storage.k8s.io/fstype: xfs-integrity`. - -Note that volume expansion isn't supported for integrity-protected disks. - -::: - - - - -GCP comes with two storage classes by default. - -* `encrypted-rwo` - * Uses [standard persistent disks](https://cloud.google.com/compute/docs/disks#pdspecs) - * ext-4 filesystem - * Encryption of all data written to disk -* `integrity-encrypted-rwo` - * Uses [performance (SSD) persistent disks](https://cloud.google.com/compute/docs/disks#pdspecs) - * ext-4 filesystem - * Encryption of all data written to disk - * Integrity protection of data written to disk - -For more information on encryption algorithms and key sizes, refer to [cryptographic algorithms](../architecture/encrypted-storage.md#cryptographic-algorithms). - -:::info - -The default storage class is set to `encrypted-rwo` for performance reasons. -If you want integrity-protected storage, set the `storageClassName` parameter of your persistent volume claim to `integrity-encrypted-rwo`. - -Alternatively, you can create your own storage class with integrity protection enabled by adding `csi.storage.k8s.io/fstype: ext4-integrity` to the class `parameters`. -Or use another filesystem by specifying another file system type with the suffix `-integrity`, e.g., `csi.storage.k8s.io/fstype: xfs-integrity`. - -Note that volume expansion isn't supported for integrity-protected disks. - -::: - - - - -STACKIT comes with two storage classes by default. - -* `encrypted-rwo` - * Uses [disks of `storage_premium_perf1` type](https://docs.stackit.cloud/stackit/en/service-plans-blockstorage-75137974.html) - * ext-4 filesystem - * Encryption of all data written to disk -* `integrity-encrypted-rwo` - * Uses [disks of `storage_premium_perf1` type](https://docs.stackit.cloud/stackit/en/service-plans-blockstorage-75137974.html) - * ext-4 filesystem - * Encryption of all data written to disk - * Integrity protection of data written to disk - -For more information on encryption algorithms and key sizes, refer to [cryptographic algorithms](../architecture/encrypted-storage.md#cryptographic-algorithms). - -:::info - -The default storage class is set to `encrypted-rwo` for performance reasons. -If you want integrity-protected storage, set the `storageClassName` parameter of your persistent volume claim to `integrity-encrypted-rwo`. - -Alternatively, you can create your own storage class with integrity protection enabled by adding `csi.storage.k8s.io/fstype: ext4-integrity` to the class `parameters`. -Or use another filesystem by specifying another file system type with the suffix `-integrity`, e.g., `csi.storage.k8s.io/fstype: xfs-integrity`. - -Note that volume expansion isn't supported for integrity-protected disks. - -::: - - - - -1. Create a [persistent volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) - - A [persistent volume claim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) is a request for storage with certain properties. - It can refer to a storage class. - The following creates a persistent volume claim, requesting 20 GB of storage via the `encrypted-rwo` storage class: - - ```bash - cat < - ---- - -You can terminate your cluster using the CLI. For this, you need the Terraform state directory named [`constellation-terraform`](../reference/terraform.md) in the current directory. 
- -:::danger - -All ephemeral storage and state of your cluster will be lost. Make sure any data is safely stored in persistent storage. Constellation can recreate your cluster and the associated encryption keys, but won't backup your application data automatically. - -::: - - - -Terminate the cluster by running: - -```bash -constellation terminate -``` - -Or without confirmation (e.g., for automation purposes): - -```bash -constellation terminate --yes -``` - -This deletes all resources created by Constellation in your cloud environment. -All local files created by the `apply` command are deleted as well, except for `constellation-mastersecret.json` and the configuration file. - -:::caution - -Termination can fail if additional resources have been created that depend on the ones managed by Constellation. In this case, you need to delete these additional -resources manually. Just run the `terminate` command again afterward to continue the termination process of the cluster. - -::: - - - -Terminate the cluster by running: - -```bash -terraform destroy -``` - -Delete all files that are no longer needed: - -```bash -rm constellation-state.yaml constellation-admin.conf -``` - -Only the `constellation-mastersecret.json` and the configuration file remain. - - - diff --git a/docs/versioned_docs/version-2.17/workflows/terraform-provider.md b/docs/versioned_docs/version-2.17/workflows/terraform-provider.md deleted file mode 100644 index ed8f46eda..000000000 --- a/docs/versioned_docs/version-2.17/workflows/terraform-provider.md +++ /dev/null @@ -1,129 +0,0 @@ -# Use the Terraform provider - -The Constellation Terraform provider allows to manage the full lifecycle of a Constellation cluster (namely creation, upgrades, and deletion) via Terraform. -The provider is available through the [Terraform registry](https://registry.terraform.io/providers/edgelesssys/constellation/latest) and is released in lock-step with Constellation releases. - -## Prerequisites - -- a Linux / Mac operating system (ARM64/AMD64) -- a Terraform installation of version `v1.4.4` or above - -## Quick setup - -This example shows how to set up a Constellation cluster with the reference IAM and infrastructure setup. This setup is also used when creating a Constellation cluster through the Constellation CLI. You can either consume the IAM / infrastructure modules through a remote source (recommended) or local files. The latter requires downloading the infrastructure and IAM modules for the corresponding CSP from `terraform-modules.zip` on the [Constellation release page](https://github.com/edgelesssys/constellation/releases/latest) and placing them in the Terraform workspace directory. - -1. Create a directory (workspace) for your Constellation cluster. - - ```bash - mkdir constellation-workspace - cd constellation-workspace - ``` - -2. Use one of the [example configurations for using the Constellation Terraform provider](https://github.com/edgelesssys/constellation/tree/main/terraform-provider-constellation/examples/full) or create a `main.tf` file and fill it with the resources you want to create. The [Constellation Terraform provider documentation](https://registry.terraform.io/providers/edgelesssys/constellation/latest) offers thorough documentation on the resources and their attributes. -3. Initialize and apply the Terraform configuration. - - - - Initialize the providers and apply the configuration. 
- - ```bash - terraform init - terraform apply - ``` - - Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. - - - When creating a cluster on Azure, you need to manually patch the policy of the MAA provider before creating the Constellation cluster, as this feature isn't available in Azure's Terraform provider yet. The Constellation CLI provides a utility for patching, but you - can also do it manually. - - ```bash - terraform init - terraform apply -target module.azure_iam # adjust resource path if not using the example configuration - terraform apply -target module.azure_infrastructure # adjust resource path if not using the example configuration - constellation maa-patch $(terraform output -raw maa_url) # adjust output path / input if not using the example configuration or manually patch the resource - terraform apply -target constellation_cluster.azure_example # adjust resource path if not using the example configuration - ``` - - Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. - - Use the following policy if manually performing the patch. - - ``` - version= 1.0; - authorizationrules - { - [type=="x-ms-azurevm-default-securebootkeysvalidated", value==false] => deny(); - [type=="x-ms-azurevm-debuggersdisabled", value==false] => deny(); - // The line below was edited to use the MAA provider within Constellation. Do not edit manually. - //[type=="secureboot", value==false] => deny(); - [type=="x-ms-azurevm-signingdisabled", value==false] => deny(); - [type=="x-ms-azurevm-dbvalidated", value==false] => deny(); - [type=="x-ms-azurevm-dbxvalidated", value==false] => deny(); - => permit(); - }; - issuancerules - { - }; - ``` - - - - Initialize the providers and apply the configuration. - - ```bash - terraform init - terraform apply - ``` - - Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. - - - Initialize the providers and apply the configuration. - - ```bash - terraform init - terraform apply - ``` - - Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. - - - -4. Connect to the cluster. - - ```bash - terraform output -raw kubeconfig > constellation-admin.conf - export KUBECONFIG=$(realpath constellation-admin.conf) - ``` - -## Bringing your own infrastructure - -Instead of using the example infrastructure used in the [quick setup](#quick-setup), you can also provide your own infrastructure. -If you need a starting point for a custom infrastructure setup, you can download the infrastructure / IAM Terraform modules for the respective CSP from the Constellation [GitHub releases](https://github.com/edgelesssys/constellation/releases). You can modify and extend the modules per your requirements, while keeping the basic functionality intact. 
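For example, one way to fetch the reference modules as a starting point is to download and unpack the `terraform-modules.zip` release asset. The URL pattern below assumes GitHub's standard `releases/latest/download` redirect; pin a specific release for production use:

```bash
# Sketch: download the reference Terraform modules to use as a base for customization
curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/terraform-modules.zip
unzip terraform-modules.zip -d constellation-terraform-modules
```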
-The module contains: - -- `{csp}`: cloud resources the cluster runs on -- `iam/{csp}`: IAM resources used within the cluster - -When upgrading your cluster, make sure to check the Constellation release notes for potential breaking changes in the reference infrastructure / IAM modules that need to be considered. - -## Cluster upgrades - -:::tip -Also see the [general documentation on cluster upgrades](./upgrade.md). -::: - -The steps for applying the upgrade are as follows: - -1. Update the version constraint of the Constellation Terraform provider in the `required_providers` block in your Terraform configuration. -2. If you explicitly set any of the version attributes of the provider's resources and data sources (e.g. `image_version` or `constellation_microservice_version`), make sure to update them too. Refer to Constellation's [version support policy](https://github.com/edgelesssys/constellation/blob/main/dev-docs/workflows/versions-support.md) for more information on how each Constellation version and its dependencies are supported. -3. Update the IAM / infrastructure configuration. - - For [remote addresses as module sources](https://developer.hashicorp.com/terraform/language/modules/sources#fetching-archives-over-http), update the version number inside the address of the `source` field of the infrastructure / IAM module to the target version. - - For [local paths as module sources](https://developer.hashicorp.com/terraform/language/modules/sources#local-paths) or when [providing your own infrastructure](#bringing-your-own-infrastructure), see the changes made in the reference modules since the upgrade's origin version and adjust your infrastructure / IAM configuration accordingly. -4. Upgrade the Terraform module and provider dependencies and apply the targeted configuration. - -```bash - terraform init -upgrade - terraform apply -``` diff --git a/docs/versioned_docs/version-2.17/workflows/troubleshooting.md b/docs/versioned_docs/version-2.17/workflows/troubleshooting.md deleted file mode 100644 index 195bce1cc..000000000 --- a/docs/versioned_docs/version-2.17/workflows/troubleshooting.md +++ /dev/null @@ -1,151 +0,0 @@ -# Troubleshooting - -This section aids you in finding problems when working with Constellation. - -## Common issues - -### Issues with creating new clusters - -When you create a new cluster, you should always use the [latest release](https://github.com/edgelesssys/constellation/releases/latest). -If something doesn't work, check out the [known issues](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22). - -### Azure: Resource Providers can't be registered - -On Azure, you may receive the following error when running `apply` or `terminate` with limited IAM permissions: - -```shell-session -Error: Error ensuring Resource Providers are registered. - -Terraform automatically attempts to register the Resource Providers it supports to -ensure it's able to provision resources. - -If you don't have permission to register Resource Providers you may wish to use the -"skip_provider_registration" flag in the Provider block to disable this functionality. - -[...] -``` - -To continue, please ensure that the [required resource providers](../getting-started/install.md#required-permissions) have been registered in your subscription by your administrator. - -Afterward, set `ARM_SKIP_PROVIDER_REGISTRATION=true` as an environment variable and either run `apply` or `terminate` again. 
-For example: - -```bash -ARM_SKIP_PROVIDER_REGISTRATION=true constellation apply -``` - -Or alternatively, for `terminate`: - -```bash -ARM_SKIP_PROVIDER_REGISTRATION=true constellation terminate -``` - -### Azure: Can't update attestation policy - -On Azure, you may receive the following error when running `apply` from within an Azure environment, e.g., an Azure VM: - -```shell-session -An error occurred: patching policies: updating attestation policy: unexpected status code: 403 Forbidden -``` - -The problem occurs because the Azure SDK we use internally attempts to [authenticate towards the Azure API with the managed identity of your current environment instead of the Azure CLI token](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DefaultAzureCredential). - -We decided not to deviate from this behavior and comply with the ordering of credentials. - -A solution is to add the [required permissions](../getting-started/install.md#required-permissions) to the managed identity of your environment. For example, the managed identity of your Azure VM, instead of the account that you've authenticated with in the Azure CLI. - -If your setup requires a change in the ordering of credentials, please open an issue and explain your desired behavior. - - - -### Nodes fail to join with error `untrusted measurement value` - -This error indicates that a node's [attestation statement](../architecture/attestation.md) contains measurements that don't match the trusted values expected by the [JoinService](../architecture/microservices.md#joinservice). -This may for example happen if the cloud provider updates the VM's firmware such that it influences the [runtime measurements](../architecture/attestation.md#runtime-measurements) in an unforeseen way. -A failed upgrade due to an erroneous attestation config can also cause this error. -You can change the expected measurements to resolve the failure. - -:::caution - -Attestation and trusted measurements are crucial for the security of your cluster. -Be extra careful when manually changing these settings. -When in doubt, check if the encountered [issue is known](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22) or [contact support](https://github.com/edgelesssys/constellation#support). - -::: - -:::tip - -During an upgrade with modified attestation config, a backup of the current configuration is stored in the `join-config` config map in the `kube-system` namespace under the `attestationConfig_backup` key. To restore the old attestation config after a failed upgrade, replace the value of `attestationConfig` with the value from `attestationConfig_backup`: - -```bash -kubectl patch configmaps -n kube-system join-config -p "{\"data\":{\"attestationConfig\":\"$(kubectl get configmaps -n kube-system join-config -o "jsonpath={.data.attestationConfig_backup}")\"}}" -``` - -::: - -You can use the `apply` command to change measurements of a running cluster: - -1. Modify the `measurements` key in your local `constellation-conf.yaml` to the expected values. -2. Run `constellation apply`. - -Keep in mind that running `apply` also applies any version changes from your config to the cluster. 
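A minimal sketch of this workflow, assuming you want to pin the signed reference values of the image configured in `constellation-conf.yaml` rather than editing the measurements by hand:

```bash
# Sketch: refresh the expected measurements in the config, then apply them to the cluster
constellation config fetch-measurements
constellation apply
```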
- -You can run these commands to learn about the versions currently configured in the cluster: - -- Kubernetes API server version: `kubectl get nodeversion constellation-version -o json -n kube-system | jq .spec.kubernetesClusterVersion` -- image version: `kubectl get nodeversion constellation-version -o json -n kube-system | jq .spec.imageVersion` -- microservices versions: `helm list --filter 'constellation-services' -n kube-system` - -### Upgrading Kubernetes resources fails - -Constellation manages its Kubernetes resources using Helm. -When applying an upgrade, the charts that are about to be installed, and a values override file `overrides.yaml`, -are saved to disk in your current workspace under `constellation-upgrade/upgrade-/helm-charts/`. -If upgrading the charts using the Constellation CLI fails, you can review these charts and try to manually apply the upgrade. - -:::caution - -Changing and manually applying the charts may destroy cluster resources and can lead to broken Constellation deployments. -Proceed with caution and when in doubt, -check if the encountered [issue is known](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22) or [contact support](https://github.com/edgelesssys/constellation#support). - -::: - -## Diagnosing issues - -### Logs - -To get started on diagnosing issues with Constellation, it's often helpful to collect logs from nodes, pods, or other resources in the cluster. Most logs are available through Kubernetes' standard -[logging interfaces](https://kubernetes.io/docs/concepts/cluster-administration/logging/). - -To debug issues occurring at boot time of the nodes, you can use the serial console interface of the CSP while the machine boots to get a read-only view of the boot logs. - -Apart from that, Constellation also offers further [observability integrations](../architecture/observability.md). - -### Node shell access - -Debugging via a shell on a node is [directly supported by Kubernetes](https://kubernetes.io/docs/tasks/debug/debug-application/debug-running-pod/#node-shell-session). - -1. Figure out which node to connect to: - - ```bash - kubectl get nodes - # or to see more information, such as IPs: - kubectl get nodes -o wide - ``` - -2. Connect to the node: - - ```bash - kubectl debug node/constell-worker-xksa0-000000 -it --image=busybox - ``` - - You will be presented with a prompt. - - The nodes file system is mounted at `/host`. - -3. Once finished, clean up the debug pod: - - ```bash - kubectl delete pod node-debugger-constell-worker-xksa0-000000-bjthj - ``` diff --git a/docs/versioned_docs/version-2.17/workflows/trusted-launch.md b/docs/versioned_docs/version-2.17/workflows/trusted-launch.md deleted file mode 100644 index d6d01d8eb..000000000 --- a/docs/versioned_docs/version-2.17/workflows/trusted-launch.md +++ /dev/null @@ -1,54 +0,0 @@ -# Use Azure trusted launch VMs - -Constellation also supports [trusted launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch) on Microsoft Azure. Trusted launch VMs don't offer the same level of security as Confidential VMs, but are available in more regions and in larger quantities. The main difference between trusted launch VMs and normal VMs is that the former offer vTPM-based remote attestation. When used with trusted launch VMs, Constellation relies on vTPM-based remote attestation to verify nodes. 
- -:::caution - -Trusted launch VMs don't provide runtime encryption and don't keep the cloud service provider (CSP) out of your trusted computing base. - -::: - -Constellation supports trusted launch VMs with instance types `Standard_D*_v4` and `Standard_E*_v4`. Run `constellation config instance-types` for a list of all supported instance types. - -## VM images - -Azure currently doesn't support [community galleries for trusted launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/share-gallery-community). Thus, you need to manually import the Constellation node image into your cloud subscription. - -The latest image is available at `https://cdn.confidential.cloud/constellation/images/azure/trusted-launch/v2.2.0/constellation.img`. Simply adjust the version number to download a newer version. - -After you've downloaded the image, create a resource group `constellation-images` in your Azure subscription and import the image. -You can use a script to do this: - -```bash -wget https://raw.githubusercontent.com/edgelesssys/constellation/main/hack/importAzure.sh -chmod +x importAzure.sh -AZURE_IMAGE_VERSION=2.2.0 AZURE_RESOURCE_GROUP_NAME=constellation-images AZURE_IMAGE_FILE=./constellation.img ./importAzure.sh -``` - -The script creates the following resources: - -1. A new image gallery with the default name `constellation-import` -2. A new image definition with the default name `constellation` -3. The actual image with the provided version. In this case `2.2.0` - -Once the import is completed, use the `ID` of the image version in your `constellation-conf.yaml` for the `image` field. Set `confidentialVM` to `false`. - -Fetch the image measurements: - -```bash -IMAGE_VERSION=2.2.0 -URL=https://public-edgeless-constellation.s3.us-east-2.amazonaws.com//communitygalleries/constellationcvm-b3782fa0-0df7-4f2f-963e-fc7fc42663df/images/constellation/versions/$IMAGE_VERSION/measurements.yaml -constellation config fetch-measurements -u$URL -s$URL.sig -``` - -:::info - -The [`constellation apply`](create.md) command will issue a warning because manually imported images aren't recognized as production grade images: - -```shell-session -Configured image doesn't look like a released production image. Double check image before deploying to production. -``` - -Please ignore this warning. - -::: diff --git a/docs/versioned_docs/version-2.17/workflows/upgrade.md b/docs/versioned_docs/version-2.17/workflows/upgrade.md deleted file mode 100644 index 7348c0dbc..000000000 --- a/docs/versioned_docs/version-2.17/workflows/upgrade.md +++ /dev/null @@ -1,110 +0,0 @@ -# Upgrade your cluster - -Constellation provides an easy way to upgrade all components of your cluster, without disrupting it's availability. -Specifically, you can upgrade the Kubernetes version, the nodes' image, and the Constellation microservices. -You configure the desired versions in your local Constellation configuration and trigger upgrades with the `apply` command. -To learn about available versions you use the `upgrade check` command. -Which versions are available depends on the CLI version you are using. - -## Update the CLI - -Each CLI comes with a set of supported microservice and Kubernetes versions. -Most importantly, a given CLI version can only upgrade a cluster of the previous minor version, but not older ones. -This means that you have to upgrade your CLI and cluster one minor version at a time. 
- -For example, if you are currently on CLI version v2.6 and the latest version is v2.8, you should - -* upgrade the CLI to v2.7, -* upgrade the cluster to v2.7, -* and only then continue upgrading the CLI (and the cluster) to v2.8 after. - -Also note that if your current Kubernetes version isn't supported by the next CLI version, use your current CLI to upgrade to a newer Kubernetes version first. - -To learn which Kubernetes versions are supported by a particular CLI, run [constellation config kubernetes-versions](../reference/cli.md#constellation-config-kubernetes-versions). - -## Migrate the configuration - -The Constellation configuration file is located in the file `constellation-conf.yaml` in your workspace. -Refer to the [migration reference](../reference/migration.md) to check if you need to update fields in your configuration file. -Use [`constellation config migrate`](../reference/cli.md#constellation-config-migrate) to automatically update an old config file to a new format. - -## Check for upgrades - -To learn which versions the current CLI can upgrade to and what's installed in your cluster, run: - -```bash -# Show possible upgrades -constellation upgrade check - -# Show possible upgrades and write them to config file -constellation upgrade check --update-config -``` - -You can either enter the reported target versions into your config manually or run the above command with the `--update-config` flag. -When using this flag, the `kubernetesVersion`, `image`, `microserviceVersion`, and `attestation` fields are overwritten with the smallest available upgrade. - -## Apply the upgrade - -Once you updated your config with the desired versions, you can trigger the upgrade with this command: - -```bash -constellation apply -``` - -Microservice upgrades will be finished within a few minutes, depending on the cluster size. -If you are interested, you can monitor pods restarting in the `kube-system` namespace with your tool of choice. - -Image and Kubernetes upgrades take longer. -For each node in your cluster, a new node has to be created and joined. -The process usually takes up to ten minutes per node. - -When applying an upgrade, the Helm charts for the upgrade as well as backup files of Constellation-managed Custom Resource Definitions, Custom Resources, and Terraform state are created. -You can use the Terraform state backup to restore previous resources in case an upgrade misconfigured or erroneously deleted a resource. -You can use the Custom Resource (Definition) backup files to restore Custom Resources and Definitions manually (e.g., via `kubectl apply`) if the automatic migration of those resources fails. -You can use the Helm charts to manually apply upgrades to the Kubernetes resources, should an upgrade fail. - -:::note - -For advanced users: the upgrade consists of several phases that can be individually skipped through the `--skip-phases` flag. -The phases are `infrastracture` for the cloud resource management through Terraform, `helm` for the chart management of the microservices, `image` for OS image upgrades, and `k8s` for Kubernetes version upgrades. - -::: - -## Check the status - -Upgrades are asynchronous operations. -After you run `apply`, it will take a while until the upgrade has completed. 
-To understand if an upgrade is finished, you can run: - -```bash -constellation status -``` - -This command displays the following information: - -* The installed services and their versions -* The image and Kubernetes version the cluster is expecting on each node -* How many nodes are up to date - -Here's an example output: - -```shell-session -Target versions: - Image: v2.6.0 - Kubernetes: v1.25.8 -Service versions: - Cilium: v1.12.1 - cert-manager: v1.10.0 - constellation-operators: v2.6.0 - constellation-services: v2.6.0 -Cluster status: Some node versions are out of date - Image: 23/25 - Kubernetes: 25/25 -``` - -This output indicates that the cluster is running Kubernetes version `1.25.8`, and all nodes have the appropriate binaries installed. -23 out of 25 nodes have already upgraded to the targeted image version of `2.6.0`, while two are still in progress. - -## Apply further upgrades - -After the upgrade is finished, you can run `constellation upgrade check` again to see if there are more upgrades available. If so, repeat the process. diff --git a/docs/versioned_docs/version-2.17/workflows/verify-cli.md b/docs/versioned_docs/version-2.17/workflows/verify-cli.md deleted file mode 100644 index e33569d37..000000000 --- a/docs/versioned_docs/version-2.17/workflows/verify-cli.md +++ /dev/null @@ -1,129 +0,0 @@ -# Verify the CLI - -:::info -This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. -::: - - - ---- - -Edgeless Systems uses [sigstore](https://www.sigstore.dev/) and [SLSA](https://slsa.dev) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/cosign/signing/overview/), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at `https://rekor.sigstore.dev`. - -:::note -The public key for Edgeless Systems' long-term code-signing key is: - -``` ------BEGIN PUBLIC KEY----- -MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEf8F1hpmwE+YCFXzjGtaQcrL6XZVT -JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== ------END PUBLIC KEY----- -``` - -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). -::: - -The Rekor transparency log is a public append-only ledger that verifies and records signatures and associated metadata. The Rekor transparency log enables everyone to observe the sequence of (software) signatures issued by Edgeless Systems and many other parties. The transparency log allows for the public identification of dubious or malicious signatures. - -You should always ensure that (1) your CLI executable was signed with the private key corresponding to the above public key and that (2) there is a corresponding entry in the Rekor transparency log. Both can be done as described in the following. - -:::info -You don't need to verify the Constellation node images. This is done automatically by your CLI and the rest of Constellation. -::: - -## Verify the signature - -:::info -This guide assumes Linux on an amd64 processor. The exact steps for other platforms differ slightly. -::: - -First, [install the Cosign CLI](https://docs.sigstore.dev/cosign/system_config/installation/). 
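One way to do this on Linux/amd64 is to fetch the release binary directly. This is a sketch; the asset name and installation path are assumptions, and the linked installation guide describes other options:

```bash
# Sketch: install the Cosign CLI from its GitHub releases
curl -LO https://github.com/sigstore/cosign/releases/latest/download/cosign-linux-amd64
sudo install -m 0755 cosign-linux-amd64 /usr/local/bin/cosign
cosign version
```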
Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example: - -```shell-session -$ cosign verify-blob --key https://edgeless.systems/es.pub --signature constellation-linux-amd64.sig constellation-linux-amd64 - -Verified OK -``` - -The above performs an offline verification of the provided public key, signature, and executable. To also verify that a corresponding entry exists in the public Rekor transparency log, add the variable `COSIGN_EXPERIMENTAL=1`: - -```shell-session -$ COSIGN_EXPERIMENTAL=1 cosign verify-blob --key https://edgeless.systems/es.pub --signature constellation-linux-amd64.sig constellation-linux-amd64 - -tlog entry verified with uuid: afaba7f6635b3e058888692841848e5514357315be9528474b23f5dcccb82b13 index: 3477047 -Verified OK -``` - -🏁 You now know that your CLI executable was officially released and signed by Edgeless Systems. - -### Optional: Manually inspect the transparency log - -To further inspect the public Rekor transparency log, [install the Rekor CLI](https://docs.sigstore.dev/logging/installation). A search for the CLI executable should give a single UUID. (Note that this UUID contains the UUID from the previous `cosign` command.) - -```shell-session -$ rekor-cli search --artifact constellation-linux-amd64 - -Found matching entries (listed by UUID): -362f8ecba72f4326afaba7f6635b3e058888692841848e5514357315be9528474b23f5dcccb82b13 -``` - -With this UUID you can get the full entry from the transparency log: - -```shell-session -$ rekor-cli get --uuid=362f8ecba72f4326afaba7f6635b3e058888692841848e5514357315be9528474b23f5dcccb82b13 - -LogID: c0d23d6ad406973f9559f3ba2d1ca01f84147d8ffc5b8445c224f98b9591801d -Index: 3477047 -IntegratedTime: 2022-09-12T22:28:16Z -UUID: afaba7f6635b3e058888692841848e5514357315be9528474b23f5dcccb82b13 -Body: { - "HashedRekordObj": { - "data": { - "hash": { - "algorithm": "sha256", - "value": "40e137b9b9b8204d672642fd1e181c6d5ccb50cfc5cc7fcbb06a8c2c78f44aff" - } - }, - "signature": { - "content": "MEUCIQCSER3mGj+j5Pr2kOXTlCIHQC3gT30I7qkLr9Awt6eUUQIgcLUKRIlY50UN8JGwVeNgkBZyYD8HMxwC/LFRWoMn180=", - "publicKey": { - "content": "LS0tLS1CRUdJTiBQVUJMSUMgS0VZLS0tLS0KTUZrd0V3WUhLb1pJemowQ0FRWUlLb1pJemowREFRY0RRZ0FFZjhGMWhwbXdFK1lDRlh6akd0YVFjckw2WFpWVApKbUVlNWlTTHZHMVN5UVNBZXc3V2RNS0Y2bzl0OGUyVEZ1Q2t6bE9oaGx3czJPSFdiaUZabkZXQ0Z3PT0KLS0tLS1FTkQgUFVCTElDIEtFWS0tLS0tCg==" - } - } - } -} -``` - -The field `publicKey` should contain Edgeless Systems' public key in Base64 encoding. - -You can get an exhaustive list of artifact signatures issued by Edgeless Systems via the following command: - -```bash -rekor-cli search --public-key https://edgeless.systems/es.pub --pki-format x509 -``` - -Edgeless Systems monitors this list to detect potential unauthorized use of its private key. - -## Verify the provenance - -Provenance attests that a software artifact was produced by a specific repository and build system invocation. For more information on provenance visit [slsa.dev](https://slsa.dev/provenance/v0.2) and learn about the [adoption of SLSA for Constellation](../reference/slsa.md). - -Just as checking its signature proves that the CLI hasn't been manipulated, checking the provenance proves that the artifact was produced by the expected build process and hasn't been tampered with. - -To verify the provenance, first install the [slsa-verifier](https://github.com/slsa-framework/slsa-verifier). 
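Analogous to the Cosign installation above, a sketch for Linux/amd64 (asset name and path are assumptions; check the project's README for the recommended method):

```bash
# Sketch: install slsa-verifier from its GitHub releases
curl -LO https://github.com/slsa-framework/slsa-verifier/releases/latest/download/slsa-verifier-linux-amd64
sudo install -m 0755 slsa-verifier-linux-amd64 /usr/local/bin/slsa-verifier
slsa-verifier version
```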
Then make sure you have the provenance file (`constellation.intoto.jsonl`) and Constellation CLI downloaded. Both are available on the [GitHub release page](https://github.com/edgelesssys/constellation/releases). - -:::info -The same provenance file is valid for all Constellation CLI executables of a given version independent of the target platform. -::: - -Use the verifier to perform the check: - -```shell-session -$ slsa-verifier verify-artifact constellation-linux-amd64 \ - --provenance-path constellation.intoto.jsonl \ - --source-uri github.com/edgelesssys/constellation - -Verified signature against tlog entry index 7771317 at URL: https://rekor.sigstore.dev/api/v1/log/entries/24296fb24b8ad77af2c04c8b4ae0d5bc5... -Verified build using builder https://github.com/slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@refs/tags/v1.2.2 at commit 18e9924b416323c37b9cdfd6cc728de8a947424a -PASSED: Verified SLSA provenance -``` diff --git a/docs/versioned_docs/version-2.17/workflows/verify-cluster.md b/docs/versioned_docs/version-2.17/workflows/verify-cluster.md deleted file mode 100644 index b6595ebf2..000000000 --- a/docs/versioned_docs/version-2.17/workflows/verify-cluster.md +++ /dev/null @@ -1,97 +0,0 @@ -# Verify your cluster - -Constellation's [attestation feature](../architecture/attestation.md) allows you, or a third party, to verify the integrity and confidentiality of your Constellation cluster. - -## Fetch measurements - -To verify the integrity of Constellation you need trusted measurements to verify against. For each node image released by Edgeless Systems, there are signed measurements, which you can download using the CLI: - -```bash -constellation config fetch-measurements -``` - -This command performs the following steps: - -1. Download the signed measurements for the configured image. By default, this will use Edgeless Systems' public measurement registry. -2. Verify the signature of the measurements. This will use Edgeless Systems' [public key](https://edgeless.systems/es.pub). -3. Write measurements into configuration file. - -The configuration file then contains a list of `measurements` similar to the following: - -```yaml -# ... -measurements: - 0: - expected: "0f35c214608d93c7a6e68ae7359b4a8be5a0e99eea9107ece427c4dea4e439cf" - warnOnly: false - 4: - expected: "02c7a67c01ec70ffaf23d73a12f749ab150a8ac6dc529bda2fe1096a98bf42ea" - warnOnly: false - 5: - expected: "e6949026b72e5045706cd1318889b3874480f7a3f7c5c590912391a2d15e6975" - warnOnly: true - 8: - expected: "0000000000000000000000000000000000000000000000000000000000000000" - warnOnly: false - 9: - expected: "f0a6e8601b00e2fdc57195686cd4ef45eb43a556ac1209b8e25d993213d68384" - warnOnly: false - 11: - expected: "0000000000000000000000000000000000000000000000000000000000000000" - warnOnly: false - 12: - expected: "da99eb6cf7c7fbb692067c87fd5ca0b7117dc293578e4fea41f95d3d3d6af5e2" - warnOnly: false - 13: - expected: "0000000000000000000000000000000000000000000000000000000000000000" - warnOnly: false - 14: - expected: "d7c4cc7ff7933022f013e03bdee875b91720b5b86cf1753cad830f95e791926f" - warnOnly: true - 15: - expected: "0000000000000000000000000000000000000000000000000000000000000000" - warnOnly: false -# ... -``` - -Each entry specifies the expected value of the Constellation node, and whether the measurement should be enforced (`warnOnly: false`), or only a warning should be logged (`warnOnly: true`). 
-By default, the subset of the [available measurements](../architecture/attestation.md#runtime-measurements) that can be locally reproduced and verified is enforced. - -During attestation, the validating side (CLI or [join service](../architecture/microservices.md#joinservice)) compares each measurement reported by the issuing side (first node or joining node) individually. -For mismatching measurements that have set `warnOnly` to `true` only a warning is emitted. -For mismatching measurements that have set `warnOnly` to `false` an error is emitted and attestation fails. -If attestation fails for a new node, it isn't permitted to join the cluster. - -## The *verify* command - -:::note -The steps below are purely optional. They're automatically executed by `constellation apply` when you initialize your cluster. The `constellation verify` command mostly has an illustrative purpose. -::: - -The `verify` command obtains and verifies an attestation statement from a running Constellation cluster. - -```bash -constellation verify [--cluster-id ...] -``` - -From the attestation statement, the command verifies the following properties: - -* The cluster is using the correct Confidential VM (CVM) type. -* Inside the CVMs, the correct node images are running. The node images are identified through the measurements obtained in the previous step. -* The unique ID of the cluster matches the one from your `constellation-state.yaml` file or passed in via `--cluster-id`. - -Once the above properties are verified, you know that you are talking to the right Constellation cluster and it's in a good and trustworthy shape. - -### Custom arguments - -The `verify` command also allows you to verify any Constellation deployment that you have network access to. For this you need the following: - -* The IP address of a running Constellation cluster's [VerificationService](../architecture/microservices.md#verificationservice). The `VerificationService` is exposed via a `NodePort` service using the external IP address of your cluster. Run `kubectl get nodes -o wide` and look for `EXTERNAL-IP`. -* The cluster's *clusterID*. See [cluster identity](../architecture/keys.md#cluster-identity) for more details. -* A `constellation-conf.yaml` file with the expected measurements of the cluster in your working directory. 
- -For example: - -```shell-session -constellation verify -e 192.0.2.1 --cluster-id Q29uc3RlbGxhdGlvbkRvY3VtZW50YXRpb25TZWNyZXQ= -``` diff --git a/docs/versioned_docs/version-2.18/_media/SLSA-Badge-full-level3.svg b/docs/versioned_docs/version-2.18/_media/SLSA-Badge-full-level3.svg deleted file mode 100644 index 7154d4a13..000000000 --- a/docs/versioned_docs/version-2.18/_media/SLSA-Badge-full-level3.svg +++ /dev/null @@ -1,47 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/docs/versioned_docs/version-2.18/_media/benchmark_fio_azure_bw.png b/docs/versioned_docs/version-2.18/_media/benchmark_fio_azure_bw.png deleted file mode 100644 index a82ebe2d0..000000000 Binary files a/docs/versioned_docs/version-2.18/_media/benchmark_fio_azure_bw.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.18/_media/benchmark_fio_azure_iops.png b/docs/versioned_docs/version-2.18/_media/benchmark_fio_azure_iops.png deleted file mode 100644 index 1723257a8..000000000 Binary files a/docs/versioned_docs/version-2.18/_media/benchmark_fio_azure_iops.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.18/_media/benchmark_fio_gcp_bw.png b/docs/versioned_docs/version-2.18/_media/benchmark_fio_gcp_bw.png deleted file mode 100644 index 4f0ecc94b..000000000 Binary files a/docs/versioned_docs/version-2.18/_media/benchmark_fio_gcp_bw.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.18/_media/benchmark_fio_gcp_iops.png b/docs/versioned_docs/version-2.18/_media/benchmark_fio_gcp_iops.png deleted file mode 100644 index 571086da2..000000000 Binary files a/docs/versioned_docs/version-2.18/_media/benchmark_fio_gcp_iops.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.18/_media/benchmark_net_p2p_azure.png b/docs/versioned_docs/version-2.18/_media/benchmark_net_p2p_azure.png deleted file mode 100644 index 9130349c7..000000000 Binary files a/docs/versioned_docs/version-2.18/_media/benchmark_net_p2p_azure.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.18/_media/benchmark_net_p2p_gcp.png b/docs/versioned_docs/version-2.18/_media/benchmark_net_p2p_gcp.png deleted file mode 100644 index a41557e96..000000000 Binary files a/docs/versioned_docs/version-2.18/_media/benchmark_net_p2p_gcp.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.18/_media/benchmark_net_p2svc_azure.png b/docs/versioned_docs/version-2.18/_media/benchmark_net_p2svc_azure.png deleted file mode 100644 index d83e17f5a..000000000 Binary files a/docs/versioned_docs/version-2.18/_media/benchmark_net_p2svc_azure.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.18/_media/benchmark_net_p2svc_gcp.png b/docs/versioned_docs/version-2.18/_media/benchmark_net_p2svc_gcp.png deleted file mode 100644 index 55916a1de..000000000 Binary files a/docs/versioned_docs/version-2.18/_media/benchmark_net_p2svc_gcp.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.18/_media/benchmark_vault/5replicas/max_latency.png b/docs/versioned_docs/version-2.18/_media/benchmark_vault/5replicas/max_latency.png deleted file mode 100644 index 696250181..000000000 Binary files a/docs/versioned_docs/version-2.18/_media/benchmark_vault/5replicas/max_latency.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.18/_media/benchmark_vault/5replicas/mean_latency.png b/docs/versioned_docs/version-2.18/_media/benchmark_vault/5replicas/mean_latency.png deleted file mode 100644 index 
3b43298ac..000000000 Binary files a/docs/versioned_docs/version-2.18/_media/benchmark_vault/5replicas/mean_latency.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.18/_media/benchmark_vault/5replicas/min_latency.png b/docs/versioned_docs/version-2.18/_media/benchmark_vault/5replicas/min_latency.png deleted file mode 100644 index 1046df67e..000000000 Binary files a/docs/versioned_docs/version-2.18/_media/benchmark_vault/5replicas/min_latency.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.18/_media/benchmark_vault/5replicas/p99_latency.png b/docs/versioned_docs/version-2.18/_media/benchmark_vault/5replicas/p99_latency.png deleted file mode 100644 index 0190118b2..000000000 Binary files a/docs/versioned_docs/version-2.18/_media/benchmark_vault/5replicas/p99_latency.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.18/_media/concept-constellation.svg b/docs/versioned_docs/version-2.18/_media/concept-constellation.svg deleted file mode 100644 index 30d32bf6d..000000000 --- a/docs/versioned_docs/version-2.18/_media/concept-constellation.svg +++ /dev/null @@ -1,460 +0,0 @@ - - diff --git a/docs/versioned_docs/version-2.18/_media/concept-managed.svg b/docs/versioned_docs/version-2.18/_media/concept-managed.svg deleted file mode 100644 index 5645a608f..000000000 --- a/docs/versioned_docs/version-2.18/_media/concept-managed.svg +++ /dev/null @@ -1,591 +0,0 @@ - - diff --git a/docs/versioned_docs/version-2.18/_media/constellation_oneline.svg b/docs/versioned_docs/version-2.18/_media/constellation_oneline.svg deleted file mode 100644 index 4e354958a..000000000 --- a/docs/versioned_docs/version-2.18/_media/constellation_oneline.svg +++ /dev/null @@ -1,52 +0,0 @@ - - - - - - - - diff --git a/docs/versioned_docs/version-2.18/_media/example-emojivoto.jpg b/docs/versioned_docs/version-2.18/_media/example-emojivoto.jpg deleted file mode 100644 index 4be0d5b26..000000000 Binary files a/docs/versioned_docs/version-2.18/_media/example-emojivoto.jpg and /dev/null differ diff --git a/docs/versioned_docs/version-2.18/_media/example-online-boutique.jpg b/docs/versioned_docs/version-2.18/_media/example-online-boutique.jpg deleted file mode 100644 index 026f0d865..000000000 Binary files a/docs/versioned_docs/version-2.18/_media/example-online-boutique.jpg and /dev/null differ diff --git a/docs/versioned_docs/version-2.18/_media/recovery-gcp-serial-console-link.png b/docs/versioned_docs/version-2.18/_media/recovery-gcp-serial-console-link.png deleted file mode 100644 index eb67f0e99..000000000 Binary files a/docs/versioned_docs/version-2.18/_media/recovery-gcp-serial-console-link.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.18/_media/tcb.svg b/docs/versioned_docs/version-2.18/_media/tcb.svg deleted file mode 100644 index e5bcb5b95..000000000 --- a/docs/versioned_docs/version-2.18/_media/tcb.svg +++ /dev/null @@ -1,535 +0,0 @@ - - diff --git a/docs/versioned_docs/version-2.18/architecture/attestation.md b/docs/versioned_docs/version-2.18/architecture/attestation.md deleted file mode 100644 index 9bd157460..000000000 --- a/docs/versioned_docs/version-2.18/architecture/attestation.md +++ /dev/null @@ -1,409 +0,0 @@ -# Attestation - -This page explains Constellation's attestation process and highlights the cornerstones of its trust model. - -## Terms - -The following lists terms and concepts that help to understand the attestation concept of Constellation. 
- -### Trusted Platform Module (TPM) - -A TPM chip is a dedicated tamper-resistant crypto-processor. -It can securely store artifacts such as passwords, certificates, encryption keys, or *runtime measurements* (more on this below). -When a TPM is implemented in software, it's typically called a *virtual* TPM (vTPM). - -### Runtime measurement - -A runtime measurement is a cryptographic hash of the memory pages of a so called *runtime component*. Runtime components of interest typically include a system's bootloader or OS kernel. - -### Platform Configuration Register (PCR) - -A Platform Configuration Register (PCR) is a memory location in the TPM that has some unique properties. -To store a new value in a PCR, the existing value is extended with a new value as follows: - -``` -PCR[N] = HASHalg( PCR[N] || ArgumentOfExtend ) -``` - -The PCRs are typically used to store runtime measurements. -The new value of a PCR is always an extension of the existing value. -Thus, storing the measurements of multiple components into the same PCR irreversibly links them together. - -### Measured boot - -Measured boot builds on the concept of chained runtime measurements. -Each component in the boot chain loads and measures the next component into the PCR before executing it. -By comparing the resulting PCR values against trusted reference values, the integrity of the entire boot chain and thereby the running system can be ensured. - -### Remote attestation (RA) - -Remote attestation is the process of verifying certain properties of an application or platform, such as integrity and confidentiality, from a remote location. -In the case of a measured boot, the goal is to obtain a signed attestation statement on the PCR values of the boot measurements. -The statement can then be verified and compared to a set of trusted reference values. -This way, the integrity of the platform can be ensured before sharing secrets with it. - -### Confidential virtual machine (CVM) - -Confidential computing (CC) is the protection of data in-use with hardware-based trusted execution environments (TEEs). -With CVMs, TEEs encapsulate entire virtual machines and isolate them against the hypervisor, other VMs, and direct memory access. -After loading the initial VM image into encrypted memory, the hypervisor calls for a secure processor to measure these initial memory pages. -The secure processor locks these pages and generates an attestation report on the initial page measurements. -CVM memory pages are encrypted with a key that resides inside the secure processor, which makes sure only the guest VM can access them. -The attestation report is signed by the secure processor and can be verified using remote attestation via the certificate authority of the hardware vendor. -Such an attestation statement guarantees the confidentiality and integrity of a CVM. - -### Attested TLS (aTLS) - -In a CC environment, attested TLS (aTLS) can be used to establish secure connections between two parties using the remote attestation features of the CC components. - -aTLS modifies the TLS handshake by embedding an attestation statement into the TLS certificate. -Instead of relying on a certificate authority, aTLS uses this attestation statement to establish trust in the certificate. - -The protocol can be used by clients to verify a server certificate, by a server to verify a client certificate, or for mutual verification (mutual aTLS). 
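As a rough, self-contained illustration of the PCR extend operation defined above (a sketch only, not Constellation's implementation: it assumes SHA-256 as the hash algorithm, hex-encoded values, and a hypothetical `component.bin` to be measured):

```shell-session
# Start from an all-zero PCR and measure a component
pcr="0000000000000000000000000000000000000000000000000000000000000000"
measurement="$(sha256sum component.bin | cut -d' ' -f1)"
# New PCR value = HASH( old PCR value || measurement ), computed over the raw bytes
pcr="$(printf '%s%s' "$pcr" "$measurement" | xxd -r -p | sha256sum | cut -d' ' -f1)"
echo "$pcr"
```

Repeating this for each component chains the measurements together, which is why a PCR value can only be reproduced by replaying the exact same sequence of extends.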
- -## Overview - -The challenge for Constellation is to lift a CVM's attestation statement to the Kubernetes software layer and make it end-to-end verifiable. -From there, Constellation needs to expand the attestation from a single CVM to the entire cluster. - -The [*JoinService*](microservices.md#joinservice) and [*VerificationService*](microservices.md#verificationservice) are where all runs together. -Internally, the *JoinService* uses remote attestation to securely join CVM nodes to the cluster. -Externally, the *VerificationService* provides an attestation statement for the cluster's CVMs and configuration. - -The following explains the details of both steps. - -## Node attestation - -The idea is that Constellation nodes should have verifiable integrity from the CVM hardware measurement up to the Kubernetes software layer. -The solution is a verifiable boot chain and an integrity-protected runtime environment. - -Constellation uses measured boot within CVMs, measuring each component in the boot process before executing it. -Outside of CC, this is usually implemented via TPMs. -CVM technologies differ in how they implement runtime measurements, but the general concepts are similar to those of a TPM. -For simplicity, TPM terminology like *PCR* is used in the following. - -When a Constellation node image boots inside a CVM, measured boot is used for all stages and components of the boot chain. -This process goes up to the root filesystem. -The root filesystem is mounted read-only with integrity protection. -For the details on the image and boot stages see the [image architecture](../architecture/images.md) documentation. -Any changes to the image will inevitably also change the corresponding PCR values. -To create a node attestation statement, the Constellation image obtains a CVM attestation statement from the hardware. -This includes the runtime measurements and thereby binds the measured boot results to the CVM hardware measurement. - -In addition to the image measurements, Constellation extends a PCR during the [initialization phase](../workflows/create.md) that irrevocably marks the node as initialized. -The measurement is created using the [*clusterID*](../architecture/keys.md#cluster-identity), tying all future attestation statements to this ID. -Thereby, an attestation statement is unique for every cluster and a node can be identified unambiguously as being initialized. - -To verify an attestation, the hardware's signature and a statement are verified first to establish trust in the contained runtime measurements. -If successful, the measurements are verified against the trusted values of the particular Constellation release version. -Finally, the measurement of the *clusterID* can be compared by calculating it with the [master secret](keys.md#master-secret). - -### Runtime measurements - -Constellation uses runtime measurements to implement the measured boot approach. -As stated above, the underlying hardware technology and guest firmware differ in their implementations of runtime measurements. -The following gives a detailed description of the available measurements in the different cloud environments. - -The runtime measurements consist of two types of values: - -* **Measurements produced by the cloud infrastructure and firmware of the CVM**: -These are measurements of closed-source firmware and other values controlled by the cloud provider. -While not being reproducible for the user, some of them can be compared against previously observed values. 
-Others may change frequently and aren't suitable for verification. -The [signed image measurements](#chain-of-trust) include measurements that are known, previously observed values. - -* **Measurements produced by the Constellation bootloader and boot chain**: -The Constellation Bootloader takes over from the CVM firmware and [measures the rest of the boot chain](images.md). -The Constellation [Bootstrapper](microservices.md#bootstrapper) is the first user mode component that runs in a Constellation image. -It extends PCR registers with the [IDs](keys.md#cluster-identity) of the cluster marking a node as initialized. - -Constellation allows to specify in the config which measurements should be enforced during the attestation process. -Enforcing non-reproducible measurements controlled by the cloud provider means that changes in these values require manual updates to the cluster's config. -By default, Constellation only enforces measurements that are stable values produced by the infrastructure or by Constellation directly. - - - - -Constellation uses the [vTPM](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html) (NitroTPM) feature of the [AWS Nitro System](http://aws.amazon.com/ec2/nitro/) on AWS for runtime measurements. - -The vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. -The VMs are attested by obtaining signed PCR values over the VM's boot configuration from the TPM and comparing them to a known, good state (measured boot). - -The following table lists all PCR values of the vTPM and the measured components. -It also lists what components of the boot chain did the measurements and if the value is reproducible and verifiable. -The latter means that the value can be generated offline and compared to the one in the vTPM. - -| PCR | Components | Measured by | Reproducible and verifiable | -| ----------- | ---------------------------------------------------------------- | -------------------------------------- | --------------------------- | -| 0 | Firmware | AWS | No | -| 1 | Firmware | AWS | No | -| 2 | Firmware | AWS | No | -| 3 | Firmware | AWS | No | -| 4 | Constellation Bootloader, Kernel, initramfs, Kernel command line | AWS, Constellation Bootloader | Yes | -| 5 | Firmware | AWS | No | -| 6 | Firmware | AWS | No | -| 7 | Secure Boot Policy | AWS, Constellation Bootloader | No | -| 8 | - | - | - | -| 9 | initramfs, Kernel command line | Linux Kernel | Yes | -| 10 | User space | Linux IMA | No[^1] | -| 11 | Unified Kernel Image components | Constellation Bootloader | Yes | -| 12 | Reserved | (User space, Constellation Bootloader) | Yes | -| 13 | Reserved | (Constellation Bootloader) | Yes | -| 14 | Secure Boot State | Constellation Bootloader | No | -| 15 | ClusterID | Constellation Bootstrapper | Yes | -| 16–23 | Unused | - | - | - - - - -Constellation uses the [vTPM](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch#vtpm) feature of Azure CVMs for runtime measurements. -This vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. -It provides a [measured boot](https://docs.microsoft.com/en-us/azure/security/fundamentals/measured-boot-host-attestation#measured-boot) verification that's based on the trusted launch feature of [Trusted Launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch). - -The following table lists all PCR values of the vTPM and the measured components. 
-It also lists what components of the boot chain did the measurements and if the value is reproducible and verifiable. -The latter means that the value can be generated offline and compared to the one in the vTPM. - -| PCR | Components | Measured by | Reproducible and verifiable | -| ----------- | ---------------------------------------------------------------- | -------------------------------------- | --------------------------- | -| 0 | Firmware | Azure | No | -| 1 | Firmware | Azure | No | -| 2 | Firmware | Azure | No | -| 3 | Firmware | Azure | No | -| 4 | Constellation Bootloader, Kernel, initramfs, Kernel command line | Azure, Constellation Bootloader | Yes | -| 5 | Reserved | Azure | No | -| 6 | VM Unique ID | Azure | No | -| 7 | Secure Boot State | Azure, Constellation Bootloader | No | -| 8 | - | - | - | -| 9 | initramfs, Kernel command line | Linux Kernel | Yes | -| 10 | User space | Linux IMA | No[^1] | -| 11 | Unified Kernel Image components | Constellation Bootloader | Yes | -| 12 | Reserved | (User space, Constellation Bootloader) | Yes | -| 13 | Reserved | (Constellation Bootloader) | Yes | -| 14 | Secure Boot State | Constellation Bootloader | No | -| 15 | ClusterID | Constellation Bootstrapper | Yes | -| 16–23 | Unused | - | - | - - - - -Constellation uses the [vTPM](https://cloud.google.com/compute/confidential-vm/docs/about-cvm) feature of CVMs on GCP for runtime measurements. -Note that this vTPM doesn't run inside the hardware-protected CVM context, but is emulated by the hypervisor. - -The vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. -It provides a [launch attestation report](https://cloud.google.com/compute/confidential-vm/docs/monitoring#about_launch_attestation_report_events) that's based on the measured boot feature of [Shielded VMs](https://cloud.google.com/compute/shielded-vm/docs/shielded-vm#measured-boot). - -The following table lists all PCR values of the vTPM and the measured components. -It also lists what components of the boot chain did the measurements and if the value is reproducible and verifiable. -The latter means that the value can be generated offline and compared to the one in the vTPM. - -| PCR | Components | Measured by | Reproducible and verifiable | -| ----------- | ---------------------------------------------------------------- | -------------------------------------- | --------------------------- | -| 0 | CVM version and technology | GCP | No | -| 1 | Firmware | GCP | No | -| 2 | Firmware | GCP | No | -| 3 | Firmware | GCP | No | -| 4 | Constellation Bootloader, Kernel, initramfs, Kernel command line | GCP, Constellation Bootloader | Yes | -| 5 | Disk GUID partition table | GCP | No | -| 6 | Disk GUID partition table | GCP | No | -| 7 | GCP Secure Boot Policy | GCP, Constellation Bootloader | No | -| 8 | - | - | - | -| 9 | initramfs, Kernel command line | Linux Kernel | Yes | -| 10 | User space | Linux IMA | No[^1] | -| 11 | Unified Kernel Image components | Constellation Bootloader | Yes | -| 12 | Reserved | (User space, Constellation Bootloader) | Yes | -| 13 | Reserved | (Constellation Bootloader) | Yes | -| 14 | Secure Boot State | Constellation Bootloader | No | -| 15 | ClusterID | Constellation Bootstrapper | Yes | -| 16–23 | Unused | - | - | - - - - -Constellation uses a hypervisor-based vTPM for runtime measurements. - -The vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. 
-The VMs are attested by obtaining signed PCR values over the VM's boot configuration from the TPM and comparing them to a known, good state (measured boot). - -The following table lists all PCR values of the vTPM and the measured components. -It also lists what components of the boot chain did the measurements and if the value is reproducible and verifiable. -The latter means that the value can be generated offline and compared to the one in the vTPM. - -| PCR | Components | Measured by | Reproducible and verifiable | -| ----------- | ---------------------------------------------------------------- | -------------------------------------- | --------------------------- | -| 0 | Firmware | STACKIT | No | -| 1 | Firmware | STACKIT | No | -| 2 | Firmware | STACKIT | No | -| 3 | Firmware | STACKIT | No | -| 4 | Constellation Bootloader, Kernel, initramfs, Kernel command line | STACKIT, Constellation Bootloader | Yes | -| 5 | Firmware | STACKIT | No | -| 6 | Firmware | STACKIT | No | -| 7 | Secure Boot Policy | STACKIT, Constellation Bootloader | No | -| 8 | - | - | - | -| 9 | initramfs, Kernel command line | Linux Kernel | Yes | -| 10 | User space | Linux IMA | No[^1] | -| 11 | Unified Kernel Image components | Constellation Bootloader | Yes | -| 12 | Reserved | (User space, Constellation Bootloader) | Yes | -| 13 | Reserved | (Constellation Bootloader) | Yes | -| 14 | Secure Boot State | Constellation Bootloader | No | -| 15 | ClusterID | Constellation Bootstrapper | Yes | -| 16–23 | Unused | - | - | - - - - -### CVM verification - -To verify the integrity of the received attestation statement, a chain of trust from the CVM technology to the interface providing the statement has to be established. -For verification of the CVM technology, Constellation may expose additional options in its config file. - - - - -On AWS, AMD SEV-SNP is used to provide runtime encryption to the VMs. -An SEV-SNP attestation report is used to establish trust in the VM. -You may customize certain parameters for verification of the attestation statement using the Constellation config file. - -* TCB versions - - You can set the minimum version numbers of components in the SEV-SNP TCB. - Use the latest versions to enforce that only machines with the most recent firmware updates are allowed to join the cluster. - Alternatively, you can set a lower minimum version to allow slightly out-of-date machines to still be able to join the cluster. - -* AMD Root Key Certificate - - This certificate is the root of trust for verifying the SEV-SNP certificate chain. - -* AMD Signing Key Certificate - - This is the intermediate certificate for verifying the SEV-SNP report's signature. - If it's not specified, the CLI fetches it from the AMD key distribution server. - - - - -On Azure, AMD SEV-SNP is used to provide runtime encryption to the VMs. -An SEV-SNP attestation report is used to establish trust in the vTPM running inside the VM. -You may customize certain parameters for verification of the attestation statement using the Constellation config file. - -* TCB versions - - You can set the minimum version numbers of components in the SEV-SNP TCB. - Use the latest versions to enforce that only machines with the most recent firmware updates are allowed to join the cluster. - Alternatively, you can set a lower minimum version to allow slightly out-of-date machines to still be able to join the cluster. - -* AMD Root Key Certificate - - This certificate is the root of trust for verifying the SEV-SNP certificate chain. 
- -* Firmware Signer - - This config option allows you to specify how the firmware signer should be verified. - More explicitly, it controls the verification of the `IDKeyDigest` value in the SEV-SNP attestation report. - You can provide a list of accepted key digests and specify a policy on how this list is compared against the reported `IDKeyDigest`. - - - - -On GCP, AMD SEV-SNP is used to provide runtime encryption to the VMs. -An SEV-SNP attestation report is used to establish trust in the VM. -You may customize certain parameters for verification of the attestation statement using the Constellation config file. - -* TCB versions - - You can set the minimum version numbers of components in the SEV-SNP TCB. - Use the latest versions to enforce that only machines with the most recent firmware updates are allowed to join the cluster. - Alternatively, you can set a lower minimum version to allow slightly out-of-date machines to still be able to join the cluster. - -* AMD Root Key Certificate - - This certificate is the root of trust for verifying the SEV-SNP certificate chain. - -* AMD Signing Key Certificate - - This is the intermediate certificate for verifying the SEV-SNP report's signature. - If it's not specified, the CLI fetches it from the AMD key distribution server. - - - - -On STACKIT, AMD SEV-ES is used to provide runtime encryption to the VMs. -The hypervisor-based vTPM is used to establish trust in the VM via [runtime measurements](#runtime-measurements). -There is no additional configuration available for STACKIT. - - - - -## Cluster attestation - -Cluster-facing, Constellation's [*JoinService*](microservices.md#joinservice) verifies each node joining the cluster given the configured ground truth runtime measurements. -User-facing, the [*VerificationService*](microservices.md#verificationservice) provides an interface to verify a node using remote attestation. -By verifying the first node during the [initialization](microservices.md#bootstrapper) and configuring the ground truth measurements that are subsequently enforced by the *JoinService*, the whole cluster is verified in a transitive way. - -### Cluster-facing attestation - -The *JoinService* is provided with the runtime measurements of the whitelisted Constellation image version as the ground truth. -During the initialization and the cluster bootstrapping, each node connects to the *JoinService* using [aTLS](#attested-tls-atls). -During the handshake, the node transmits an attestation statement including its runtime measurements. -The *JoinService* verifies that statement and compares the measurements against the ground truth. -For details of the initialization process check the [microservice descriptions](microservices.md). - -After the initialization, every node updates its runtime measurements with the *clusterID* value, marking it irreversibly as initialized. -When an initialized node tries to join another cluster, its measurements inevitably mismatch the measurements of an uninitialized node and it will be declined. - -### User-facing attestation - -The [*VerificationService*](microservices.md#verificationservice) provides an endpoint for obtaining its hardware-based remote attestation statement, which includes the runtime measurements. -A user can [verify](../workflows/verify-cluster.md) this statement and compare the measurements against the configured ground truth and, thus, verify the identity and integrity of all Constellation components and the cluster configuration. 
Subsequently, the user knows that the entire cluster is in the expected state and is trustworthy. - -## Putting it all together - -This section puts the aforementioned concepts together and illustrates how trust in a Constellation cluster is established and maintained. - -### CLI and node images - -It all starts with the CLI executable. The CLI is signed by Edgeless Systems. To ensure non-repudiability for CLI releases, Edgeless Systems publishes corresponding signatures to the public ledger of the [sigstore project](https://www.sigstore.dev/). There's a [step-by-step guide](../workflows/verify-cli.md) on how to verify CLI signatures based on sigstore. - -The CLI contains the latest runtime measurements of the Constellation node image for all supported cloud platforms. In case a different version of the node image is to be used, the corresponding runtime measurements can be fetched using the CLI's [fetch-measurements command](../reference/cli.md#constellation-config-fetch-measurements). This command downloads the runtime measurements and the corresponding signature from cdn.confidential.cloud. See, for example, the following files corresponding to node image v2.16.3: - -* [Measurements](https://cdn.confidential.cloud/constellation/v2/ref/-/stream/stable/v2.16.3/image/measurements.json) -* [Signature](https://cdn.confidential.cloud/constellation/v2/ref/-/stream/stable/v2.16.3/image/measurements.json.sig) - -The CLI contains the long-term public key of Edgeless Systems to verify the signature of downloaded runtime measurements. - -### Cluster creation - -When a cluster is [created](../workflows/create.md), the CLI automatically verifies the runtime measurements of the *first node* using remote attestation. Based on this, the CLI and the first node set up a temporary TLS connection. This [aTLS](#attested-tls-atls) connection is used for two things: - -1. The CLI sends the [master secret](../architecture/keys.md#master-secret) of the to-be-created cluster to the first node. The master secret is generated by the CLI. -2. The first node sends a [kubeconfig file](https://www.redhat.com/sysadmin/kubeconfig) with Kubernetes credentials to the CLI. - -After this, the aTLS connection is closed and the first node bootstraps the Kubernetes cluster. All subsequent interactions between the CLI and the cluster go via the [Kubernetes API](https://kubernetes.io/docs/concepts/overview/kubernetes-api/) server running inside the cluster. The CLI (and other tools like kubectl) use the credentials referenced by the kubeconfig file to authenticate themselves towards the Kubernetes API server and to establish an mTLS connection. - -The CLI connects to the Kubernetes API to write the runtime measurements for the applicable node image to etcd. The JoinService uses these runtime measurements to verify all nodes that join the cluster subsequently. - -### Chain of trust - -In summary, there's a chain of trust based on cryptographic signatures that goes from the user to the cluster via the CLI. This is illustrated in the following diagram.
- -```mermaid -flowchart LR - A[User]-- "verifies" -->B[CLI] - B[CLI]-- "verifies" -->C([Runtime measurements]) - D[Edgeless Systems]-- "signs" -->B[CLI] - D[Edgeless Systems]-- "signs" -->C([Runtime measurements]) - B[CLI]-- "verifies (remote attestation)" -->E[First node] - E[First node]-- "verifies (remote attestation)" -->F[Other nodes] - C([Runtime measurements]) -.-> E[First node] - C([Runtime measurements]) -.-> F[Other nodes] -``` - -### Upgrades - -Whenever a cluster is [upgraded](../workflows/upgrade.md) to a new version of the node image, the CLI sends the corresponding runtime measurements via the Kubernetes API server. The new runtime measurements are stored in etcd within the cluster and replace any previous runtime measurements. The new runtime measurements are then used automatically by the JoinService for the verification of new nodes. - -## References - -[^1]: Linux IMA produces runtime measurements of user-space binaries. -However, these measurements aren't deterministic and thus, PCR\[10] can't be compared to a constant value. -Instead, a policy engine must be used to verify the TPM event log against a policy. diff --git a/docs/versioned_docs/version-2.18/architecture/encrypted-storage.md b/docs/versioned_docs/version-2.18/architecture/encrypted-storage.md deleted file mode 100644 index f047fa4a9..000000000 --- a/docs/versioned_docs/version-2.18/architecture/encrypted-storage.md +++ /dev/null @@ -1,62 +0,0 @@ -# Encrypted persistent storage - -Confidential VMs provide runtime memory encryption to protect data in use. -In the context of Kubernetes, this is sufficient for the confidentiality and integrity of stateless services. -Consider a front-end web server, for example, that keeps all connection information cached in main memory. -No sensitive data is ever written to an insecure medium. -However, many real-world applications need some form of state or data-lake service that's connected to a persistent storage device and requires encryption at rest. -As described in [Use persistent storage](../workflows/storage.md), cloud service providers (CSPs) use the container storage interface (CSI) to make their storage solutions available to Kubernetes workloads. -These CSI storage solutions often support some sort of encryption. -For example, Google Cloud [encrypts data at rest by default](https://cloud.google.com/security/encryption/default-encryption), without any action required by the customer. - -## Cloud provider-managed encryption - -CSP-managed storage solutions encrypt the data in the cloud backend before writing it physically to disk. -In the context of confidential computing and Constellation, the CSP and its managed services aren't trusted. -Hence, cloud provider-managed encryption protects your data from offline hardware access to physical storage devices. -It doesn't protect it from anyone with infrastructure-level access to the storage backend or a malicious insider in the cloud platform. -Even with "bring your own key" or similar concepts, the CSP performs the encryption process with access to the keys and plaintext data. - -In the security model of Constellation, securing persistent storage and thereby data at rest requires that all cryptographic operations are performed inside a trusted execution environment. -Consequently, using CSP-managed encryption of persistent storage usually isn't an option. - -## Constellation-managed encryption - -Constellation provides CSI drivers for storage solutions in all major clouds with built-in encryption support. 
-Block storage provisioned by the CSP is [mapped](https://guix.gnu.org/manual/en/html_node/Mapped-Devices.html) using the [dm-crypt](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/dm-crypt.html), and optionally the [dm-integrity](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/dm-integrity.html), kernel modules, before it's formatted and accessed by the Kubernetes workloads. -All cryptographic operations happen inside the trusted environment of the confidential Constellation node. - -Note that for integrity-protected disks, [volume expansion](https://kubernetes.io/blog/2018/07/12/resizing-persistent-volumes-using-kubernetes/) isn't supported. - -By default the driver uses data encryption keys (DEKs) issued by the Constellation [*KeyService*](microservices.md#keyservice). -The DEKs are in turn derived from the Constellation's key encryption key (KEK), which is directly derived from the [master secret](keys.md#master-secret). -This is the recommended mode of operation, and also requires the least amount of setup by the cluster administrator. - -Alternatively, the driver can be configured to use a key management system to store and access KEKs and DEKs. - -Refer to [keys and cryptography](keys.md) for more details on key management in Constellation. - -Once deployed and configured, the CSI driver ensures transparent encryption and integrity of all persistent volumes provisioned via its storage class. -Data at rest is secured without any additional actions required by the developer. - -## Cryptographic algorithms - -This section gives an overview of the libraries, cryptographic algorithms, and their configurations, used in Constellation's CSI drivers. - -### dm-crypt - -To interact with the dm-crypt kernel module, Constellation uses [libcryptsetup](https://gitlab.com/cryptsetup/cryptsetup/). -New devices are formatted as [LUKS2](https://gitlab.com/cryptsetup/LUKS2-docs/-/tree/master) partitions with a sector size of 4096 bytes. -The used key derivation function is [Argon2id](https://datatracker.ietf.org/doc/html/rfc9106) with the [recommended parameters for memory-constrained environments](https://datatracker.ietf.org/doc/html/rfc9106#section-7.4) of 3 iterations and 64 MiB of memory, utilizing 4 parallel threads. -For encryption Constellation uses AES in XTS-Plain64. The key size is 512 bit. - -### dm-integrity - -To interact with the dm-integrity kernel module, Constellation uses [libcryptsetup](https://gitlab.com/cryptsetup/cryptsetup/). -When enabled, the used data integrity algorithm is [HMAC](https://datatracker.ietf.org/doc/html/rfc2104) with SHA256 as the hash function. -The tag size is 32 Bytes. - -## Encrypted S3 object storage - -Constellation comes with a service that you can use to transparently retrofit client-side encryption to existing applications that use S3 (AWS or compatible) for storage. -To learn more, check out the [s3proxy documentation](../workflows/s3proxy.md). diff --git a/docs/versioned_docs/version-2.18/architecture/images.md b/docs/versioned_docs/version-2.18/architecture/images.md deleted file mode 100644 index 8a9c51d36..000000000 --- a/docs/versioned_docs/version-2.18/architecture/images.md +++ /dev/null @@ -1,49 +0,0 @@ -# Constellation images - -Constellation uses a minimal version of Fedora as the operating system running inside confidential VMs. This Linux distribution is optimized for containers and designed to be stateless. -The Constellation images provide measured boot and an immutable filesystem. 
- -## Measured boot - -```mermaid -flowchart LR - Firmware --> Bootloader - Bootloader --> uki - subgraph uki[Unified Kernel Image] - Kernel[Kernel] - initramfs[Initramfs] - cmdline[Kernel Command Line] - end - uki --> rootfs[Root Filesystem] -``` - -Measured boot uses a Trusted Platform Module (TPM) to measure every part of the boot process. This allows for verification of the integrity of a running system at any point in time. To ensure correct measurements of every stage, each stage is responsible to measure the next stage before transitioning. - -### Firmware - -With confidential VMs, the firmware is the root of trust and is measured automatically at boot. After initialization, the firmware will load and measure the bootloader before executing it. - -### Bootloader - -The bootloader is the first modifiable part of the boot chain. The bootloader is tasked with loading the kernel, initramfs and setting the kernel command line. The Constellation bootloader measures these components before starting the kernel. - -### initramfs - -The initramfs is a small filesystem loaded to prepare the actual root filesystem. The Constellation initramfs maps the block device containing the root filesystem with [dm-verity](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/verity.html). The initramfs then mounts the root filesystem from the mapped block device. - -dm-verity provides integrity checking using a cryptographic hash tree. When a block is read, its integrity is checked by verifying the tree against a trusted root hash. The initramfs reads this root hash from the previously measured kernel command line. Thus, if any block of the root filesystem's device is modified on disk, trying to read the modified block will result in a kernel panic at runtime. - -After mounting the root filesystem, the initramfs will switch over and start the `init` process of the integrity-protected root filesystem. - -## State disk - -In addition to the read-only root filesystem, each Constellation node has a disk for storing state data. -This disk is mounted readable and writable by the initramfs and contains data that should persist across reboots. -Such data can contain sensitive information and, therefore, must be stored securely. -To that end, the state disk is protected by authenticated encryption. -See the section on [keys and encryption](keys.md#storage-encryption) for more information on the cryptographic primitives in use. - -## Kubernetes components - -During initialization, the [*Bootstrapper*](microservices.md#bootstrapper) downloads and verifies the [Kubernetes components](https://kubernetes.io/docs/concepts/overview/components/) as configured by the user. -They're stored on the state partition and can be updated once new releases need to be installed. diff --git a/docs/versioned_docs/version-2.18/architecture/keys.md b/docs/versioned_docs/version-2.18/architecture/keys.md deleted file mode 100644 index 553d9d4e2..000000000 --- a/docs/versioned_docs/version-2.18/architecture/keys.md +++ /dev/null @@ -1,131 +0,0 @@ -# Key management and cryptographic primitives - -Constellation protects and isolates your cluster and workloads. -To that end, cryptography is the foundation that ensures the confidentiality and integrity of all components. -Evaluating the security and compliance of Constellation requires a precise understanding of the cryptographic primitives and keys used. -The following gives an overview of the architecture and explains the technical details. 
- -## Confidential VMs - -Confidential VM (CVM) technology comes with hardware and software components for memory encryption, isolation, and remote attestation. -For details on the implementations and cryptographic soundness, refer to the hardware vendors' documentation and advisories. - -## Master secret - -The master secret is the cryptographic material used for deriving the [*clusterID*](#cluster-identity) and the *key encryption key (KEK)* for [storage encryption](#storage-encryption). -It's generated during the bootstrapping of a Constellation cluster. -It can either be managed by [Constellation](#constellation-managed-key-management) or an [external key management system](#user-managed-key-management). -In case of [recovery](#recovery-and-migration), the master secret allows to decrypt the state and recover a Constellation cluster. - -## Cluster identity - -The identity of a Constellation cluster is represented by cryptographic [measurements](attestation.md#runtime-measurements): - -The **base measurements** represent the identity of a valid, uninitialized Constellation node. -They depend on the node image, but are otherwise the same for every Constellation cluster. -On node boot, they're determined using the CVM's attestation mechanism and [measured boot up to the read-only root filesystem](images.md). - -The **clusterID** represents the identity of a single initialized Constellation cluster. -It's derived from the master secret and a cryptographically random salt and unique for every Constellation cluster. -The [Bootstrapper](microservices.md#bootstrapper) measures the *clusterID* into its own PCR before executing any code not measured as part of the *base measurements*. -See [Node attestation](attestation.md#node-attestation) for details. - -The remote attestation statement of a Constellation cluster combines the *base measurements* and the *clusterID* for a verifiable, unspoofable, unique identity. - -## Network encryption - -Constellation encrypts all cluster network communication using the [container network interface (CNI)](https://github.com/containernetworking/cni). -See [network encryption](networking.md) for more details. - -The Cilium agent running on each node establishes a secure [WireGuard](https://www.wireguard.com/) tunnel between it and all other known nodes in the cluster. -Each node creates its own [Curve25519](http://cr.yp.to/ecdh.html) encryption key pair and distributes its public key via Kubernetes. -A node uses another node's public key to decrypt and encrypt traffic from and to Cilium-managed endpoints running on that node. -Connections are always encrypted peer-to-peer using [ChaCha20](http://cr.yp.to/chacha.html) with [Poly1305](http://cr.yp.to/mac.html). -WireGuard implements [forward secrecy with key rotation every 2 minutes](https://lists.zx2c4.com/pipermail/wireguard/2017-December/002141.html). -Cilium supports [key rotation](https://docs.cilium.io/en/stable/security/network/encryption-ipsec/#key-rotation) for the long-term node keys via Kubernetes secrets. - -## Storage encryption - -Constellation supports transparent encryption of persistent storage. -The Linux kernel's device mapper-based encryption features are used to encrypt the data on the block storage level. 
-Currently, the following primitives are used for block storage encryption: - -* [dm-crypt](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/dm-crypt.html) -* [dm-integrity](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/dm-integrity.html) - -Adding primitives for integrity protection in the CVM attacker model is under active development and will be available in a future version of Constellation. -See [encrypted storage](encrypted-storage.md) for more details. - -As a cluster administrator, when creating a cluster, you can use the Constellation [installation program](orchestration.md) to select one of the following methods for key management: - -* Constellation-managed key management -* User-managed key management - -### Constellation-managed key management - -#### Key material and key derivation - -During the creation of a Constellation cluster, the cluster's master secret is used to derive a KEK. -This means creating two clusters with the same master secret will yield the same KEK. -Any data encryption key (DEK) is derived from the KEK via HKDF. -Note that the master secret is recommended to be unique for every cluster and shouldn't be reused (except in case of [recovering](../workflows/recovery.md) a cluster). - -#### State and storage - -The KEK is derived from the master secret during the initialization. -Subsequently, all other key material is derived from the KEK. -Given the same KEK, any DEK can be derived deterministically from a given identifier. -Hence, there is no need to store DEKs. They can be derived on demand. -After the KEK has been derived, it's stored in memory only and never leaves the CVM context. - -#### Availability - -Constellation-managed key management has the same availability as the underlying Kubernetes cluster. -Therefore, the KEK is stored in the [distributed Kubernetes etcd storage](https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/) to allow for unexpected but non-fatal (control-plane) node failure. -The etcd storage is backed by the encrypted and integrity-protected [state disk](images.md#state-disk) of the nodes. - -#### Recovery - -Constellation clusters can be recovered in the event of a disaster, even when all node machines have been stopped and need to be rebooted. -For details on the process, see the [recovery workflow](../workflows/recovery.md). - -### User-managed key management - -User-managed key management is under active development and will be available soon. -In scenarios where Constellation-managed key management isn't an option, this mode allows you to keep full control of your keys. -For example, compliance requirements may force you to keep your KEKs in an on-prem key management system (KMS). - -During the creation of a Constellation cluster, you specify a KEK present in a remote KMS. -This follows the common scheme of "bring your own key" (BYOK). -Constellation will support several KMSs for managing the storage and access of your KEK. -Initially, it will support the following KMSs: - -* [AWS KMS](https://aws.amazon.com/kms/) -* [GCP KMS](https://cloud.google.com/security-key-management) -* [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/#product-overview) -* [KMIP-compatible KMS](https://www.oasis-open.org/committees/tc_home.php?wg_abbrev=kmip) - -Storing the keys in Cloud KMS of AWS, Azure, or GCP binds the key usage to the particular cloud identity access management (IAM).
-In the future, Constellation will support remote attestation-based access policies for Cloud KMS once available. -Note that using a Cloud KMS limits the isolation and protection to the guarantees of the particular offering. - -KMIP support allows you to use your KMIP-compatible on-prem KMS and keep full control over your keys. -This follows the common scheme of "hold your own key" (HYOK). - -The KEK is used to encrypt per-data "data encryption keys" (DEKs). -DEKs are generated to encrypt your data before storing it on persistent storage. -After being encrypted by the KEK, the DEKs are stored on dedicated cloud storage for persistence. -Currently, Constellation supports the following cloud storage options: - -* [AWS S3](https://aws.amazon.com/s3/) -* [GCP Cloud Storage](https://cloud.google.com/storage) -* [Azure Blob Storage](https://azure.microsoft.com/en-us/services/storage/blobs/#overview) - -The DEKs are only present in plaintext form in the encrypted main memory of the CVMs. -Similarly, the cryptographic operations for encrypting data before writing it to persistent storage are performed in the context of the CVMs. - -#### Recovery and migration - -In the case of a disaster, the KEK can be used to decrypt the DEKs locally and subsequently use them to decrypt and retrieve the data. -In case of migration, configuring the same KEK will provide seamless migration of data. -Thus, only the DEK storage needs to be transferred to the new cluster alongside the encrypted data for seamless migration. diff --git a/docs/versioned_docs/version-2.18/architecture/microservices.md b/docs/versioned_docs/version-2.18/architecture/microservices.md deleted file mode 100644 index 90bae783b..000000000 --- a/docs/versioned_docs/version-2.18/architecture/microservices.md +++ /dev/null @@ -1,73 +0,0 @@ -# Microservices - -Constellation takes care of bootstrapping and initializing a Confidential Kubernetes cluster. -During the lifetime of the cluster, it handles day 2 operations such as key management, remote attestation, and updates. -These features are provided by several microservices: - -* The [Bootstrapper](microservices.md#bootstrapper) initializes a Constellation node and bootstraps the cluster -* The [JoinService](microservices.md#joinservice) joins new nodes to an existing cluster -* The [VerificationService](microservices.md#verificationservice) provides remote attestation functionality -* The [KeyService](microservices.md#keyservice) manages Constellation-internal keys - -The relations between microservices are shown in the following diagram: - -```mermaid -flowchart LR - subgraph admin [Admin's machine] - A[Constellation CLI] - end - subgraph img [Constellation OS image] - B[Constellation OS] - C[Bootstrapper] - end - subgraph Kubernetes - D[JoinService] - E[KeyService] - F[VerificationService] - end - A -- deploys --> - B -- starts --> C - C -- deploys --> D - C -- deploys --> E - C -- deploys --> F -``` - -## Bootstrapper - -The *Bootstrapper* is the first microservice launched after booting a Constellation node image. -It sets up that machine as a Kubernetes node and integrates that node into the Kubernetes cluster. -To this end, the *Bootstrapper* first downloads and verifies the [Kubernetes components](https://kubernetes.io/docs/concepts/overview/components/) at the configured versions. -The *Bootstrapper* tries to find an existing cluster and if successful, communicates with the [JoinService](microservices.md#joinservice) to join the node. 
-Otherwise, it waits for an initialization request to create a new Kubernetes cluster. - -## JoinService - -The *JoinService* runs as a [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) on each control-plane node. -New nodes (at cluster start, or later through autoscaling) send a request to the service over [attested TLS (aTLS)](attestation.md#attested-tls-atls). -The *JoinService* verifies the new node's certificate and attestation statement. -If attestation is successful, the new node is supplied with an encryption key from the [*KeyService*](microservices.md#keyservice) for its state disk, and a Kubernetes bootstrap token. - - -```mermaid -sequenceDiagram - participant New node - participant JoinService - New node->>JoinService: aTLS handshake (server side verification) - JoinService-->>New node: # - New node->>+JoinService: IssueJoinTicket(DiskUUID, NodeName, IsControlPlane) - JoinService->>+KeyService: GetDataKey(DiskUUID) - KeyService-->>-JoinService: DiskEncryptionKey - JoinService-->>-New node: DiskEncryptionKey, KubernetesJoinToken, ... -``` - -## VerificationService - -The *VerificationService* runs as a DaemonSet on each node. -It provides user-facing functionality for remote attestation during the cluster's lifetime via an endpoint for [verifying the cluster](attestation.md#cluster-attestation). -Read more about the hardware-based [attestation feature](attestation.md) of Constellation and how to [verify](../workflows/verify-cluster.md) a cluster on the client side. - -## KeyService - -The *KeyService* runs as a DaemonSet on each control-plane node. -It implements the key management for the [storage encryption keys](keys.md#storage-encryption) in Constellation. These keys are used for the [state disk](images.md#state-disk) of each node and the [transparently encrypted storage](encrypted-storage.md) for Kubernetes. -Depending on whether the [constellation-managed](keys.md#constellation-managed-key-management) or [user-managed](keys.md#user-managed-key-management) mode is used, the *KeyService* holds the key encryption key (KEK) directly or calls an external key management service (KMS) for key derivation, respectively. diff --git a/docs/versioned_docs/version-2.18/architecture/networking.md b/docs/versioned_docs/version-2.18/architecture/networking.md deleted file mode 100644 index e9cbdf029..000000000 --- a/docs/versioned_docs/version-2.18/architecture/networking.md +++ /dev/null @@ -1,22 +0,0 @@ -# Network encryption - -Constellation encrypts all pod communication using the [container network interface (CNI)](https://github.com/containernetworking/cni). -To that end, Constellation deploys, configures, and operates the [Cilium](https://cilium.io/) CNI plugin. -Cilium provides [transparent encryption](https://docs.cilium.io/en/stable/security/network/encryption) for all cluster traffic using either IPSec or [WireGuard](https://www.wireguard.com/). -Currently, Constellation only supports WireGuard as the encryption engine. -You can read more about the cryptographic soundness of WireGuard [in their white paper](https://www.wireguard.com/papers/wireguard.pdf). - -Cilium is actively working on implementing a feature called [`host-to-host`](https://github.com/cilium/cilium/pull/19401) encryption mode for WireGuard. -With `host-to-host`, all traffic between nodes will be tunneled via WireGuard (host-to-host, host-to-pod, pod-to-host, pod-to-pod). -Until the `host-to-host` feature is released, Constellation enables `pod-to-pod` encryption.
-This mode encrypts all traffic between Kubernetes pods using WireGuard tunnels. - -When using Cilium in the default setup but with encryption enabled, there is a [known issue](https://docs.cilium.io/en/v1.12/gettingstarted/encryption/#egress-traffic-to-not-yet-discovered-remote-endpoints-may-be-unencrypted) -that can cause pod-to-pod traffic to be unencrypted. -To mitigate this issue, Constellation adds a *strict* mode to Cilium's `pod-to-pod` encryption. -This mode changes the default behavior so that traffic destined for an unknown endpoint isn't sent out in plaintext but is dropped instead. -The strict mode distinguishes traffic that's sent to a pod from traffic that's destined for a cluster-external endpoint by considering the pod's CIDR range. - -Traffic originating from hosts isn't encrypted yet. -This mainly includes health checks from the Kubernetes API server. -Also, traffic proxied over the API server, e.g. via `kubectl port-forward`, isn't encrypted. diff --git a/docs/versioned_docs/version-2.18/architecture/observability.md b/docs/versioned_docs/version-2.18/architecture/observability.md deleted file mode 100644 index 0f4daffd4..000000000 --- a/docs/versioned_docs/version-2.18/architecture/observability.md +++ /dev/null @@ -1,74 +0,0 @@ -# Observability - -In Kubernetes, observability is the ability to gain insight into the behavior and performance of applications. -It helps identify and resolve issues more effectively, ensuring stability and performance of Kubernetes workloads, reducing downtime and outages, and improving efficiency. -The "three pillars of observability" are logs, metrics, and traces. - -In the context of Confidential Computing, observability is a delicate subject and needs to be applied such that it doesn't leak any sensitive information. -The following gives an overview of where and how you can apply standard observability tools in Constellation. - -## Cloud resource monitoring - -While inaccessible, Constellation's nodes are still visible as black box VMs to the hypervisor. -Resource consumption, such as memory and CPU utilization, can be monitored from the outside and observed via the cloud platforms directly. -Similarly, other resources, such as storage and network and their respective metrics, are visible via the cloud platform. - -## Metrics - -Metrics are numeric representations of data measured over intervals of time. They're essential for understanding system health and gaining insights using telemetry signals. - -By default, Constellation exposes the [metrics for Kubernetes system components](https://kubernetes.io/docs/concepts/cluster-administration/system-metrics/) inside the cluster. -Similarly, the [etcd metrics](https://etcd.io/docs/v3.5/metrics/) endpoints are exposed inside the cluster. -These [metrics endpoints can be disabled](https://kubernetes.io/docs/concepts/cluster-administration/system-metrics/#disabling-metrics). - -You can collect these cluster-internal metrics via tools such as [Prometheus](https://prometheus.io/) or the [Elastic Stack](https://www.elastic.co/de/elastic-stack/). - -Constellation's CNI Cilium also supports [metrics via Prometheus endpoints](https://docs.cilium.io/en/latest/observability/metrics/). -However, in Constellation, they're disabled by default and must be enabled first. - -## Logs - -Logs represent discrete events that usually describe what's happening with your service.
-The payload is an actual message emitted from your system along with a metadata section containing a timestamp, labels, and tracking identifiers. - -### System logs - -Detailed system-level logs are accessible via `/var/log` and [journald](https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html) on the nodes directly. -They can be collected from there, for example, via [Filebeat and Logstash](https://www.elastic.co/guide/en/beats/filebeat/current/logstash-output.html), which are tools of the [Elastic Stack](https://www.elastic.co/de/elastic-stack/). - -In case of an error during the initialization, the CLI automatically collects the [Bootstrapper](./microservices.md#bootstrapper) logs and returns these as a file for [troubleshooting](../workflows/troubleshooting.md). Here is an example of such an event: - -```shell-session -Cluster initialization failed. This error is not recoverable. -Terminate your cluster and try again. -Fetched bootstrapper logs are stored in "constellation-cluster.log" -``` - -### Kubernetes logs - -Constellation supports the [Kubernetes logging architecture](https://kubernetes.io/docs/concepts/cluster-administration/logging/). -By default, logs are written to the nodes' encrypted state disks. -These include the Pod and container logs and the [system component logs](https://kubernetes.io/docs/concepts/cluster-administration/logging/#system-component-logs). - -[Constellation services](microservices.md) run as Pods inside the `kube-system` namespace and use the standard container logging mechanism. -The same applies for the [Cilium Pods](https://docs.cilium.io/en/latest/operations/troubleshooting/#logs). - -You can collect logs from within the cluster via tools such as [Fluentd](https://github.com/fluent/fluentd), [Loki](https://github.com/grafana/loki), or the [Elastic Stack](https://www.elastic.co/de/elastic-stack/). - -## Traces - -Modern systems are implemented as interconnected complex and distributed microservices. Understanding request flows and system communications is challenging, mainly because all systems in a chain need to be modified to propagate tracing information. Distributed tracing is a new approach to increasing observability and understanding performance bottlenecks. A trace represents consecutive events that reflect an end-to-end request path in a distributed system. - -Constellation supports [traces for Kubernetes system components](https://kubernetes.io/docs/concepts/cluster-administration/system-traces/). -By default, they're disabled and need to be enabled first. - -Similarly, Cilium can be enabled to [export traces](https://cilium.io/use-cases/metrics-export/). - -You can collect these traces via tools such as [Jaeger](https://www.jaegertracing.io/) or [Zipkin](https://zipkin.io/). - -## Integrations - -Platforms and SaaS solutions such as Datadog, logz.io, Dynatrace, or New Relic facilitate the observability challenge for Kubernetes and provide all-in-one SaaS solutions. -They install agents into the cluster that collect metrics, logs, and tracing information and upload them into the data lake of the platform. -Technically, the agent-based approach is compatible with Constellation, and attaching these platforms is straightforward. -However, you need to evaluate if the exported data might violate Constellation's compliance and privacy guarantees by uploading them to a third-party platform. 
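As a minimal sketch of the standard container logging mechanism described in the Kubernetes logs section above (the Pod name is a placeholder; pick the service you're interested in from the Pod list):

```shell-session
# List the Constellation service Pods in the kube-system namespace
kubectl get pods -n kube-system
# Stream the logs of one of them, e.g. a Constellation microservice Pod
kubectl logs --namespace kube-system <pod-name> --follow
```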
diff --git a/docs/versioned_docs/version-2.18/architecture/orchestration.md b/docs/versioned_docs/version-2.18/architecture/orchestration.md deleted file mode 100644 index 3c8d529e7..000000000 --- a/docs/versioned_docs/version-2.18/architecture/orchestration.md +++ /dev/null @@ -1,83 +0,0 @@ -# Orchestrating Constellation clusters - -You can use the CLI to create a cluster on the supported cloud platforms. -The CLI provisions the resources in your cloud environment and initiates the initialization of your cluster. -It uses a set of parameters and an optional configuration file to manage your cluster installation. -The CLI is also used for updating your cluster. - -## Workspaces - -Each Constellation cluster has an associated *workspace*. -The workspace is where data such as the Constellation state and config files are stored. -Each workspace is associated with a single cluster and configuration. -The CLI stores state in the local filesystem making the current directory the active workspace. -Multiple clusters require multiple workspaces, hence, multiple directories. -Note that every operation on a cluster always has to be performed from the directory associated with its workspace. - -You may copy files from the workspace to other locations, -but you shouldn't move or delete them while the cluster is still being used. -The Constellation CLI takes care of managing the workspace. -Only when a cluster was terminated, and you are sure the files aren't needed anymore, should you remove a workspace. - -## Cluster creation process - -To allow for fine-grained configuration of your cluster and cloud environment, Constellation supports an extensive configuration file with strong defaults. [Generating the configuration file](../workflows/config.md) is typically the first thing you do in the workspace. - -Altogether, the following files are generated during the creation of a Constellation cluster and stored in the current workspace: - -* a configuration file -* a state file -* a Base64-encoded master secret -* [Terraform artifacts](../reference/terraform.md), stored in subdirectories -* a Kubernetes `kubeconfig` file. - -After the initialization of your cluster, the CLI will provide you with a Kubernetes `kubeconfig` file. -This file grants you access to your Kubernetes cluster and configures the [kubectl](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) tool. -In addition, the cluster's [identifier](orchestration.md#post-installation-configuration) is returned and stored in the state file. - -### Creation process details - -1. The CLI `apply` command first creates the confidential VM (CVM) resources in your cloud environment and configures the network -2. Each CVM boots the Constellation node image and measures every component in the boot chain -3. The first microservice launched in each node is the [*Bootstrapper*](microservices.md#bootstrapper) -4. The *Bootstrapper* waits until it either receives an initialization request or discovers an initialized cluster -5. The CLI then connects to the *Bootstrapper* of a selected node, sends the configuration, and initiates the initialization of the cluster -6. The *Bootstrapper* of **that** node [initializes the Kubernetes cluster](microservices.md#bootstrapper) and deploys the other Constellation [microservices](microservices.md) including the [*JoinService*](microservices.md#joinservice) -7. 
Subsequently, the *Bootstrappers* of the other nodes discover the initialized cluster and send join requests to the *JoinService* -8. As part of the join request each node includes an attestation statement of its boot measurements as authentication -9. The *JoinService* verifies the attestation statements and joins the nodes to the Kubernetes cluster -10. This process is repeated for every node joining the cluster later (e.g., through autoscaling) - -## Post-installation configuration - -Post-installation the CLI provides a configuration for [accessing the cluster using the Kubernetes API](https://kubernetes.io/docs/tasks/administer-cluster/access-cluster-api/). -The `kubeconfig` file provides the credentials and configuration for connecting and authenticating to the API server. -Once configured, orchestrate the Kubernetes cluster via `kubectl`. - -After the initialization, the CLI will present you with a couple of tokens: - -* The [*master secret*](keys.md#master-secret) (stored in the `constellation-mastersecret.json` file by default) -* The [*clusterID*](keys.md#cluster-identity) of your cluster in Base64 encoding - -You can read more about these values and their meaning in the guide on [cluster identity](keys.md#cluster-identity). - -The *master secret* must be kept secret and can be used to [recover your cluster](../workflows/recovery.md). -Instead of managing this secret manually, you can [use your key management solution of choice](keys.md#user-managed-key-management) with Constellation. - -The *clusterID* uniquely identifies a cluster and can be used to [verify your cluster](../workflows/verify-cluster.md). - -## Upgrades - -Constellation images and microservices may need to be upgraded to new versions during the lifetime of a cluster. -Constellation implements a rolling update mechanism ensuring no downtime of the control or data plane. -You can upgrade a Constellation cluster with a single operation by using the CLI. -For step-by-step instructions on how to do this, refer to [Upgrade your cluster](../workflows/upgrade.md). - -### Attestation of upgrades - -With every new image, corresponding measurements are released. -During an update procedure, the CLI provides new measurements to the [JoinService](microservices.md#joinservice) securely. -New measurements for an updated image are automatically pulled and verified by the CLI following the [supply chain security concept](attestation.md#chain-of-trust) of Constellation. -The [attestation section](attestation.md#cluster-facing-attestation) describes in detail how these measurements are then used by the JoinService for the attestation of nodes. - - diff --git a/docs/versioned_docs/version-2.18/architecture/overview.md b/docs/versioned_docs/version-2.18/architecture/overview.md deleted file mode 100644 index 386f93b2f..000000000 --- a/docs/versioned_docs/version-2.18/architecture/overview.md +++ /dev/null @@ -1,30 +0,0 @@ -# Overview - -Constellation is a cloud-based confidential orchestration platform. -The foundation of Constellation is Kubernetes and therefore shares the same technology stack and architecture principles. -To learn more about Constellation and Kubernetes, see [product overview](../overview/product.md). - -## About orchestration and updates - -As a cluster administrator, you can use the [Constellation CLI](orchestration.md) to install and deploy a cluster. -Updates are provided in accordance with the [support policy](versions.md). 
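As a quick illustration of this CLI-driven workflow, the following sketch strings together the commands used in the getting-started guides later in this document. It isn't a complete guide; the `gcp` provider is only an example, and cloud deployments additionally require the IAM setup described there.

```bash
# A dedicated directory becomes the cluster's workspace
mkdir constellation-workspace && cd constellation-workspace

# Generate the configuration file (constellation-conf.yaml); `gcp` is just an example
constellation config generate gcp

# For cloud providers, create the IAM resources first, e.g. with
# `constellation iam create gcp ... --update-config` (see the getting-started guide)

# Create the cloud infrastructure and initialize the cluster
constellation apply -y

# Use the returned kubeconfig to administrate the cluster
export KUBECONFIG="$PWD/constellation-admin.conf"
kubectl get nodes

# Tear the cluster down again once it's no longer needed
constellation terminate
```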
- -## About microservices and attestation - -Constellation manages the nodes and network in your cluster. All nodes are bootstrapped by the [*Bootstrapper*](microservices.md#bootstrapper). They're verified and authenticated by the [*JoinService*](microservices.md#joinservice) before being added to the cluster and the network. Finally, the entire cluster can be verified via the [*VerificationService*](microservices.md#verificationservice) using [remote attestation](attestation.md). - -## About node images and verified boot - -Constellation comes with operating system images for Kubernetes control-plane and worker nodes. -They're highly optimized for running containerized workloads and specifically prepared for running inside confidential VMs. -You can learn more about [the images](images.md) and how verified boot ensures their integrity during boot and beyond. - -## About key management and cryptographic primitives - -Encryption of data at-rest, in-transit, and in-use is the fundamental building block for confidential computing and Constellation. Learn more about the [keys and cryptographic primitives](keys.md) used in Constellation, [encrypted persistent storage](encrypted-storage.md), and [network encryption](networking.md). - -## About observability - -Observability in Kubernetes refers to the capability to troubleshoot issues using telemetry signals such as logs, metrics, and traces. -In the realm of Confidential Computing, it's crucial that observability aligns with confidentiality, necessitating careful implementation. -Learn more about the [observability capabilities in Constellation](./observability.md). diff --git a/docs/versioned_docs/version-2.18/architecture/versions.md b/docs/versioned_docs/version-2.18/architecture/versions.md deleted file mode 100644 index 30d9d28e2..000000000 --- a/docs/versioned_docs/version-2.18/architecture/versions.md +++ /dev/null @@ -1,21 +0,0 @@ -# Versions and support policy - -All components of Constellation use a three-digit version number of the form `v<MAJOR>.<MINOR>.<PATCH>`. -The components are released in lock step, usually on the first Tuesday of every month. This release primarily introduces new features, but may also include security or performance improvements. The `MINOR` version will be incremented as part of this release. - -Additional `PATCH` releases may be created on demand to fix security issues or bugs before the next `MINOR` release window. - -New releases are published on [GitHub](https://github.com/edgelesssys/constellation/releases). - -## Kubernetes support policy - -Constellation is aligned to the [version support policy of Kubernetes](https://kubernetes.io/releases/version-skew-policy/#supported-versions), and therefore usually supports the most recent three minor versions. -When a new minor version of Kubernetes is released, support is added to the next Constellation release, and that version then supports four Kubernetes versions. -Subsequent Constellation releases drop support for the oldest (and deprecated) Kubernetes version.
- -The following Kubernetes versions are currently supported: - - -* v1.28.13 -* v1.29.8 -* v1.30.4 diff --git a/docs/versioned_docs/version-2.18/getting-started/examples.md b/docs/versioned_docs/version-2.18/getting-started/examples.md deleted file mode 100644 index fded84980..000000000 --- a/docs/versioned_docs/version-2.18/getting-started/examples.md +++ /dev/null @@ -1,6 +0,0 @@ -# Examples - -After you [installed the CLI](install.md) and [created your first cluster](first-steps.md), you're ready to deploy applications. Why not start with one of the following examples? -* [Emojivoto](examples/emojivoto.md): a simple but fun web application -* [Online Boutique](examples/online-boutique.md): an e-commerce demo application by Google consisting of 11 separate microservices -* [Horizontal Pod Autoscaling](examples/horizontal-scaling.md): an example demonstrating Constellation's autoscaling capabilities diff --git a/docs/versioned_docs/version-2.18/getting-started/examples/emojivoto.md b/docs/versioned_docs/version-2.18/getting-started/examples/emojivoto.md deleted file mode 100644 index 2bbe27917..000000000 --- a/docs/versioned_docs/version-2.18/getting-started/examples/emojivoto.md +++ /dev/null @@ -1,22 +0,0 @@ -# Emojivoto -[Emojivoto](https://github.com/BuoyantIO/emojivoto) is a simple and fun application that's well suited to test the basic functionality of your cluster. - - - -emojivoto - Web UI - - - -1. Deploy the application: - ```bash - kubectl apply -k github.com/BuoyantIO/emojivoto/kustomize/deployment - ``` -2. Wait until it becomes available: - ```bash - kubectl wait --for=condition=available --timeout=60s -n emojivoto --all deployments - ``` -3. Forward the web service to your machine: - ```bash - kubectl -n emojivoto port-forward svc/web-svc 8080:80 - ``` -4. Visit [http://localhost:8080](http://localhost:8080) diff --git a/docs/versioned_docs/version-2.18/getting-started/examples/filestash-s3proxy.md b/docs/versioned_docs/version-2.18/getting-started/examples/filestash-s3proxy.md deleted file mode 100644 index b9a394256..000000000 --- a/docs/versioned_docs/version-2.18/getting-started/examples/filestash-s3proxy.md +++ /dev/null @@ -1,107 +0,0 @@ - -# Deploying Filestash - -Filestash is a web frontend for different storage backends, including S3. -It's a useful application to showcase s3proxy in action. - -1. Deploy s3proxy as described in [Deployment](../../workflows/s3proxy.md#deployment). -2. 
Create a deployment file for Filestash with one pod: - -```sh -cat << EOF > "deployment-filestash.yaml" -apiVersion: apps/v1 -kind: Deployment -metadata: - name: filestash -spec: - replicas: 1 - selector: - matchLabels: - app: filestash - template: - metadata: - labels: - app: filestash - spec: - hostAliases: - - ip: $(kubectl get svc s3proxy-service -o=jsonpath='{.spec.clusterIP}') - hostnames: - - "s3.us-east-1.amazonaws.com" - - "s3.us-east-2.amazonaws.com" - - "s3.us-west-1.amazonaws.com" - - "s3.us-west-2.amazonaws.com" - - "s3.eu-north-1.amazonaws.com" - - "s3.eu-south-1.amazonaws.com" - - "s3.eu-south-2.amazonaws.com" - - "s3.eu-west-1.amazonaws.com" - - "s3.eu-west-2.amazonaws.com" - - "s3.eu-west-3.amazonaws.com" - - "s3.eu-central-1.amazonaws.com" - - "s3.eu-central-2.amazonaws.com" - - "s3.ap-northeast-1.amazonaws.com" - - "s3.ap-northeast-2.amazonaws.com" - - "s3.ap-northeast-3.amazonaws.com" - - "s3.ap-east-1.amazonaws.com" - - "s3.ap-southeast-1.amazonaws.com" - - "s3.ap-southeast-2.amazonaws.com" - - "s3.ap-southeast-3.amazonaws.com" - - "s3.ap-southeast-4.amazonaws.com" - - "s3.ap-south-1.amazonaws.com" - - "s3.ap-south-2.amazonaws.com" - - "s3.me-south-1.amazonaws.com" - - "s3.me-central-1.amazonaws.com" - - "s3.il-central-1.amazonaws.com" - - "s3.af-south-1.amazonaws.com" - - "s3.ca-central-1.amazonaws.com" - - "s3.sa-east-1.amazonaws.com" - containers: - - name: filestash - image: machines/filestash:latest - ports: - - containerPort: 8334 - volumeMounts: - - name: ca-cert - mountPath: /etc/ssl/certs/kube-ca.crt - subPath: kube-ca.crt - volumes: - - name: ca-cert - secret: - secretName: s3proxy-tls - items: - - key: ca.crt - path: kube-ca.crt -EOF -``` - -The pod spec includes the `hostAliases` key, which adds an entry to the pod's `/etc/hosts`. -The entry forwards all requests for any of the currently defined AWS regions to the Kubernetes service `s3proxy-service`. -If you followed the s3proxy [Deployment](../../workflows/s3proxy.md#deployment) guide, this service points to a s3proxy pod. - -The deployment specifies all regions explicitly to prevent accidental data leaks. -If one of your buckets were located in a region that's not part of the `hostAliases` key, traffic towards those buckets would not be redirected to s3proxy. -Similarly, if you want to exclude data for specific regions from going through s3proxy you can remove those regions from the deployment. - -The spec also includes a volume mount for the TLS certificate and adds it to the pod's certificate trust store. -The volume is called `ca-cert`. -The key `ca.crt` of that volume is mounted to `/etc/ssl/certs/kube-ca.crt`, which is the default certificate trust store location for that container's OpenSSL library. -Not adding the CA certificate will result in TLS authentication errors. - -3. Apply the file: `kubectl apply -f deployment-filestash.yaml` - -Afterward, you can use a port forward to access the Filestash pod: -`kubectl port-forward pod/$(kubectl get pod --selector='app=filestash' -o=jsonpath='{.items[*].metadata.name}') 8334:8334` - -4. After browsing to `localhost:8443`, Filestash will ask you to set an administrator password. -After setting it, you can directly leave the admin area by clicking the blue cloud symbol in the top left corner. -Subsequently, you can select S3 as storage backend and enter your credentials. -This will bring you to an overview of your buckets. -If you want to deploy Filestash in production, take a look at its [documentation](https://www.filestash.app/docs/). - -5. 
To see the logs of s3proxy intercepting requests made to S3, run: `kubectl logs -f pod/$(kubectl get pod --selector='app=s3proxy' -o=jsonpath='{.items[*].metadata.name}')` -Look out for log messages labeled `intercepting`. -There is one such log message for each message that's encrypted, decrypted, or blocked. - -6. Once you have uploaded a file with Filestash, you should be able to view the file in Filestash. -However, if you go to the AWS S3 [Web UI](https://s3.console.aws.amazon.com/s3/home) and download the file you just uploaded in Filestash, you won't be able to read it. -Another way to spot encrypted files without downloading them is to click on a file, scroll to the Metadata section, and look for the header named `x-amz-meta-constellation-encryption`. -This header holds the encrypted data encryption key of the object and is only present on objects that are encrypted by s3proxy. diff --git a/docs/versioned_docs/version-2.18/getting-started/examples/horizontal-scaling.md b/docs/versioned_docs/version-2.18/getting-started/examples/horizontal-scaling.md deleted file mode 100644 index dfaf9e742..000000000 --- a/docs/versioned_docs/version-2.18/getting-started/examples/horizontal-scaling.md +++ /dev/null @@ -1,98 +0,0 @@ -# Horizontal Pod Autoscaling -This example demonstrates Constellation's autoscaling capabilities. It's based on the Kubernetes [HorizontalPodAutoscaler Walkthrough](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/). During the following steps, Constellation will spawn new VMs on demand, verify them, add them to the cluster, and delete them again when the load has settled down. - -## Requirements -The cluster needs to be initialized with Kubernetes 1.23 or later. In addition, [autoscaling must be enabled](../../workflows/scale.md) to enable Constellation to assign new nodes dynamically. - -Just for this example specifically, the cluster should have as few worker nodes in the beginning as possible. Start with a small cluster with only *one* low-powered node for the control-plane node and *one* low-powered worker node. - -:::info -We tested the example using instances of types `Standard_DC4as_v5` on Azure and `n2d-standard-4` on GCP. -::: - -## Setup - -1. Install the Kubernetes Metrics Server: - ```bash - kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml - ``` - -2. Deploy the HPA example server that's supposed to be scaled under load. - - This manifest is similar to the one from the Kubernetes HPA walkthrough, but with increased CPU limits and requests to facilitate the triggering of node scaling events. - ```bash - cat < - -Online Boutique - Web UI - - - -1. Create a namespace: - ```bash - kubectl create ns boutique - ``` -2. Deploy the application: - ```bash - kubectl apply -n boutique -f https://github.com/GoogleCloudPlatform/microservices-demo/raw/main/release/kubernetes-manifests.yaml - ``` -3. Wait for all services to become available: - ```bash - kubectl wait --for=condition=available --timeout=300s -n boutique --all deployments - ``` -4. Get the frontend's external IP address: - ```shell-session - $ kubectl get service frontend-external -n boutique | awk '{print $4}' - EXTERNAL-IP - - ``` - (`` is a placeholder for the IP assigned by your CSP.) -5. Enter the IP from the result in your browser to browse the online shop. 
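If you'd rather check the shop from the terminal than the browser, a simple reachability test could look like the following sketch. It reuses the service lookup from step 4; the `--no-headers` flag only strips the column header.

```bash
# Fetch the frontend's external IP (same command as in step 4, without the header row)
FRONTEND_IP=$(kubectl get service frontend-external -n boutique --no-headers | awk '{print $4}')

# An HTTP 200 response means the shop is serving traffic
curl -sS -o /dev/null -w "%{http_code}\n" "http://${FRONTEND_IP}"
```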
diff --git a/docs/versioned_docs/version-2.18/getting-started/first-steps-local.md b/docs/versioned_docs/version-2.18/getting-started/first-steps-local.md deleted file mode 100644 index 98f0302de..000000000 --- a/docs/versioned_docs/version-2.18/getting-started/first-steps-local.md +++ /dev/null @@ -1,277 +0,0 @@ -# First steps with a local cluster - -A local cluster lets you deploy and test Constellation without a cloud subscription. -You have two options: - -* Use MiniConstellation to automatically deploy a two-node cluster. -* For more fine-grained control, create the cluster using the QEMU provider. - -Both options use virtualization to create a local cluster with control-plane nodes and worker nodes. They **don't** require hardware with Confidential VM (CVM) support. For attestation, they currently use a software-based vTPM provided by KVM/QEMU. - -You need an x64 machine with a Linux OS. -You can use a VM, but it needs nested virtualization. - -## Prerequisites - -* Machine requirements: - * An x86-64 CPU with at least 4 cores (6 cores are recommended) - * At least 4 GB RAM (6 GB are recommended) - * 20 GB of free disk space - * Hardware virtualization enabled in the BIOS/UEFI (often referred to as Intel VT-x or AMD-V/SVM) / nested-virtualization support when using a VM -* Software requirements: - * Linux OS with [KVM kernel module](https://www.linux-kvm.org/page/Main_Page) - * Recommended: Ubuntu 22.04 LTS - * [Docker](https://docs.docker.com/engine/install/) - * [xsltproc](https://gitlab.gnome.org/GNOME/libxslt/-/wikis/home) - * (Optional) [virsh](https://www.libvirt.org/manpages/virsh.html) to observe and access your nodes - -### Software installation on Ubuntu - -```bash -# install Docker -curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg -echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null -sudo apt update -sudo apt install docker-ce -# install other dependencies -sudo apt install xsltproc -sudo snap install kubectl --classic -# install Constellation CLI -curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-linux-amd64 -sudo install constellation-linux-amd64 /usr/local/bin/constellation -# do not drop forwarded packages -sudo iptables -P FORWARD ACCEPT -``` - -## Create a cluster - - - - - -With the `constellation mini` command, you can deploy and test Constellation locally. This mode is called MiniConstellation. Conceptually, MiniConstellation is similar to [MicroK8s](https://microk8s.io/), [K3s](https://k3s.io/), and [minikube](https://minikube.sigs.k8s.io/docs/). - - -:::caution - -MiniConstellation has specific soft- and hardware requirements such as a Linux OS running on an x86-64 CPU. Pay attention to all [prerequisites](#prerequisites) when setting up. - -::: - -:::note - -Since MiniConstellation runs on your local system, cloud features such as load balancing, -attaching persistent storage, or autoscaling aren't available. - -::: - -The following creates your MiniConstellation cluster (may take up to 10 minutes to complete): - -```bash -constellation mini up -``` - -This will configure your current directory as the [workspace](../architecture/orchestration.md#workspaces) for this cluster. -All `constellation` commands concerning this cluster need to be issued from this directory. 
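Once `constellation mini up` has finished, you can point `kubectl` at the cluster. The following sketch assumes that MiniConstellation, like `constellation apply` shown further below, places the admin kubeconfig as `constellation-admin.conf` in the workspace:

```bash
# Assumption: `constellation mini up` writes the admin kubeconfig to the workspace
export KUBECONFIG="$PWD/constellation-admin.conf"

# The control-plane node appears first; the worker node joins shortly after
kubectl get nodes
```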
- - - - -With the QEMU provider, you can create a local Constellation cluster as if it were in the cloud. The provider uses [QEMU](https://www.qemu.org/) to create multiple VMs for the cluster nodes, which interact with each other. - -:::caution - -Constellation on QEMU has specific soft- and hardware requirements such as a Linux OS running on an x86-64 CPU. Pay attention to all [prerequisites](#prerequisites) when setting up. - -::: - -:::note - -Since Constellation on QEMU runs on your local system, cloud features such as load balancing, -attaching persistent storage, or autoscaling aren't available. - -::: - -1. To set up your local cluster, you need to create a configuration file for Constellation first. - - ```bash - constellation config generate qemu - ``` - - This creates a [configuration file](../workflows/config.md) for QEMU called `constellation-conf.yaml`. After that, your current folder also becomes your [workspace](../architecture/orchestration.md#workspaces). All `constellation` commands for your cluster need to be executed from this directory. - -2. Now you can create your cluster and its nodes. `constellation apply` uses the options set in `constellation-conf.yaml`. - - ```bash - constellation apply -y - ``` - - The Output should look like the following: - - ```shell-session - $ constellation apply -y - Checking for infrastructure changes - The following Constellation cluster will be created: - 3 control-plane nodes of type 2-vCPUs will be created. - 1 worker node of type 2-vCPUs will be created. - Creating - Cloud infrastructure created successfully. - Your Constellation master secret was successfully written to ./constellation-mastersecret.json - Connecting - Initializing cluster - Installing Kubernetes components - Your Constellation cluster was successfully initialized. - - Constellation cluster identifier g6iMP5wRU1b7mpOz2WEISlIYSfdAhB0oNaOg6XEwKFY= - Kubernetes configuration constellation-admin.conf - - You can now connect to your cluster by executing: - export KUBECONFIG="$PWD/constellation-admin.conf" - ``` - - The cluster's identifier will be different in your output. - Keep `constellation-mastersecret.json` somewhere safe. - This will allow you to [recover your cluster](../workflows/recovery.md) in case of a disaster. - - :::info - - Depending on your setup, `constellation apply` may take 10+ minutes to complete. - - ::: - -3. Configure kubectl - - ```bash - export KUBECONFIG="$PWD/constellation-admin.conf" - ``` - - - - -## Connect to the cluster - -Your cluster initially consists of a single control-plane node: - -```shell-session -$ kubectl get nodes -NAME STATUS ROLES AGE VERSION -control-plane-0 Ready control-plane 66s v1.24.6 -``` - -Additional nodes will request to join the cluster shortly. Before each additional node is allowed to join the cluster, its state is verified using remote attestation by the [JoinService](../architecture/microservices.md#joinservice). -If verification passes successfully, the new node receives keys and certificates to join the cluster. - -You can follow this process by viewing the logs of the JoinService: - -```shell-session -$ kubectl logs -n kube-system daemonsets/join-service -f -{"level":"INFO","ts":"2022-10-14T09:32:20Z","caller":"cmd/main.go:48","msg":"Constellation Node Join Service","version":"2.1.0","cloudProvider":"qemu"} -{"level":"INFO","ts":"2022-10-14T09:32:20Z","logger":"validator","caller":"watcher/validator.go:96","msg":"Updating expected measurements"} -... 
-``` - -Once all nodes have joined your cluster, it may take a couple of minutes for all resources to become available. -You can check on the state of your cluster by running the following: - -```shell-session -$ kubectl get nodes -NAME STATUS ROLES AGE VERSION -control-plane-0 Ready control-plane 2m59s v1.24.6 -worker-0 Ready 32s v1.24.6 -``` - -## Deploy a sample application - -1. Deploy the [emojivoto app](https://github.com/BuoyantIO/emojivoto) - - ```bash - kubectl apply -k github.com/BuoyantIO/emojivoto/kustomize/deployment - ``` - -2. Expose the frontend service locally - - ```bash - kubectl wait --for=condition=available --timeout=60s -n emojivoto --all deployments - kubectl -n emojivoto port-forward svc/web-svc 8080:80 & - curl http://localhost:8080 - kill %1 - ``` - -## Terminate your cluster - - - - -Once you are done, you can clean up the created resources using the following command: - -```bash -constellation mini down -``` - -This will destroy your cluster and clean up your workspace. -The VM image and cluster configuration file (`constellation-conf.yaml`) will be kept and may be reused to create new clusters. - - - - -Once you are done, you can clean up the created resources using the following command: - -```bash -constellation terminate -``` - -This should give the following output: - -```shell-session -$ constellation terminate -You are about to terminate a Constellation cluster. -All of its associated resources will be DESTROYED. -This action is irreversible and ALL DATA WILL BE LOST. -Do you want to continue? [y/n]: -``` - -Confirm with `y` to terminate the cluster: - -```shell-session -Terminating ... -Your Constellation cluster was terminated successfully. -``` - -This will destroy your cluster and clean up your workspace. -The VM image and cluster configuration file (`constellation-conf.yaml`) will be kept and may be reused to create new clusters. - - - - -## Troubleshooting - -Make sure to use the [latest release](https://github.com/edgelesssys/constellation/releases/latest) and check out the [known issues](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22). - -### VMs have no internet access / CLI remains in "Initializing cluster" state - -`iptables` rules may prevent your VMs from accessing the internet. -Make sure your rules aren't dropping forwarded packages. - -List your rules: - -```bash -sudo iptables -S -``` - -The output may look similar to the following: - -```shell-session --P INPUT ACCEPT --P FORWARD DROP --P OUTPUT ACCEPT --N DOCKER --N DOCKER-ISOLATION-STAGE-1 --N DOCKER-ISOLATION-STAGE-2 --N DOCKER-USER -``` - -If your `FORWARD` chain is set to `DROP`, you need to update your rules: - -```bash -sudo iptables -P FORWARD ACCEPT -``` diff --git a/docs/versioned_docs/version-2.18/getting-started/first-steps.md b/docs/versioned_docs/version-2.18/getting-started/first-steps.md deleted file mode 100644 index 18b723565..000000000 --- a/docs/versioned_docs/version-2.18/getting-started/first-steps.md +++ /dev/null @@ -1,229 +0,0 @@ -# First steps with Constellation - -The following steps guide you through the process of creating a cluster and deploying a sample app. This example assumes that you have successfully [installed and set up Constellation](install.md), -and have access to a cloud subscription. - -:::tip -If you don't have a cloud subscription, you can also set up a [local Constellation cluster using virtualization](../getting-started/first-steps-local.md) for testing. 
-::: - -:::note -If you encounter any problem with the following steps, make sure to use the [latest release](https://github.com/edgelesssys/constellation/releases/latest) and check out the [known issues](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22). -::: - -## Create a cluster - -1. Create the [configuration file](../workflows/config.md) and state file for your cloud provider. If you are following the steps of this guide, there is no need to edit the file. - - - - - ```bash - constellation config generate aws - ``` - - - - - ```bash - constellation config generate azure - ``` - - - - - ```bash - constellation config generate gcp - ``` - - - - - ```bash - constellation config generate stackit - ``` - - - - -2. Create your [IAM configuration](../workflows/config.md#creating-an-iam-configuration). - - - - - ```bash - constellation iam create aws --zone=us-east-2a --prefix=constellTest --update-config - ``` - - This command creates IAM configuration for the AWS zone `us-east-2a` using the prefix `constellTest` for all named resources being created. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. - - Depending on the attestation variant selected on config generation, different regions are available. - AMD SEV-SNP machines (requires the default attestation variant `awsSEVSNP`) are currently available in the following regions: - * `eu-west-1` - * `us-east-2` - - You can find a list of regions that support AMD SEV-SNP in [AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/snp-requirements.html). - - NitroTPM machines (requires the attestation variant `awsNitroTPM`) are available in all regions. - Constellation OS images are currently replicated to the following regions: - * `eu-central-1` - * `eu-west-1` - * `eu-west-3` - * `us-east-2` - * `ap-south-1` - - If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+AWS+image+region:+xx-xxxx-x). - - You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). - - - - - ```bash - constellation iam create azure --subscriptionID 00000000-0000-0000-0000-000000000000 --region=westus --resourceGroup=constellTest --servicePrincipal=spTest --update-config - ``` - - This command creates IAM configuration on the Azure region `westus` creating a new resource group `constellTest` and a new service principal `spTest`. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. - - CVMs are available in several Azure regions. Constellation OS images are currently replicated to the following: - - * `germanywestcentral` - * `westus` - * `eastus` - * `northeurope` - * `westeurope` - * `southeastasia` - - If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+Azure+image+region:+xx-xxxx-x). - - You can find a list of all [regions in Azure's documentation](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=virtual-machines®ions=all). 
- - - - - ```bash - constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --serviceAccountID=constell-test --update-config - ``` - - This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. - - Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `C2D` or `N2D`. - - - - - To use Constellation on STACKIT, the cluster will use the User Access Token (UAT) that's generated [during the install step](./install.md). - After creating the accounts, fill in the STACKIT details in `constellation-conf.yaml` under `provider.openstack`: - - * `stackitProjectID`: STACKIT project id (can be found after login on the [STACKIT portal](https://portal.stackit.cloud)) - - - - - :::tip - To learn about all options you have for managing IAM resources and Constellation configuration, see the [Configuration workflow](../workflows/config.md). - ::: - - - -3. Create the cluster. `constellation apply` uses options set in `constellation-conf.yaml`. - If you want to manually manage your cloud resources, for example by using [Terraform](../reference/terraform.md), follow the corresponding instructions in the [Create workflow](../workflows/create.md). - - :::tip - - On Azure, you may need to wait 15+ minutes at this point for role assignments to propagate. - - ::: - - ```bash - constellation apply -y - ``` - - This should look similar to the following: - - ```shell-session - $ constellation apply -y - Checking for infrastructure changes - The following Constellation cluster will be created: - 3 control-plane nodes of type n2d-standard-4 will be created. - 1 worker node of type n2d-standard-4 will be created. - Creating - Cloud infrastructure created successfully - Your Constellation master secret was successfully written to ./constellation-mastersecret.json - Connecting - Initializing cluster - Installing Kubernetes components - Your Constellation cluster was successfully initialized. - - Constellation cluster identifier g6iMP5wRU1b7mpOz2WEISlIYSfdAhB0oNaOg6XEwKFY= - Kubernetes configuration constellation-admin.conf - - You can now connect to your cluster by executing: - export KUBECONFIG="$PWD/constellation-admin.conf" - ``` - - The cluster's identifier will be different in your output. - Keep `constellation-mastersecret.json` somewhere safe. - This will allow you to [recover your cluster](../workflows/recovery.md) in case of a disaster. - - :::info - - Depending on your CSP and region, `constellation apply` may take 10+ minutes to complete. - - ::: - -4. Configure kubectl. - - ```bash - export KUBECONFIG="$PWD/constellation-admin.conf" - ``` - -## Deploy a sample application - -1. Deploy the [emojivoto app](https://github.com/BuoyantIO/emojivoto) - - ```bash - kubectl apply -k github.com/BuoyantIO/emojivoto/kustomize/deployment - ``` - -2. Expose the frontend service locally - - ```bash - kubectl wait --for=condition=available --timeout=60s -n emojivoto --all deployments - kubectl -n emojivoto port-forward svc/web-svc 8080:80 & - curl http://localhost:8080 - kill %1 - ``` - -## Terminate your cluster - -Use the CLI to terminate your cluster. 
If you manually used [Terraform](../reference/terraform.md) to manage your cloud resources, follow the corresponding instructions in the [Terminate workflow](../workflows/terminate.md). - -```bash -constellation terminate -``` - -This should give the following output: - -```shell-session -$ constellation terminate -You are about to terminate a Constellation cluster. -All of its associated resources will be DESTROYED. -This action is irreversible and ALL DATA WILL BE LOST. -Do you want to continue? [y/n]: -``` - -Confirm with `y` to terminate the cluster: - -```shell-session -Terminating ... -Your Constellation cluster was terminated successfully. -``` - -Optionally, you can also [delete your IAM resources](../workflows/config.md#deleting-an-iam-configuration). diff --git a/docs/versioned_docs/version-2.18/getting-started/install.md b/docs/versioned_docs/version-2.18/getting-started/install.md deleted file mode 100644 index d52e43476..000000000 --- a/docs/versioned_docs/version-2.18/getting-started/install.md +++ /dev/null @@ -1,429 +0,0 @@ -# Installation and setup - -Constellation runs entirely in your cloud environment and can be controlled via a dedicated [command-line interface (CLI)](../reference/cli.md) or a [Terraform provider](../workflows/terraform-provider.md). - -## Prerequisites - -Make sure the following requirements are met: - -* Your machine is running Linux, macOS, or Windows -* You have admin rights on your machine -* [kubectl](https://kubernetes.io/docs/tasks/tools/) is installed -* Your CSP is Amazon Web Services (AWS), Microsoft Azure, Google Cloud Platform (GCP), or STACKIT - -## Install the Constellation CLI - -:::tip - -If you prefer to use Terraform, you can alternatively use the [Terraform provider](../workflows/terraform-provider.md) to manage the cluster's lifecycle. - -::: - -The CLI executable is available at [GitHub](https://github.com/edgelesssys/constellation/releases). -Install it with the following commands: - - - - -1. Download the CLI: - -```bash -curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-linux-amd64 -``` - -2. [Verify the signature](../workflows/verify-cli.md) (optional) - -3. Install the CLI to your PATH: - -```bash -sudo install constellation-linux-amd64 /usr/local/bin/constellation -``` - - - - -1. Download the CLI: - -```bash -curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-linux-arm64 -``` - -2. [Verify the signature](../workflows/verify-cli.md) (optional) - -3. Install the CLI to your PATH: - -```bash -sudo install constellation-linux-arm64 /usr/local/bin/constellation -``` - - - - - -1. Download the CLI: - -```bash -curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-darwin-arm64 -``` - -2. [Verify the signature](../workflows/verify-cli.md) (optional) - -3. Install the CLI to your PATH: - -```bash -sudo install constellation-darwin-arm64 /usr/local/bin/constellation -``` - - - - - -1. Download the CLI: - -```bash -curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-darwin-amd64 -``` - -2. [Verify the signature](../workflows/verify-cli.md) (optional) - -3. Install the CLI to your PATH: - -```bash -sudo install constellation-darwin-amd64 /usr/local/bin/constellation -``` - - - - - -1. 
Download the CLI: - -```bash -Invoke-WebRequest -OutFile ./constellation.exe -Uri 'https://github.com/edgelesssys/constellation/releases/latest/download/constellation-windows-amd64.exe' -``` - -2. [Verify the signature](../workflows/verify-cli.md) (optional) - -3. Install the CLI under `C:\Program Files\Constellation\bin\constellation.exe` - -3. Add the CLI to your PATH: - - 1. Open `Advanced system settings` by searching for the App in the Windows search - 2. Go to the `Advanced` tab - 3. Click `Environment Variables…` - 4. Click variable called `Path` and click `Edit…` - 5. Click `New` - 6. Enter the path to the folder containing the binary you want on your PATH: `C:\Program Files\Constellation\bin` - - - - -:::tip -The CLI supports autocompletion for various shells. To set it up, run `constellation completion` and follow the given steps. -::: - -## Set up cloud credentials - -Constellation makes authenticated calls to the CSP API. Therefore, you need to set up Constellation with the credentials for your CSP. - -:::tip -If you don't have a cloud subscription, you can also set up a [local Constellation cluster using virtualization](../getting-started/first-steps-local.md) for testing. -::: - -### Required permissions - - - - -To set up a Constellation cluster, you need to perform two tasks that require permissions: create the infrastructure and create roles for cluster nodes. Both of these actions can be performed by different users, e.g., an administrator to create roles and a DevOps engineer to create the infrastructure. - -To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "ec2:DescribeAccountAttributes", - "iam:AddRoleToInstanceProfile", - "iam:AttachRolePolicy", - "iam:CreateInstanceProfile", - "iam:CreatePolicy", - "iam:CreateRole", - "iam:DeleteInstanceProfile", - "iam:DeletePolicy", - "iam:DeletePolicyVersion", - "iam:DeleteRole", - "iam:DetachRolePolicy", - "iam:GetInstanceProfile", - "iam:GetPolicy", - "iam:GetPolicyVersion", - "iam:GetRole", - "iam:ListAttachedRolePolicies", - "iam:ListInstanceProfilesForRole", - "iam:ListPolicyVersions", - "iam:ListRolePolicies", - "iam:PassRole", - "iam:RemoveRoleFromInstanceProfile", - "sts:GetCallerIdentity" - ], - "Resource": "*" - } - ] -} -``` - -The built-in `AdministratorAccess` policy is a superset of these permissions. - -To [create a Constellation cluster](../workflows/create.md), see the permissions of [main.tf](https://github.com/edgelesssys/constellation/blob/main/terraform/infrastructure/iam/aws/main.tf). - -The built-in `PowerUserAccess` policy is a superset of these permissions. - -Follow Amazon's guide on [understanding](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) and [managing policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html). - - - - -The following [resource providers need to be registered](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/resource-providers-and-types#register-resource-provider) in your subscription: - -* `Microsoft.Attestation` -* `Microsoft.Compute` -* `Microsoft.Insights` -* `Microsoft.ManagedIdentity` -* `Microsoft.Network` - -By default, Constellation tries to register these automatically if they haven't been registered before. 
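If you prefer to register the resource providers yourself instead of relying on the automatic registration, a sketch with the Azure CLI could look like this (the loop simply iterates over the provider names listed above):

```bash
# Register the resource providers listed above (registration is idempotent)
for ns in Microsoft.Attestation Microsoft.Compute Microsoft.Insights Microsoft.ManagedIdentity Microsoft.Network; do
  az provider register --namespace "$ns"
done

# Registration is asynchronous; check the state of an individual provider
az provider show --namespace Microsoft.Attestation --query registrationState -o tsv
```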
- -To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: - -* `*/register/action` \[1] -* `Microsoft.Authorization/roleAssignments/*` -* `Microsoft.Authorization/roleDefinitions/*` -* `Microsoft.ManagedIdentity/userAssignedIdentities/*` -* `Microsoft.Resources/subscriptions/resourcegroups/*` - -The built-in `Owner` role is a superset of these permissions. - -To [create a Constellation cluster](../workflows/create.md), you need the following permissions: - -* `Microsoft.Attestation/attestationProviders/*` -* `Microsoft.Compute/virtualMachineScaleSets/*` -* `Microsoft.Insights/components/*` -* `Microsoft.ManagedIdentity/userAssignedIdentities/*` -* `Microsoft.Network/loadBalancers/*` -* `Microsoft.Network/loadBalancers/backendAddressPools/*` -* `Microsoft.Network/networkSecurityGroups/*` -* `Microsoft.Network/publicIPAddresses/*` -* `Microsoft.Network/virtualNetworks/*` -* `Microsoft.Network/virtualNetworks/subnets/*` -* `Microsoft.Network/natGateways/*` - -The built-in `Contributor` role is a superset of these permissions. - -Follow Microsoft's guide on [understanding](https://learn.microsoft.com/en-us/azure/role-based-access-control/role-definitions) and [assigning roles](https://learn.microsoft.com/en-us/azure/role-based-access-control/role-assignments). - -1: You can omit `*/register/Action` if the resource providers mentioned above are already registered and the `ARM_SKIP_PROVIDER_REGISTRATION` environment variable is set to `true` when creating the IAM configuration. - - - - -Create a new project for Constellation or use an existing one. -Enable the [Compute Engine API](https://console.cloud.google.com/apis/library/compute.googleapis.com) on it. - -To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: - -* `iam.serviceAccountKeys.create` -* `iam.serviceAccountKeys.delete` -* `iam.serviceAccountKeys.get` -* `iam.serviceAccounts.create` -* `iam.serviceAccounts.delete` -* `iam.serviceAccounts.get` -* `resourcemanager.projects.getIamPolicy` -* `resourcemanager.projects.setIamPolicy` - -Together, the built-in roles `roles/editor` and `roles/resourcemanager.projectIamAdmin` form a superset of these permissions. 
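As a sketch of how these roles could be granted to the account that later runs `constellation iam create` (the project ID and the user e-mail are placeholders):

```bash
# Placeholders: project ID (as used elsewhere in this guide) and operator account
gcloud projects add-iam-policy-binding yourproject-12345 \
  --member="user:operator@example.com" --role="roles/editor"
gcloud projects add-iam-policy-binding yourproject-12345 \
  --member="user:operator@example.com" --role="roles/resourcemanager.projectIamAdmin"
```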
- -To [create a Constellation cluster](../workflows/create.md), you need the following permissions: - -* `compute.addresses.createInternal` -* `compute.addresses.deleteInternal` -* `compute.addresses.get` -* `compute.addresses.useInternal` -* `compute.backendServices.create` -* `compute.backendServices.delete` -* `compute.backendServices.get` -* `compute.backendServices.use` -* `compute.disks.create` -* `compute.firewalls.create` -* `compute.firewalls.delete` -* `compute.firewalls.get` -* `compute.firewalls.update` -* `compute.globalAddresses.create` -* `compute.globalAddresses.delete` -* `compute.globalAddresses.get` -* `compute.globalAddresses.use` -* `compute.globalForwardingRules.create` -* `compute.globalForwardingRules.delete` -* `compute.globalForwardingRules.get` -* `compute.globalForwardingRules.setLabels` -* `compute.globalOperations.get` -* `compute.healthChecks.create` -* `compute.healthChecks.delete` -* `compute.healthChecks.get` -* `compute.healthChecks.useReadOnly` -* `compute.instanceGroupManagers.create` -* `compute.instanceGroupManagers.delete` -* `compute.instanceGroupManagers.get` -* `compute.instanceGroupManagers.update` -* `compute.instanceGroups.create` -* `compute.instanceGroups.delete` -* `compute.instanceGroups.get` -* `compute.instanceGroups.update` -* `compute.instanceGroups.use` -* `compute.instances.create` -* `compute.instances.setLabels` -* `compute.instances.setMetadata` -* `compute.instances.setTags` -* `compute.instanceTemplates.create` -* `compute.instanceTemplates.delete` -* `compute.instanceTemplates.get` -* `compute.instanceTemplates.useReadOnly` -* `compute.networks.create` -* `compute.networks.delete` -* `compute.networks.get` -* `compute.networks.updatePolicy` -* `compute.routers.create` -* `compute.routers.delete` -* `compute.routers.get` -* `compute.routers.update` -* `compute.subnetworks.create` -* `compute.subnetworks.delete` -* `compute.subnetworks.get` -* `compute.subnetworks.use` -* `compute.targetTcpProxies.create` -* `compute.targetTcpProxies.delete` -* `compute.targetTcpProxies.get` -* `compute.targetTcpProxies.use` -* `iam.serviceAccounts.actAs` - -Together, the built-in roles `roles/editor`, `roles/compute.instanceAdmin` and `roles/resourcemanager.projectIamAdmin` form a superset of these permissions. - -Follow Google's guide on [understanding](https://cloud.google.com/iam/docs/understanding-roles) and [assigning roles](https://cloud.google.com/iam/docs/granting-changing-revoking-access). - - - - -Constellation on STACKIT requires a User Access Token (UAT) for the OpenStack API and a STACKIT service account. -The UAT already has all required permissions by default. -The STACKIT service account needs the `editor` role to create STACKIT LoadBalancers. -Look at the [STACKIT documentation](https://docs.stackit.cloud/stackit/en/getting-started-in-service-accounts-134415831.html) on how to create the service account and assign the role. - - - - -### Authentication - -You need to authenticate with your CSP. The following lists the required steps for *testing* and *production* environments. - -:::note -The steps for a *testing* environment are simpler. However, they may expose secrets to the CSP. If in doubt, follow the *production* steps. -::: - - - - -**Testing** - -You can use the [AWS CloudShell](https://console.aws.amazon.com/cloudshell/home). Make sure you are [authorized to use it](https://docs.aws.amazon.com/cloudshell/latest/userguide/sec-auth-with-identities.html). 
- -**Production** - -Use the latest version of the [AWS CLI](https://aws.amazon.com/cli/) on a trusted machine: - -```bash -aws configure -``` - -Options and first steps are described in the [AWS CLI documentation](https://docs.aws.amazon.com/cli/index.html). - - - - -**Testing** - -Simply open the [Azure Cloud Shell](https://docs.microsoft.com/en-us/azure/cloud-shell/overview). - -**Production** - -Use the latest version of the [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/) on a trusted machine: - -```bash -az login -``` - -Other options are described in Azure's [authentication guide](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli). - - - - -**Testing** - -You can use the [Google Cloud Shell](https://cloud.google.com/shell). Make sure your [session is authorized](https://cloud.google.com/shell/docs/auth). For example, execute `gsutil` and accept the authorization prompt. - -**Production** - -Use one of the following options on a trusted machine: - -* Use the [`gcloud` CLI](https://cloud.google.com/sdk/gcloud) - - ```bash - gcloud auth application-default login - ``` - - This will ask you to log-in to your Google account and create your credentials. - The Constellation CLI will automatically load these credentials when needed. - -* Set up a service account and pass the credentials manually - - Follow [Google's guide](https://cloud.google.com/docs/authentication/production#manually) for setting up your credentials. - - - - -You need to authenticate with the infrastructure API (OpenStack) and create a service account (STACKIT API). - -1. [Follow the STACKIT documentation](https://docs.stackit.cloud/stackit/en/step-1-generating-of-user-access-token-11763726.html) for obtaining a User Access Token (UAT) to use the infrastructure API -2. Create a configuration file under `~/.config/openstack/clouds.yaml` (`%AppData%\openstack\clouds.yaml` on Windows) with the credentials from the User Access Token - - ```yaml - clouds: - stackit: - auth: - auth_url: https://keystone.api.iaas.eu01.stackit.cloud/v3 - username: REPLACE_WITH_UAT_USERNAME - password: REPLACE_WITH_UAT_PASSWORD - project_id: REPLACE_WITH_STACKIT_PROJECT_ID - project_name: REPLACE_WITH_STACKIT_PROJECT_NAME - user_domain_name: portal_mvp - project_domain_name: portal_mvp - region_name: RegionOne - identity_api_version: 3 - ``` - -3. [Follow the STACKIT documentation](https://docs.stackit.cloud/stackit/en/getting-started-in-service-accounts-134415831.html) for creating a service account and an access token -4. Assign the `editor` role to the service account by [following the documentation](https://docs.stackit.cloud/stackit/en/getting-started-in-service-accounts-134415831.html) -5. Create a configuration file under `~/.stackit/credentials.json` (`%USERPROFILE%\.stackit\credentials.json` on Windows) - - ```json - {"STACKIT_SERVICE_ACCOUNT_TOKEN":"REPLACE_WITH_TOKEN"} - ``` - - - - - -## Next steps - -You are now ready to [deploy your first confidential Kubernetes cluster and application](first-steps.md). diff --git a/docs/versioned_docs/version-2.18/getting-started/marketplaces.md b/docs/versioned_docs/version-2.18/getting-started/marketplaces.md deleted file mode 100644 index a6763a42a..000000000 --- a/docs/versioned_docs/version-2.18/getting-started/marketplaces.md +++ /dev/null @@ -1,56 +0,0 @@ -# Using Constellation via Cloud Marketplaces - -Constellation is available through the Marketplaces of AWS, Azure, GCP, and STACKIT. 
This allows you to create self-managed Constellation clusters that are billed on a pay-per-use basis (hourly, per vCPU) with your CSP account. You can still get direct support by Edgeless Systems. For more information, please [contact us](https://www.edgeless.systems/enterprise-support/). - -This document explains how to run Constellation with the dynamically billed cloud marketplace images. - - - - -To use Constellation's marketplace images, ensure that you are subscribed to the [marketplace offering](https://aws.amazon.com/marketplace/pp/prodview-2mbn65nv57oys) through the web portal. - -Then, enable the use of marketplace images in your Constellation `constellation-conf.yaml` [config file](../workflows/config.md): - -```bash -yq eval -i ".provider.aws.useMarketplaceImage = true" constellation-conf.yaml -``` - - - - -Constellation has a private marketplace plan. Please [contact us](https://www.edgeless.systems/enterprise-support/) to gain access. - -To use a marketplace image, you need to accept the marketplace image's terms once for your subscription with the [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/vm/image/terms?view=azure-cli-latest): - -```bash -az vm image terms accept --publisher edgelesssystems --offer constellation --plan constellation -``` - -Then, enable the use of marketplace images in your Constellation `constellation-conf.yaml` [config file](../workflows/config.md): - -```bash -yq eval -i ".provider.azure.useMarketplaceImage = true" constellation-conf.yaml -``` - - - - -To use a marketplace image, ensure that the account is entitled to use marketplace images by Edgeless Systems by accepting the terms through the [web portal](https://console.cloud.google.com/marketplace/vm/config/edgeless-systems-public/constellation). - -Then, enable the use of marketplace images in your Constellation `constellation-conf.yaml` [config file](../workflows/config.md): - -```bash -yq eval -i ".provider.gcp.useMarketplaceImage = true" constellation-conf.yaml -``` - - - - -On STACKIT, the selected Constellation image is always a marketplace image. You can find more information on the STACKIT portal. - - - - -Ensure that the cluster uses an official release image version (i.e., `.image=vX.Y.Z` in the `constellation-conf.yaml` file). - -From there, you can proceed with the [cluster creation](../workflows/create.md) as usual. diff --git a/docs/versioned_docs/version-2.18/intro.md b/docs/versioned_docs/version-2.18/intro.md deleted file mode 100644 index 0bfe86da9..000000000 --- a/docs/versioned_docs/version-2.18/intro.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -slug: / -id: intro ---- -# Introduction - -Welcome to the documentation of Constellation! Constellation is a Kubernetes engine that aims to provide the best possible data security. - -![Constellation concept](/img/concept.svg) - - Constellation shields your entire Kubernetes cluster from the underlying cloud infrastructure. Everything inside is always encrypted, including at runtime in memory. For this, Constellation leverages a technology called *confidential computing* and more specifically Confidential VMs. - -:::tip -See the 📄[whitepaper](https://content.edgeless.systems/hubfs/Confidential%20Computing%20Whitepaper.pdf) for more information on confidential computing. -::: - -## Goals - -From a security perspective, Constellation is designed to keep all data always encrypted and to prevent any access from the underlying (cloud) infrastructure. 
This includes access from datacenter employees, privileged cloud admins, and attackers coming through the infrastructure. Such attackers could be malicious co-tenants escalating their privileges or hackers who managed to compromise a cloud server. - -From a DevOps perspective, Constellation is designed to work just like what you would expect from a modern Kubernetes engine. - -## Use cases - -Constellation provides unique security [features](overview/confidential-kubernetes.md) and [benefits](overview/security-benefits.md). The core use cases are: - -* Increasing the overall security of your clusters -* Increasing the trustworthiness of your SaaS offerings -* Moving sensitive workloads from on-prem to the cloud -* Meeting regulatory requirements - -## Next steps - -You can learn more about the concept of Confidential Kubernetes, features, security benefits, and performance of Constellation in the *Basics* section. To jump right into the action head to *Getting started*. diff --git a/docs/versioned_docs/version-2.18/overview/clouds.md b/docs/versioned_docs/version-2.18/overview/clouds.md deleted file mode 100644 index b2695d28e..000000000 --- a/docs/versioned_docs/version-2.18/overview/clouds.md +++ /dev/null @@ -1,66 +0,0 @@ -# Feature status of clouds - -What works on which cloud? Currently, Confidential VMs (CVMs) are available in varying quality on the different clouds and software stacks. - -For Constellation, the ideal environment provides the following: - -1. Ability to run arbitrary software and images inside CVMs -2. CVMs based on AMD SEV-SNP (available in EPYC CPUs since the Milan generation) or Intel TDX (available in Xeon CPUs since the Sapphire Rapids generation) -3. Ability for CVM guests to obtain raw hardware attestation statements -4. Reviewable, open-source firmware inside CVMs -5. Capability of the firmware to attest the integrity of the code it passes control to, e.g., with an embedded virtual TPM (vTPM) - -(1) is a functional must-have. (2)--(5) are required for remote attestation that fully keeps the infrastructure/cloud out. Constellation can work without them or with approximations, but won't protect against certain privileged attackers anymore. - -The following table summarizes the state of features for different infrastructures. - -| **Feature** | **AWS** | **Azure** | **GCP** | **STACKIT** | **OpenStack (Yoga)** | -|-----------------------------------|---------|-----------|---------|--------------|----------------------| -| **1. Custom images** | Yes | Yes | Yes | Yes | Yes | -| **2. SEV-SNP or TDX** | Yes | Yes | Yes | No | Depends on kernel/HV | -| **3. Raw guest attestation** | Yes | Yes | Yes | No | Depends on kernel/HV | -| **4. Reviewable firmware** | Yes | No* | No | No | Depends on kernel/HV | -| **5. Confidential measured boot** | No | Yes | No | No | Depends on kernel/HV | - -## Amazon Web Services (AWS) - -Amazon EC2 [supports AMD SEV-SNP](https://aws.amazon.com/de/about-aws/whats-new/2023/04/amazon-ec2-amd-sev-snp/). -Regarding (3), AWS provides direct access to attestation statements. -However, regarding (5), attestation is partially based on the [NitroTPM](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html) for [measured boot](../architecture/attestation.md#measured-boot), which is a vTPM managed by the Nitro hypervisor. -Hence, the hypervisor is currently part of Constellation's TCB. -Regarding (4), the [firmware is open source](https://github.com/aws/uefi) and can be reproducibly built. 
- -## Microsoft Azure - -With its [CVM offering](https://docs.microsoft.com/en-us/azure/confidential-computing/confidential-vm-overview), Azure provides the best foundations for Constellation. -Regarding (3), Azure provides direct access to attestation statements. -The firmware runs in an isolated domain inside the CVM and exposes a vTPM (5), but it's closed source (4). -On SEV-SNP, Azure uses VM Privilege Level (VMPL) isolation for the separation of firmware and the rest of the VM; on TDX, they use TD partitioning. -This firmware is signed by Azure. -The signature is reflected in the attestation statements of CVMs. -Thus, the Azure closed-source firmware becomes part of Constellation's trusted computing base (TCB). - -\* Recently, [Azure announced the open source paravisor OpenHCL](https://techcommunity.microsoft.com/blog/windowsosplatform/openhcl-the-new-open-source-paravisor/4273172). It's the foundation for fully open source and verifiable CVM firmware. Once Azure provides their CVM firmware with reproducible builds based on OpenHCL, (4) switches from *No* to *Yes*. Constellation will support OpenHCL based firmware on Azure in the future. - -## Google Cloud Platform (GCP) - -The [CVMs Generally Available in GCP](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview#technologies) are based on AMD SEV-ES or SEV-SNP. -Regarding (3), with their SEV-SNP offering Google provides direct access to attestation statements. -However, regarding (5), attestation is partially based on the [Shielded VM vTPM](https://cloud.google.com/compute/shielded-vm/docs/shielded-vm#vtpm) for [measured boot](../architecture/attestation.md#measured-boot), which is a vTPM managed by Google's hypervisor. -Hence, the hypervisor is currently part of Constellation's TCB. -Regarding (4), the CVMs still include closed-source firmware. - -[TDX on Google](https://cloud.google.com/blog/products/identity-security/confidential-vms-on-intel-cpus-your-datas-new-intelligent-defense) is in public preview. -With it, Constellation would have a similar TCB and attestation flow as with the current SEV-SNP offering. - -## STACKIT - -[STACKIT Compute Engine](https://www.stackit.de/en/product/stackit-compute-engine/) supports AMD SEV-ES. A vTPM is used for measured boot, which is a vTPM managed by STACKIT's hypervisor. Hence, the hypervisor is currently part of Constellation's TCB. - -## OpenStack - -OpenStack is an open-source cloud and infrastructure management software. It's used by many smaller CSPs and datacenters. In the latest *Yoga* version, OpenStack has basic support for CVMs. However, much depends on the employed kernel and hypervisor. Features (2)--(4) are likely to be a *Yes* with Linux kernel version 6.2. Thus, going forward, OpenStack on corresponding AMD or Intel hardware will be a viable underpinning for Constellation. - -## Conclusion - -The different clouds and software like the Linux kernel and OpenStack are in the process of building out their support for state-of-the-art CVMs. Azure has already most features in place. For Constellation, the status quo means that the TCB has different shapes on different infrastructures. With broad SEV-SNP support coming to the Linux kernel, we soon expect a normalization of features across infrastructures. 
diff --git a/docs/versioned_docs/version-2.18/overview/confidential-kubernetes.md b/docs/versioned_docs/version-2.18/overview/confidential-kubernetes.md deleted file mode 100644 index bff8c3322..000000000 --- a/docs/versioned_docs/version-2.18/overview/confidential-kubernetes.md +++ /dev/null @@ -1,42 +0,0 @@ -# Confidential Kubernetes - -We use the term *Confidential Kubernetes* to refer to the concept of using confidential-computing technology to shield entire Kubernetes clusters from the infrastructure. The three defining properties of this concept are: - -1. **Workload shielding**: the confidentiality and integrity of all workload-related data and code are enforced. -2. **Control plane shielding**: the confidentiality and integrity of the cluster's control plane, state, and workload configuration are enforced. -3. **Attestation and verifiability**: the two properties above can be verified remotely based on hardware-rooted cryptographic certificates. - -Each of the above properties is equally important. Only with all three in conjunction, an entire cluster can be shielded without gaps. - -## Constellation security features - -Constellation implements the Confidential Kubernetes concept with the following security features. - -* **Runtime encryption**: Constellation runs all Kubernetes nodes inside Confidential VMs (CVMs). This gives runtime encryption for the entire cluster. -* **Network and storage encryption**: Constellation augments this with transparent encryption of the [network](../architecture/networking.md), [persistent storage](../architecture/encrypted-storage.md), and other managed storage like [AWS S3](../architecture/encrypted-storage.md#encrypted-s3-object-storage). Thus, workloads and control plane are truly end-to-end encrypted: at rest, in transit, and at runtime. -* **Transparent key management**: Constellation manages the corresponding [cryptographic keys](../architecture/keys.md) inside CVMs. -* **Node attestation and verification**: Constellation verifies the integrity of each new CVM-based node using [remote attestation](../architecture/attestation.md). Only "good" nodes receive the cryptographic keys required to access the network and storage of a cluster. -* **Confidential computing-optimized images**: A node is "good" if it's running a signed Constellation [node image](../architecture/images.md) inside a CVM and is in the expected state. (Node images are hardware-measured during boot. The measurements are reflected in the attestation statements that are produced by nodes and verified by Constellation.) -* **"Whole cluster" attestation**: Towards the DevOps engineer, Constellation provides a single hardware-rooted certificate from which all of the above can be verified. - -With the above, Constellation wraps an entire cluster into one coherent and verifiable *confidential context*. The concept is depicted in the following. - -![Confidential Kubernetes](../_media/concept-constellation.svg) - -## Comparison: Managed Kubernetes with CVMs - -In comparison, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. 
The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. Consequently, this approach leaves a large attack surface, as is depicted in the following. - -![Concept: Managed Kubernetes plus CVMs](../_media/concept-managed.svg) - -The following table highlights the key differences in terms of features. - -| | Managed Kubernetes with CVMs | Confidential Kubernetes (Constellation✨) | -|-------------------------------------|------------------------------|--------------------------------------------| -| Runtime encryption | Partial (data plane only)| **Yes** | -| Node image verification | No | **Yes** | -| Full cluster attestation | No | **Yes** | -| Transparent network encryption | No | **Yes** | -| Transparent storage encryption | No | **Yes** | -| Confidential key management | No | **Yes** | -| Cloud agnostic / multi-cloud | No | **Yes** | diff --git a/docs/versioned_docs/version-2.18/overview/license.md b/docs/versioned_docs/version-2.18/overview/license.md deleted file mode 100644 index 34122c025..000000000 --- a/docs/versioned_docs/version-2.18/overview/license.md +++ /dev/null @@ -1,33 +0,0 @@ -# License - -## Source code - -Constellation's source code is available on [GitHub](https://github.com/edgelesssys/constellation) under the [GNU Affero General Public License v3.0](https://github.com/edgelesssys/constellation/blob/main/LICENSE). - -## Binaries - -Edgeless Systems provides ready-to-use and [signed](../architecture/attestation.md#chain-of-trust) binaries of Constellation. This includes the CLI and the [node images](../architecture/images.md). - -These binaries may be used free of charge within the bounds of Constellation's [**Community License**](#community-license). An [**Enterprise License**](#enterprise-license) can be purchased from Edgeless Systems. - -The Constellation CLI displays relevant license information when you initialize your cluster. You are responsible for staying within the bounds of your respective license. Constellation doesn't enforce any limits so as not to endanger your cluster's availability. - -## Terraform provider - -Edgeless Systems provides a [Terraform provider](https://github.com/edgelesssys/terraform-provider-constellation/releases), which may be used free of charge within the bounds of Constellation's [**Community License**](#community-license). An [**Enterprise License**](#enterprise-license) can be purchased from Edgeless Systems. - -You are responsible for staying within the bounds of your respective license. Constellation doesn't enforce any limits so as not to endanger your cluster's availability. - -## Community License - -You are free to use the Constellation binaries provided by Edgeless Systems to create services for internal consumption, evaluation purposes, or non-commercial use. You must not use the Constellation binaries to provide commercial hosted services to third parties. Edgeless Systems gives no warranties and offers no support. - -## Enterprise License - -Enterprise Licenses don't have the above limitations and come with support and additional features. Find out more at the [product website](https://www.edgeless.systems/products/constellation/). 
- -Once you have received your Enterprise License file, place it in your [Constellation workspace](../architecture/orchestration.md#workspaces) in a file named `constellation.license`. - -## CSP Marketplaces - -Constellation is available through the Marketplaces of AWS, Azure, GCP, and STACKIT. This allows you to create self-managed Constellation clusters that are billed on a pay-per-use basis (hourly, per vCPU) with your CSP account. You can still get direct support by Edgeless Systems. For more information, please [contact us](https://www.edgeless.systems/enterprise-support/). diff --git a/docs/versioned_docs/version-2.18/overview/performance/application.md b/docs/versioned_docs/version-2.18/overview/performance/application.md deleted file mode 100644 index c67d59644..000000000 --- a/docs/versioned_docs/version-2.18/overview/performance/application.md +++ /dev/null @@ -1,102 +0,0 @@ -# Application benchmarks - -## HashiCorp Vault - -[HashiCorp Vault](https://www.vaultproject.io/) is a distributed secrets management software that can be deployed to Kubernetes. -HashiCorp maintains a benchmarking tool for vault, [vault-benchmark](https://github.com/hashicorp/vault-benchmark/). -Vault-benchmark generates load on a Vault deployment and measures response times. - -This article describes the results from running vault-benchmark on Constellation, AKS, and GKE. -You can find the setup for producing the data discussed in this article in the [vault-benchmarks](https://github.com/edgelesssys/vault-benchmarks) repository. - -The Vault API used during benchmarking is the [transits secret engine](https://developer.hashicorp.com/vault/docs/secrets/transit). -This allows services to send data to Vault for encryption, decryption, signing, and verification. - -## Results - -On each run, vault-benchmark sends requests and measures the latencies. -The measured latencies are aggregated through various statistical features. -After running the benchmark n times, the arithmetic mean over a subset of the reported statistics is calculated. -The selected features are arithmetic mean, 99th percentile, minimum, and maximum. - -Arithmetic mean gives a general sense of the latency on each target. -The 99th percentile shows performance in (most likely) erroneous states. -Minimum and maximum mark the range within which latency varies each run. - -The benchmark was configured with 1300 workers and 10 seconds per run. -Those numbers were chosen empirically. -The latency was stabilizing at 10 seconds runtime, not changing with further increase. -Increasing the number of workers beyond 1300 leads to request failures, marking the limit Vault was able to handle in this setup. -All results are based on 100 runs. - -The following data was generated while running five replicas, one primary, and four standby nodes. -All numbers are in seconds if not indicated otherwise. 
-``` -========== Results AKS ========== -Mean: mean: 1.632200, variance: 0.002057 -P99: mean: 5.480679, variance: 2.263700 -Max: mean: 6.651001, variance: 2.808401 -Min: mean: 0.011415, variance: 0.000133 -========== Results GKE ========== -Mean: mean: 1.656435, variance: 0.003615 -P99: mean: 6.030807, variance: 3.955051 -Max: mean: 7.164843, variance: 3.300004 -Min: mean: 0.010233, variance: 0.000111 -========== Results C11n ========== -Mean: mean: 1.651549, variance: 0.001610 -P99: mean: 5.780422, variance: 3.016106 -Max: mean: 6.942997, variance: 3.075796 -Min: mean: 0.013774, variance: 0.000228 -========== AKS vs C11n ========== -Mean: +1.171577 % (AKS is faster) -P99: +5.185495 % (AKS is faster) -Max: +4.205618 % (AKS is faster) -Min: +17.128781 % (AKS is faster) -========== GKE vs C11n ========== -Mean: -0.295851 % (GKE is slower) -P99: -4.331603 % (GKE is slower) -Max: -3.195248 % (GKE is slower) -Min: +25.710886 % (GKE is faster) -``` - -**Interpretation**: Latencies are all within ~5% of each other. -AKS performs slightly better than GKE and Constellation (C11n) in all cases except minimum latency. -Minimum latency is the lowest for GKE. -Compared to GKE, Constellation had slightly lower peak latencies (99th percentile and maximum), indicating that Constellation could have handled slightly more concurrent accesses than GKE. -Overall, performance is at comparable levels across all three distributions. -Based on these numbers, you can use a similarly sized Constellation cluster to run your existing Vault deployment. - -### Visualization - -The following plots visualize the data presented above as [box plots](https://en.wikipedia.org/wiki/Box_plot). -The whiskers denote the minimum and maximum. -The box stretches from the 25th to the 75th percentile, with the dividing bar marking the 50th percentile. -The circles outside the whiskers denote outliers. - -
-Mean Latency - -![Mean Latency](../../_media/benchmark_vault/5replicas/mean_latency.png) - -
- -
-99th Percentile Latency - -![99th Percentile Latency](../../_media/benchmark_vault/5replicas/p99_latency.png) - -
- -
-Maximum Latency - -![Maximum Latency](../../_media/benchmark_vault/5replicas/max_latency.png) - -
- -
-Minimum Latency - -![Minimum Latency](../../_media/benchmark_vault/5replicas/min_latency.png) - -
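For reference, the load described above was generated with HashiCorp's vault-benchmark tool. A minimal sketch of such a run is shown below; the endpoint, token handling, and config file name are illustrative, and the full setup lives in the linked vault-benchmarks repository:

```bash
# Sketch: drive a transit-engine benchmark against an existing Vault deployment (values illustrative).
export VAULT_ADDR=https://vault.example.com:8200   # address of the Vault deployment under test
export VAULT_TOKEN=<token-with-transit-access>     # token used by the benchmark

# config.hcl defines the transit test blocks, the worker count (1300), and the run duration (10s).
vault-benchmark run -config=config.hcl
```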
diff --git a/docs/versioned_docs/version-2.18/overview/performance/compute.md b/docs/versioned_docs/version-2.18/overview/performance/compute.md deleted file mode 100644 index 88dd4b1b2..000000000 --- a/docs/versioned_docs/version-2.18/overview/performance/compute.md +++ /dev/null @@ -1,11 +0,0 @@ -# Impact of runtime encryption on compute performance - -All nodes in a Constellation cluster are executed inside Confidential VMs (CVMs). Consequently, the performance of Constellation is inherently linked to the performance of these CVMs. - -## AMD and Azure benchmarking - -AMD and Azure have collectively released a [performance benchmark](https://community.amd.com/t5/business/microsoft-azure-confidential-computing-powered-by-3rd-gen-epyc/ba-p/497796) for CVMs that utilize 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. This benchmark, which included a variety of mostly compute-intensive tests such as SPEC CPU 2017 and CoreMark, demonstrated that CVMs experience only minor performance degradation (ranging from 2% to 8%) when compared to standard VMs. Such results are indicative of the performance that can be expected from compute-intensive workloads running with Constellation on Azure. - -## AMD and Google benchmarking - -Similarly, AMD and Google have jointly released a [performance benchmark](https://www.amd.com/system/files/documents/3rd-gen-epyc-gcp-c2d-conf-compute-perf-brief.pdf) for CVMs employing 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. With high-performance computing workloads such as WRF, NAMD, Ansys CFS, and Ansys LS_DYNA, they observed analogous findings, with only minor performance degradation (between 2% and 4%) compared to standard VMs. These outcomes are reflective of the performance that can be expected for compute-intensive workloads running with Constellation on GCP. diff --git a/docs/versioned_docs/version-2.18/overview/performance/io.md b/docs/versioned_docs/version-2.18/overview/performance/io.md deleted file mode 100644 index 3ae796f8a..000000000 --- a/docs/versioned_docs/version-2.18/overview/performance/io.md +++ /dev/null @@ -1,204 +0,0 @@ -# I/O performance benchmarks - -To assess the overall performance of Constellation, this benchmark evaluates Constellation v2.6.0 in terms of storage I/O using [`fio`](https://fio.readthedocs.io/en/latest/fio_doc.html) and network performance using the [Kubernetes Network Benchmark](https://github.com/InfraBuilder/k8s-bench-suite#knb--kubernetes-network-be). - -This benchmark tested Constellation on Azure and GCP and compared the results against the managed Kubernetes offerings AKS and GKE. - -## Configurations - -### Constellation - -The benchmark was conducted with Constellation v2.6.0, Kubernetes v1.25.7, and Cilium v1.12. -It ran on the following infrastructure configurations. - -Constellation on Azure: - -- Nodes: 3 (1 Control-plane, 2 Worker) -- Machines: `DC4as_v5`: 3rd Generation AMD EPYC 7763v (Milan) processor with 4 Cores, 16 GiB memory -- CVM: `true` -- Region: `West US` -- Zone: `2` - -Constellation on GCP: - -- Nodes: 3 (1 Control-plane, 2 Worker) -- Machines: `n2d-standard-4`: 2nd Generation AMD EPYC (Rome) processor with 4 Cores, 16 GiB of memory -- CVM: `true` -- Zone: `europe-west3-b` - -### AKS - -On AKS, the benchmark used Kubernetes `v1.24.9` and nodes with version `AKSUbuntu-1804gen2containerd-2023.02.15`. 
-AKS ran with the [`kubenet`](https://learn.microsoft.com/en-us/azure/aks/concepts-network#kubenet-basic-networking) CNI and the [default CSI driver](https://learn.microsoft.com/en-us/azure/aks/azure-disk-csi) for Azure Disk. - -The following infrastructure configuration was used: - -- Nodes: 2 (2 Worker) -- Machines: `D4as_v5`: 3rd Generation AMD EPYC 7763v (Milan) processor with 4 Cores, 16 GiB memory -- CVM: `false` -- Region: `West US` -- Zone: `2` - -### GKE - -On GKE, the benchmark used Kubernetes `v1.24.9` and nodes with version `1.24.9-gke.3200`. -GKE ran with the [`kubenet`](https://cloud.google.com/kubernetes-engine/docs/concepts/network-overview) CNI and the [default CSI driver](https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/gce-pd-csi-driver) for Compute Engine persistent disk. - -The following infrastructure configuration was used: - -- Nodes: 2 (2 Worker) -- Machines: `n2d-standard-4` 2nd Generation AMD EPYC (Rome) processor with 4 Cores, 16 GiB of memory -- CVM: `false` -- Zone: `europe-west3-b` - -## Results - -### Network - -This section gives a thorough analysis of the network performance of Constellation, specifically focusing on measuring TCP and UDP bandwidth. -The benchmark measured the bandwidth of pod-to-pod and pod-to-service connections between two different nodes using [`iperf`](https://iperf.fr/). - -GKE and Constellation on GCP had a maximum network bandwidth of [10 Gbps](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines). -AKS with `Standard_D4as_v5` machines has a maximum network bandwidth of [12.5 Gbps](https://learn.microsoft.com/en-us/azure/virtual-machines/dasv5-dadsv5-series#dasv5-series). -The Confidential VM equivalent `Standard_DC4as_v5` currently has a network bandwidth of [1.25 Gbps](https://learn.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series#dcasv5-series-products). -Therefore, to make the test comparable, both AKS and Constellation on Azure were running with `Standard_DC4as_v5` machines and 1.25 Gbps bandwidth. - -Constellation on Azure and AKS used an MTU of 1500. -Constellation on GCP used an MTU of 8896. GKE used an MTU of 1450. - -The difference in network bandwidth can largely be attributed to two factors. - -- Constellation's [network encryption](../../architecture/networking.md) via Cilium and WireGuard, which protects data in-transit. -- [AMD SEV using SWIOTLB bounce buffers](https://lore.kernel.org/all/20200204193500.GA15564@ashkalra_ubuntu_server/T/) for all DMA including network I/O. - -#### Pod-to-Pod - -In this scenario, the client Pod connects directly to the server Pod via its IP address. - -```mermaid -flowchart LR - subgraph Node A - Client[Client] - end - subgraph Node B - Server[Server] - end - Client ==>|traffic| Server -``` - -The results for "Pod-to-Pod" on Azure are as follows: - -![Network Pod2Pod Azure benchmark graph](../../_media/benchmark_net_p2p_azure.png) - -The results for "Pod-to-Pod" on GCP are as follows: - -![Network Pod2Pod GCP benchmark graph](../../_media/benchmark_net_p2p_gcp.png) - -#### Pod-to-Service - -In this scenario, the client Pod connects to the server Pod via a ClusterIP service. This is more relevant to real-world use cases.
- -```mermaid -flowchart LR - subgraph Node A - Client[Client] ==>|traffic| Service[Service] - end - subgraph Node B - Server[Server] - end - Service ==>|traffic| Server -``` - -The results for "Pod-to-Pod" on Azure are as follows: - -![Network Pod2SVC Azure benchmark graph](../../_media/benchmark_net_p2svc_azure.png) - -The results for "Pod-to-Pod" on GCP are as follows: - -![Network Pod2SVC GCP benchmark graph](../../_media/benchmark_net_p2svc_gcp.png) - -In our recent comparison of Constellation on GCP with GKE, Constellation has 58% less TCP bandwidth. However, UDP bandwidth was slightly better with Constellation, thanks to its higher MTU. - -Similarly, when comparing Constellation on Azure with AKS using CVMs, Constellation achieved approximately 10% less TCP and 40% less UDP bandwidth. - -### Storage I/O - -Azure and GCP offer persistent storage for their Kubernetes services AKS and GKE via the Container Storage Interface (CSI). CSI storage in Kubernetes is available via `PersistentVolumes` (PV) and consumed via `PersistentVolumeClaims` (PVC). -Upon requesting persistent storage through a PVC, GKE and AKS will provision a PV as defined by a default [storage class](https://kubernetes.io/docs/concepts/storage/storage-classes/). -Constellation provides persistent storage on Azure and GCP [that's encrypted on the CSI layer](../../architecture/encrypted-storage.md). -Similarly, upon a PVC request, Constellation will provision a PV via a default storage class. - -For Constellation on Azure and AKS, the benchmark ran with Azure Disk storage [Standard SSD](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#standard-ssds) of 400 GiB size. -The [DC4as machine type](https://learn.microsoft.com/en-us/azure/virtual-machines/dasv5-dadsv5-series#dasv5-series) with four cores provides the following maximum performance: - -- 6400 (20000 burst) IOPS -- 144 MB/s (600 MB/s burst) throughput - -However, the performance is bound by the capabilities of the [512 GiB Standard SSD size](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#standard-ssds) (the size class of 400 GiB volumes): - -- 500 (600 burst) IOPS -- 60 MB/s (150 MB/s burst) throughput - -For Constellation on GCP and GKE, the benchmark ran with Compute Engine Persistent Disk Storage [pd-balanced](https://cloud.google.com/compute/docs/disks) of 400 GiB size. -The N2D machine type with four cores and pd-balanced provides the following [maximum performance](https://cloud.google.com/compute/docs/disks/performance#n2d_vms): - -- 3,000 read IOPS -- 15,000 write IOPS -- 240 MB/s read throughput -- 240 MB/s write throughput - -However, the performance is bound by the capabilities of a [`Zonal balanced PD`](https://cloud.google.com/compute/docs/disks/performance#zonal-persistent-disks) with 400 GiB size: - -- 2400 read IOPS -- 2400 write IOPS -- 112 MB/s read throughput -- 112 MB/s write throughput - -The [`fio`](https://fio.readthedocs.io/en/latest/fio_doc.html) benchmark consists of several tests. -The benchmark used [`Kubestr`](https://github.com/kastenhq/kubestr) to run `fio` in Kubernetes. -The default test performs randomized access patterns that accurately depict worst-case I/O scenarios for most applications. 
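As a rough sketch, a Kubestr-driven `fio` run against a cluster's default storage class might look as follows; the storage class name and job file are illustrative, and the exact flags can differ between Kubestr versions:

```bash
# Sketch: let Kubestr provision a PVC from the given storage class and run the fio job against it (names illustrative).
kubestr fio -s encrypted-rwo -z 400Gi -f fio.ini
```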
- -The following `fio` settings were used: - -- No cloud caching -- No OS caching -- Single CPU -- 60 seconds runtime -- 10 seconds ramp-up time -- 10 GiB file -- IOPS: 4 KB blocks and 128 iodepth -- Bandwidth: 1024 KB blocks and 128 iodepth - -For more details, see the [`fio` test configuration](https://github.com/edgelesssys/constellation/blob/main/.github/actions/e2e_benchmark/fio.ini). - -The results for IOPS on Azure are as follows: - -![I/O IOPS Azure benchmark graph](../../_media/benchmark_fio_azure_iops.png) - -The results for IOPS on GCP are as follows: - -![I/O IOPS GCP benchmark graph](../../_media/benchmark_fio_gcp_iops.png) - -The results for bandwidth on Azure are as follows: - -![I/O bandwidth Azure benchmark graph](../../_media/benchmark_fio_azure_bw.png) - -The results for bandwidth on GCP are as follows: - -![I/O bandwidth GCP benchmark graph](../../_media/benchmark_fio_gcp_bw.png) - -On GCP, the results exceed the maximum performance guarantees of the chosen disk type. There are two possible explanations for this. The first is that there may be cloud caching in place that isn't configurable. Alternatively, the underlying provisioned disk size may be larger than what was requested, resulting in higher performance boundaries. - -When comparing Constellation on GCP with GKE, Constellation has similar bandwidth but about 10% less IOPS performance. On Azure, Constellation has similar IOPS performance compared to AKS, where both likely hit the maximum storage performance. However, Constellation has approximately 15% less read and write bandwidth. - -## Conclusion - -Despite the added [security benefits](../security-benefits.md) that Constellation provides, it only incurs a slight performance overhead when compared to managed Kubernetes offerings such as AKS and GKE. In most compute benchmarks, Constellation is on par with its alternatives. -While it may be slightly slower in certain I/O scenarios due to network and storage encryption, there is ongoing work to reduce this overhead to single digits. - -For instance, storage encryption only adds between 10% and 15% overhead in terms of bandwidth and IOPS. -Meanwhile, the biggest performance impact that Constellation currently faces is network encryption, which can incur up to 58% overhead on a 10 Gbps network. -However, the Cilium team has conducted [benchmarks with Cilium using WireGuard encryption](https://docs.cilium.io/en/latest/operations/performance/benchmark/#encryption-wireguard-ipsec) on a 100 Gbps network that yielded over 15 Gbps. -We're confident that Constellation will provide a similar level of performance with an upcoming release. - -Overall, Constellation strikes a great balance between security and performance, and we're continuously working to improve its performance capabilities while maintaining its high level of security. diff --git a/docs/versioned_docs/version-2.18/overview/performance/performance.md b/docs/versioned_docs/version-2.18/overview/performance/performance.md deleted file mode 100644 index 59bf86602..000000000 --- a/docs/versioned_docs/version-2.18/overview/performance/performance.md +++ /dev/null @@ -1,17 +0,0 @@ -# Performance analysis of Constellation - -This section provides a comprehensive examination of the performance characteristics of Constellation. - -## Runtime encryption - -Runtime encryption affects compute performance.
[Benchmarks by Azure and Google](compute.md) show that the performance degradation of Confidential VMs (CVMs) is small, ranging from 2% to 8% for compute-intensive workloads. - -## I/O performance benchmarks - -We evaluated the [I/O performance](io.md) of Constellation, utilizing a collection of synthetic benchmarks targeting networking and storage. -We further compared this performance to native managed Kubernetes offerings from various cloud providers, to better understand how Constellation stands in relation to standard practices. - -## Application benchmarking - -To gauge Constellation's applicability to well-known applications, we performed a [benchmark of HashiCorp Vault](application.md) running on Constellation. -The results were then compared to deployments on the managed Kubernetes offerings from different cloud providers, providing a tangible perspective on Constellation's performance in actual deployment scenarios. diff --git a/docs/versioned_docs/version-2.18/overview/product.md b/docs/versioned_docs/version-2.18/overview/product.md deleted file mode 100644 index 4b5d90706..000000000 --- a/docs/versioned_docs/version-2.18/overview/product.md +++ /dev/null @@ -1,12 +0,0 @@ -# Product features - -Constellation is a Kubernetes engine that aims to provide the best possible data security in combination with enterprise-grade scalability and reliability features---and a smooth user experience. - -From a security perspective, Constellation implements the [Confidential Kubernetes](confidential-kubernetes.md) concept and corresponding security features, which shield your entire cluster from the underlying infrastructure. - -From an operational perspective, Constellation provides the following key features: - -* **Native support for different clouds**: Constellation works on Amazon Web Services (AWS), Microsoft Azure, Google Cloud Platform (GCP), and STACKIT. Support for OpenStack-based environments is coming with a future release. Constellation securely interfaces with the cloud infrastructure to provide [cluster autoscaling](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), [dynamic persistent volumes](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/), and [service load balancing](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). -* **High availability**: Constellation uses a [multi-master architecture](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/) with a [stacked etcd topology](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/ha-topology/#stacked-etcd-topology) to ensure high availability. -* **Integrated Day-2 operations**: Constellation lets you securely [upgrade](../workflows/upgrade.md) your cluster to a new release. It also lets you securely [recover](../workflows/recovery.md) a failed cluster. Both with a single command. -* **Support for Terraform**: Constellation includes a [Terraform provider](../workflows/terraform-provider.md) that lets you manage the full lifecycle of your cluster via Terraform. 
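The Day-2 operations listed above map to single CLI invocations, which are documented in detail in the CLI reference later in this change; for example (the endpoint value is illustrative):

```bash
# Check for available upgrades and apply them to the running cluster.
constellation upgrade check
constellation upgrade apply

# Recover a completely stopped cluster by sending the recovery key to a node in the boot stage.
constellation recover -e 203.0.113.10
```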
 diff --git a/docs/versioned_docs/version-2.18/overview/security-benefits.md b/docs/versioned_docs/version-2.18/overview/security-benefits.md deleted file mode 100644 index 51a8b64f5..000000000 --- a/docs/versioned_docs/version-2.18/overview/security-benefits.md +++ /dev/null @@ -1,22 +0,0 @@ -# Security benefits and threat model - -Constellation implements the [Confidential Kubernetes](confidential-kubernetes.md) concept and shields entire Kubernetes deployments from the infrastructure. More concretely, Constellation decreases the size of the trusted computing base (TCB) of a Kubernetes deployment. The TCB is the totality of elements in a computing environment that must be trusted not to be compromised. A smaller TCB results in a smaller attack surface. The following diagram shows how Constellation removes the *cloud & datacenter infrastructure* and the *physical hosts*, including the hypervisor, the host OS, and other components, from the TCB (red). Inside the confidential context (green), Kubernetes remains part of the TCB, but its integrity is attested and can be [verified](../workflows/verify-cluster.md). - -![TCB comparison](../_media/tcb.svg) - -Given this background, the following describes the concrete threat classes that Constellation addresses. - -## Insider access - -Employees and third-party contractors of cloud service providers (CSPs) have access to different layers of the cloud infrastructure. -This opens up a large attack surface where workloads and data can be read, copied, or manipulated. With Constellation, Kubernetes deployments are shielded from the infrastructure, and thus such access is prevented. - -## Infrastructure-based attacks - -Malicious cloud users ("hackers") may break out of their tenancy and access other tenants' data. Advanced attackers may even be able to establish a permanent foothold within the infrastructure and access data over a longer period. Analogously to the *insider access* scenario, Constellation also prevents access to a deployment's data in this scenario. - -## Supply chain attacks - -Supply chain security has received a lot of attention recently due to an [increasing number of recorded attacks](https://www.enisa.europa.eu/news/enisa-news/understanding-the-increase-in-supply-chain-security-attacks). For instance, a malicious actor could attempt to tamper with Constellation node images (including Kubernetes and other software) before they're loaded in the confidential VMs of a cluster. Constellation uses [remote attestation](../architecture/attestation.md) in conjunction with public [transparency logs](../workflows/verify-cli.md) to prevent this. - -In the future, Constellation will extend this feature to customer workloads. This will enable cluster owners to create auditable policies that precisely define which containers can run in a given deployment. diff --git a/docs/versioned_docs/version-2.18/reference/cli.md b/docs/versioned_docs/version-2.18/reference/cli.md deleted file mode 100644 index 99acef520..000000000 --- a/docs/versioned_docs/version-2.18/reference/cli.md +++ /dev/null @@ -1,844 +0,0 @@ -# CLI reference - - - -Use the Constellation CLI to create and manage your clusters.
- -Usage: - -``` -constellation [command] -``` -Commands: - -* [config](#constellation-config): Work with the Constellation configuration file - * [generate](#constellation-config-generate): Generate a default configuration and state file - * [fetch-measurements](#constellation-config-fetch-measurements): Fetch measurements for configured cloud provider and image - * [instance-types](#constellation-config-instance-types): Print the supported instance types for all cloud providers - * [kubernetes-versions](#constellation-config-kubernetes-versions): Print the Kubernetes versions supported by this CLI - * [migrate](#constellation-config-migrate): Migrate a configuration file to a new version -* [create](#constellation-create): Create instances on a cloud platform for your Constellation cluster -* [apply](#constellation-apply): Apply a configuration to a Constellation cluster -* [mini](#constellation-mini): Manage MiniConstellation clusters - * [up](#constellation-mini-up): Create and initialize a new MiniConstellation cluster - * [down](#constellation-mini-down): Destroy a MiniConstellation cluster -* [status](#constellation-status): Show status of a Constellation cluster -* [verify](#constellation-verify): Verify the confidential properties of a Constellation cluster -* [upgrade](#constellation-upgrade): Find and apply upgrades to your Constellation cluster - * [check](#constellation-upgrade-check): Check for possible upgrades - * [apply](#constellation-upgrade-apply): Apply an upgrade to a Constellation cluster -* [recover](#constellation-recover): Recover a completely stopped Constellation cluster -* [terminate](#constellation-terminate): Terminate a Constellation cluster -* [iam](#constellation-iam): Work with the IAM configuration on your cloud provider - * [create](#constellation-iam-create): Create IAM configuration on a cloud platform for your Constellation cluster - * [aws](#constellation-iam-create-aws): Create IAM configuration on AWS for your Constellation cluster - * [azure](#constellation-iam-create-azure): Create IAM configuration on Microsoft Azure for your Constellation cluster - * [gcp](#constellation-iam-create-gcp): Create IAM configuration on GCP for your Constellation cluster - * [destroy](#constellation-iam-destroy): Destroy an IAM configuration and delete local Terraform files - * [upgrade](#constellation-iam-upgrade): Find and apply upgrades to your IAM profile - * [apply](#constellation-iam-upgrade-apply): Apply an upgrade to an IAM profile -* [version](#constellation-version): Display version of this CLI -* [init](#constellation-init): Initialize the Constellation cluster - -## constellation config - -Work with the Constellation configuration file - -### Synopsis - -Work with the Constellation configuration file. - -### Options - -``` - -h, --help help for config -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation config generate - -Generate a default configuration and state file - -### Synopsis - -Generate a default configuration and state file for your selected cloud provider. 
- -``` -constellation config generate {aws|azure|gcp|openstack|qemu|stackit} [flags] -``` - -### Options - -``` - -a, --attestation string attestation variant to use {aws-sev-snp|aws-nitro-tpm|azure-sev-snp|azure-tdx|azure-trustedlaunch|gcp-sev-snp|gcp-sev-es|qemu-vtpm}. If not specified, the default for the cloud provider is used - -h, --help help for generate - -k, --kubernetes string Kubernetes version to use in format MAJOR.MINOR (default "v1.29") - -t, --tags strings additional tags for created resources given a list of key=value -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation config fetch-measurements - -Fetch measurements for configured cloud provider and image - -### Synopsis - -Fetch measurements for configured cloud provider and image. - -A config needs to be generated first. - -``` -constellation config fetch-measurements [flags] -``` - -### Options - -``` - -h, --help help for fetch-measurements - -s, --signature-url string alternative URL to fetch measurements' signature from - -u, --url string alternative URL to fetch measurements from -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation config instance-types - -Print the supported instance types for all cloud providers - -### Synopsis - -Print the supported instance types for all cloud providers. - -``` -constellation config instance-types [flags] -``` - -### Options - -``` - -h, --help help for instance-types -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation config kubernetes-versions - -Print the Kubernetes versions supported by this CLI - -### Synopsis - -Print the Kubernetes versions supported by this CLI. - -``` -constellation config kubernetes-versions [flags] -``` - -### Options - -``` - -h, --help help for kubernetes-versions -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation config migrate - -Migrate a configuration file to a new version - -### Synopsis - -Migrate a configuration file to a new version. - -``` -constellation config migrate [flags] -``` - -### Options - -``` - -h, --help help for migrate -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation create - -Create instances on a cloud platform for your Constellation cluster - -### Synopsis - -Create instances on a cloud platform for your Constellation cluster. 
- -``` -constellation create [flags] -``` - -### Options - -``` - -h, --help help for create - -y, --yes create the cluster without further confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation apply - -Apply a configuration to a Constellation cluster - -### Synopsis - -Apply a configuration to a Constellation cluster to initialize or upgrade the cluster. - -``` -constellation apply [flags] -``` - -### Options - -``` - --conformance enable conformance mode - -h, --help help for apply - --merge-kubeconfig merge Constellation kubeconfig file with default kubeconfig file in $HOME/.kube/config - --skip-helm-wait install helm charts without waiting for deployments to be ready - --skip-phases strings comma-separated list of upgrade phases to skip - one or multiple of { infrastructure | init | attestationconfig | certsans | helm | image | k8s } - -y, --yes run command without further confirmation - WARNING: the command might delete or update existing resources without additional checks. Please read the docs. - -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation mini - -Manage MiniConstellation clusters - -### Synopsis - -Manage MiniConstellation clusters. - -### Options - -``` - -h, --help help for mini -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation mini up - -Create and initialize a new MiniConstellation cluster - -### Synopsis - -Create and initialize a new MiniConstellation cluster. - -A mini cluster consists of a single control-plane and worker node, hosted using QEMU/KVM. - -``` -constellation mini up [flags] -``` - -### Options - -``` - -h, --help help for up - --merge-kubeconfig merge Constellation kubeconfig file with default kubeconfig file in $HOME/.kube/config (default true) -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation mini down - -Destroy a MiniConstellation cluster - -### Synopsis - -Destroy a MiniConstellation cluster. - -``` -constellation mini down [flags] -``` - -### Options - -``` - -h, --help help for down - -y, --yes terminate the cluster without further confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation status - -Show status of a Constellation cluster - -### Synopsis - -Show the status of a constellation cluster. 
- -Shows microservice, image, and Kubernetes versions installed in the cluster. Also shows status of current version upgrades. - -``` -constellation status [flags] -``` - -### Options - -``` - -h, --help help for status -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation verify - -Verify the confidential properties of a Constellation cluster - -### Synopsis - -Verify the confidential properties of a Constellation cluster. -If arguments aren't specified, values are read from `constellation-state.yaml`. - -``` -constellation verify [flags] -``` - -### Options - -``` - --cluster-id string expected cluster identifier - -h, --help help for verify - -e, --node-endpoint string endpoint of the node to verify, passed as HOST[:PORT] - -o, --output string print the attestation document in the output format {json|raw} -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation upgrade - -Find and apply upgrades to your Constellation cluster - -### Synopsis - -Find and apply upgrades to your Constellation cluster. - -### Options - -``` - -h, --help help for upgrade -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation upgrade check - -Check for possible upgrades - -### Synopsis - -Check which upgrades can be applied to your Constellation Cluster. - -``` -constellation upgrade check [flags] -``` - -### Options - -``` - -h, --help help for check - --ref string the reference to use for querying new versions (default "-") - --stream string the stream to use for querying new versions (default "stable") - -u, --update-config update the specified config file with the suggested versions -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation upgrade apply - -Apply an upgrade to a Constellation cluster - -### Synopsis - -Apply an upgrade to a Constellation cluster by applying the chosen configuration. - -``` -constellation upgrade apply [flags] -``` - -### Options - -``` - --conformance enable conformance mode - -h, --help help for apply - --skip-helm-wait install helm charts without waiting for deployments to be ready - --skip-phases strings comma-separated list of upgrade phases to skip - one or multiple of { infrastructure | helm | image | k8s } - -y, --yes run upgrades without further confirmation - WARNING: might delete your resources in case you are using cert-manager in your cluster. Please read the docs. - WARNING: might unintentionally overwrite measurements in the running cluster. 
-``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation recover - -Recover a completely stopped Constellation cluster - -### Synopsis - -Recover a Constellation cluster by sending a recovery key to an instance in the boot stage. - -This is only required if instances restart without other instances available for bootstrapping. - -``` -constellation recover [flags] -``` - -### Options - -``` - -e, --endpoint string endpoint of the instance, passed as HOST[:PORT] - -h, --help help for recover -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation terminate - -Terminate a Constellation cluster - -### Synopsis - -Terminate a Constellation cluster. - -The cluster can't be started again, and all persistent storage will be lost. - -``` -constellation terminate [flags] -``` - -### Options - -``` - -h, --help help for terminate - -y, --yes terminate the cluster without further confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation iam - -Work with the IAM configuration on your cloud provider - -### Synopsis - -Work with the IAM configuration on your cloud provider. - -### Options - -``` - -h, --help help for iam -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation iam create - -Create IAM configuration on a cloud platform for your Constellation cluster - -### Synopsis - -Create IAM configuration on a cloud platform for your Constellation cluster. - -### Options - -``` - -h, --help help for create - --update-config update the config file with the specific IAM information - -y, --yes create the IAM configuration without further confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation iam create aws - -Create IAM configuration on AWS for your Constellation cluster - -### Synopsis - -Create IAM configuration on AWS for your Constellation cluster. - -``` -constellation iam create aws [flags] -``` - -### Options - -``` - -h, --help help for aws - --prefix string name prefix for all resources (required) - --zone string AWS availability zone the resources will be created in, e.g., us-east-2a (required) - See the Constellation docs for a list of currently supported regions. 
-``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - --update-config update the config file with the specific IAM information - -C, --workspace string path to the Constellation workspace - -y, --yes create the IAM configuration without further confirmation -``` - -## constellation iam create azure - -Create IAM configuration on Microsoft Azure for your Constellation cluster - -### Synopsis - -Create IAM configuration on Microsoft Azure for your Constellation cluster. - -``` -constellation iam create azure [flags] -``` - -### Options - -``` - -h, --help help for azure - --region string region the resources will be created in, e.g., westus (required) - --resourceGroup string name prefix of the two resource groups your cluster / IAM resources will be created in (required) - --servicePrincipal string name of the service principal that will be created (required) - --subscriptionID string subscription ID of the Azure account. Required if the 'ARM_SUBSCRIPTION_ID' environment variable is not set -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - --update-config update the config file with the specific IAM information - -C, --workspace string path to the Constellation workspace - -y, --yes create the IAM configuration without further confirmation -``` - -## constellation iam create gcp - -Create IAM configuration on GCP for your Constellation cluster - -### Synopsis - -Create IAM configuration on GCP for your Constellation cluster. - -``` -constellation iam create gcp [flags] -``` - -### Options - -``` - -h, --help help for gcp - --projectID string ID of the GCP project the configuration will be created in (required) - Find it on the welcome screen of your project: https://console.cloud.google.com/welcome - --serviceAccountID string ID for the service account that will be created (required) - Must be 6 to 30 lowercase letters, digits, or hyphens. - --zone string GCP zone the cluster will be deployed in (required) - Find a list of available zones here: https://cloud.google.com/compute/docs/regions-zones#available -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - --update-config update the config file with the specific IAM information - -C, --workspace string path to the Constellation workspace - -y, --yes create the IAM configuration without further confirmation -``` - -## constellation iam destroy - -Destroy an IAM configuration and delete local Terraform files - -### Synopsis - -Destroy an IAM configuration and delete local Terraform files. 
- -``` -constellation iam destroy [flags] -``` - -### Options - -``` - -h, --help help for destroy - -y, --yes destroy the IAM configuration without asking for confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation iam upgrade - -Find and apply upgrades to your IAM profile - -### Synopsis - -Find and apply upgrades to your IAM profile. - -### Options - -``` - -h, --help help for upgrade -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation iam upgrade apply - -Apply an upgrade to an IAM profile - -### Synopsis - -Apply an upgrade to an IAM profile. - -``` -constellation iam upgrade apply [flags] -``` - -### Options - -``` - -h, --help help for apply - -y, --yes run upgrades without further confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation version - -Display version of this CLI - -### Synopsis - -Display version of this CLI. - -``` -constellation version [flags] -``` - -### Options - -``` - -h, --help help for version -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation init - -Initialize the Constellation cluster - -### Synopsis - -Initialize the Constellation cluster. - -Start your confidential Kubernetes. - -``` -constellation init [flags] -``` - -### Options - -``` - --conformance enable conformance mode - -h, --help help for init - --merge-kubeconfig merge Constellation kubeconfig file with default kubeconfig file in $HOME/.kube/config - --skip-helm-wait install helm charts without waiting for deployments to be ready -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - diff --git a/docs/versioned_docs/version-2.18/reference/migration.md b/docs/versioned_docs/version-2.18/reference/migration.md deleted file mode 100644 index 36680eef6..000000000 --- a/docs/versioned_docs/version-2.18/reference/migration.md +++ /dev/null @@ -1,85 +0,0 @@ -# Migrations - -This document describes breaking changes and migrations between Constellation releases. -Use [`constellation config migrate`](./cli.md#constellation-config-migrate) to automatically update an old config file to a new format. - -## Migrating from Azure's service principal authentication to managed identity authentication - -- The `provider.azure.appClientID` and `provider.azure.appClientSecret` fields are no longer supported and should be removed. 
-- To keep using an existing UAMI, add the `Owner` permission with the scope of your `resourceGroup`. -- Otherwise, simply [create new Constellation IAM credentials](../workflows/config.md#creating-an-iam-configuration) and use the created UAMI. -- To migrate the authentication for an existing cluster on Azure to an UAMI with the necessary permissions: - 1. Remove the `aadClientId` and `aadClientSecret` from the azureconfig secret. - 2. Set `useManagedIdentityExtension` to `true` and use the `userAssignedIdentity` from the Constellation config for the value of `userAssignedIdentityID`. - 3. Restart the CSI driver, cloud controller manager, cluster autoscaler, and Constellation operator pods. - - -## Migrating from CLI versions before 2.10 - -- AWS cluster upgrades require additional IAM permissions for the newly introduced `aws-load-balancer-controller`. Please upgrade your IAM roles using `iam upgrade apply`. This will show necessary changes and apply them, if desired. -- The global `nodeGroups` field was added. -- The fields `instanceType`, `stateDiskSizeGB`, and `stateDiskType` for each cloud provider are now part of the configuration of individual node groups. -- The `constellation create` command no longer uses the flags `--control-plane-count` and `--worker-count`. Instead, the initial node count is configured per node group in the `nodeGroups` field. - -## Migrating from CLI versions before 2.9 - -- The `provider.azure.appClientID` and `provider.azure.clientSecretValue` fields were removed to enforce migration to managed identity authentication - -## Migrating from CLI versions before 2.8 - -- The `measurements` field for each cloud service provider was replaced with a global `attestation` field. -- The `confidentialVM`, `idKeyDigest`, and `enforceIdKeyDigest` fields for the Azure cloud service provider were removed in favor of using the global `attestation` field. -- The optional global field `attestationVariant` was replaced by the now required `attestation` field. - -## Migrating from CLI versions before 2.3 - -- The `sshUsers` field was deprecated in v2.2 and has been removed from the configuration in v2.3. - As an alternative for SSH, check the workflow section [Connect to nodes](../workflows/troubleshooting.md#node-shell-access). -- The `image` field for each cloud service provider has been replaced with a global `image` field. Use the following mapping to migrate your configuration: -
- Show all - - | CSP | old image | new image | - | ----- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- | - | AWS | `ami-06b8cbf4837a0a57c` | `v2.2.2` | - | AWS | `ami-02e96dc04a9e438cd` | `v2.2.2` | - | AWS | `ami-028ead928a9034b2f` | `v2.2.2` | - | AWS | `ami-032ac10dd8d8266e3` | `v2.2.1` | - | AWS | `ami-032e0d57cc4395088` | `v2.2.1` | - | AWS | `ami-053c3e49e19b96bdd` | `v2.2.1` | - | AWS | `ami-0e27ebcefc38f648b` | `v2.2.0` | - | AWS | `ami-098cd37f66523b7c3` | `v2.2.0` | - | AWS | `ami-04a87d302e2509aad` | `v2.2.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.2.2` | `v2.2.2` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.2.2` | `v2.2.2` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.2.1` | `v2.2.1` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.2.1` | `v2.2.1` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.2.0` | `v2.2.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.2.0` | `v2.2.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.1.0` | `v2.1.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.1.0` | `v2.1.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.0.0` | `v2.0.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.0.0` | `v2.0.0` | - | GCP | `projects/constellation-images/global/images/constellation-v2-2-2` | `v2.2.2` | - | GCP | `projects/constellation-images/global/images/constellation-v2-2-1` | `v2.2.1` | - | GCP | `projects/constellation-images/global/images/constellation-v2-2-0` | `v2.2.0` | - | GCP | `projects/constellation-images/global/images/constellation-v2-1-0` | `v2.1.0` | - | GCP | `projects/constellation-images/global/images/constellation-v2-0-0` | `v2.0.0` | -
-- The `enforcedMeasurements` field has been removed and merged with the `measurements` field. - - To migrate your config containing a new image (`v2.3` or greater), remove the old `measurements` and `enforcedMeasurements` entries from your config and run `constellation fetch-measurements` - - To migrate your config containing an image older than `v2.3`, remove the `enforcedMeasurements` entry and replace the entries in `measurements` as shown in the example below: - - ```diff - measurements: - - 0: DzXCFGCNk8em5ornNZtKi+Wg6Z7qkQfs5CfE3qTkOc8= - + 0: - + expected: DzXCFGCNk8em5ornNZtKi+Wg6Z7qkQfs5CfE3qTkOc8= - + warnOnly: true - - 8: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= - + 8: - + expected: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= - + warnOnly: false - -enforcedMeasurements: - - - 8 - ``` diff --git a/docs/versioned_docs/version-2.18/reference/slsa.md b/docs/versioned_docs/version-2.18/reference/slsa.md deleted file mode 100644 index 21f4e713c..000000000 --- a/docs/versioned_docs/version-2.18/reference/slsa.md +++ /dev/null @@ -1,73 +0,0 @@ -# Supply chain levels for software artifacts (SLSA) adoption - -[Supply chain Levels for Software Artifacts, or SLSA (salsa)](https://slsa.dev/) is a framework for improving and grading a project's build system and engineering processes. SLSA focuses on security improvements for source code storage as well as build system definition, execution, and observation. SLSA is structured in [four levels](https://slsa.dev/spec/v0.1/levels). This page describes the adoption of SLSA for Constellation. - -:::info -SLSA is still in alpha status. The presented levels and their requirements might change in the future. We will adopt any changes into our engineering processes, as they get defined. -::: - -## Level 1 - Adopted - -**[Build - Scripted](https://slsa.dev/spec/v0.1/requirements#scripted-build)** - -All build steps are automated via [Bazel](https://github.com/edgelesssys/constellation/tree/main/bazel/ci) and [GitHub Actions](https://github.com/edgelesssys/constellation/tree/main/.github). - -**[Provenance - Available](https://slsa.dev/spec/v0.1/requirements#available)** - -Provenance for the CLI is generated using the [slsa-github-generator](https://github.com/slsa-framework/slsa-github-generator). - -## Level 2 - Adopted - -**[Source - Version Controlled](https://slsa.dev/spec/v0.1/requirements#version-controlled)** - -Constellation is hosted on GitHub using git. - -**[Build - Build Service](https://slsa.dev/spec/v0.1/requirements#build-service)** - -All builds are carried out by [GitHub Actions](https://github.com/edgelesssys/constellation/tree/main/.github). - -**[Provenance - Authenticated](https://slsa.dev/spec/v0.1/requirements#authenticated)** - -Provenance for the CLI is signed using the [slsa-github-generator](https://github.com/slsa-framework/slsa-github-generator). Learn [how to verify the CLI](../workflows/verify-cli.md) using the signed provenance, before using it for the first time. - -**[Provenance - Service Generated](https://slsa.dev/spec/v0.1/requirements#service-generated)** - -Provenance for the CLI is generated using the [slsa-github-generator](https://github.com/slsa-framework/slsa-github-generator) in GitHub Actions. 
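-
-As an illustration, provenance produced this way can be checked with the [slsa-verifier](https://github.com/slsa-framework/slsa-verifier) tool. The file names below are placeholders; see [how to verify the CLI](../workflows/verify-cli.md) for the exact, supported procedure.
-
-```bash
-# Hypothetical artifact and provenance file names, downloaded from a release page.
-slsa-verifier verify-artifact constellation-linux-amd64 \
-  --provenance-path constellation.intoto.jsonl \
-  --source-uri github.com/edgelesssys/constellation
-```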
-
-## Level 3 - Adopted
-
-**[Source - Verified History](https://slsa.dev/spec/v0.1/requirements#verified-history)**
-
-The [Edgeless Systems](https://github.com/edgelesssys) GitHub organization [requires two-factor authentication](https://docs.github.com/en/organizations/keeping-your-organization-secure/managing-two-factor-authentication-for-your-organization/requiring-two-factor-authentication-in-your-organization) for all members.
-
-**[Source - Retained Indefinitely](https://slsa.dev/spec/v0.1/requirements#retained-indefinitely)**
-
-Since we use GitHub to host the repository, an external person can't modify or delete the history. Before a pull request can be merged, an explicit approval from an [Edgeless Systems](https://github.com/edgelesssys) team member is required.
-
-The same holds true for changes proposed by team members. Each change to `main` needs to be proposed via a pull request and requires at least one approval.
-
-The [Edgeless Systems](https://github.com/edgelesssys) GitHub organization admins control these settings and are able to make changes to the repository's history should legal requirements necessitate it. These changes require two-party approval following the obliterate policy.
-
-**[Build - Build as Code](https://slsa.dev/spec/v0.1/requirements#build-as-code)**
-
-All build files for Constellation are stored in [the same repository](https://github.com/edgelesssys/constellation/tree/main/.github).
-
-**[Build - Ephemeral Environment](https://slsa.dev/spec/v0.1/requirements#ephemeral-environment)**
-
-All GitHub Action workflows are executed on [GitHub-hosted runners](https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners). These runners are only available during workflow execution.
-
-We currently don't use [self-hosted runners](https://docs.github.com/en/actions/hosting-your-own-runners/about-self-hosted-runners).
-
-**[Build - Isolated](https://slsa.dev/spec/v0.1/requirements#isolated)**
-
-As outlined in the previous section, we use GitHub-hosted runners, which provide a new, isolated, and ephemeral environment for each build.
-
-Additionally, the [SLSA GitHub generator](https://github.com/slsa-framework/slsa-github-generator#generation-of-provenance) itself is run in an isolated workflow with the artifact hash as a defined input.
-
-**[Provenance - Non-falsifiable](https://slsa.dev/spec/v0.1/requirements#non-falsifiable)**
-
-As outlined by the [SLSA GitHub generator](https://github.com/slsa-framework/slsa-github-generator) documentation, it already fulfills the non-falsifiable requirements for SLSA Level 3. The generated provenance is signed using [sigstore](https://sigstore.dev/) with an OIDC-based proof of identity.
-
-## Level 4 - In Progress
-
-We strive to adopt certain aspects of SLSA Level 4 that support our engineering process. At the same time, SLSA is still in alpha status and the biggest changes to SLSA are expected to be around Level 4.
diff --git a/docs/versioned_docs/version-2.18/reference/terraform.md b/docs/versioned_docs/version-2.18/reference/terraform.md
deleted file mode 100644
index 9825a8bb8..000000000
--- a/docs/versioned_docs/version-2.18/reference/terraform.md
+++ /dev/null
@@ -1,37 +0,0 @@
-# Terraform usage
-
-[Terraform](https://www.terraform.io/) is an Infrastructure as Code (IaC) framework to manage cloud resources. This page explains how Constellation uses it internally and how advanced users may manually use it to have more control over resource creation.
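-
-As a minimal sketch of such manual, read-only inspection (assuming the state subdirectories described below already exist in your workspace), you can point the Terraform CLI at them without changing anything:
-
-```bash
-# Read-only look at the cluster resources Constellation manages via Terraform.
-# Run from the workspace root and avoid mutating commands such as `terraform apply`.
-cd constellation-terraform
-terraform state list   # list the managed resources
-terraform output       # show the configuration's output values
-```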
- -:::info -Information on this page is intended for users who are familiar with Terraform. -It's not required for common usage of Constellation. -See the [Terraform documentation](https://developer.hashicorp.com/terraform/docs) if you want to learn more about it. -::: - -## Terraform state files - -Constellation keeps Terraform state files in subdirectories of the workspace together with the corresponding Terraform configuration files and metadata. -The subdirectories are created on the first Constellation CLI action that uses Terraform internally. - -Currently, these subdirectories are: - -* `constellation-terraform` - Terraform state files for the resources of the Constellation cluster -* `constellation-iam-terraform` - Terraform state files for IAM configuration - -As with all commands, commands that work with these files (e.g., `apply`, `terminate`, `iam`) have to be executed from the root of the cluster's [workspace directory](../architecture/orchestration.md#workspaces). You usually don't need and shouldn't manipulate or delete the subdirectories manually. - -## Interacting with Terraform manually - -Manual interaction with Terraform state created by Constellation (i.e., via the Terraform CLI) should only be performed by experienced users. It may lead to unrecoverable loss of cloud resources. For the majority of users and use cases, the interaction done by the [Constellation CLI](cli.md) is sufficient. - -## Terraform debugging - -To debug Terraform issues, the Constellation CLI offers the `tf-log` flag. You can set it to any of [Terraform's log levels](https://developer.hashicorp.com/terraform/internals/debugging): -* `JSON` (JSON-formatted logs at `TRACE` level) -* `TRACE` -* `DEBUG` -* `INFO` -* `WARN` -* `ERROR` - -The log output is written to the `terraform.log` file in the workspace directory. The output is appended to the file on each run. diff --git a/docs/versioned_docs/version-2.18/workflows/cert-manager.md b/docs/versioned_docs/version-2.18/workflows/cert-manager.md deleted file mode 100644 index 1d847e8bf..000000000 --- a/docs/versioned_docs/version-2.18/workflows/cert-manager.md +++ /dev/null @@ -1,13 +0,0 @@ -# Install cert-manager - -:::caution -If you want to use cert-manager with Constellation, pay attention to the following to avoid potential pitfalls. -::: - -Constellation ships with cert-manager preinstalled. -The default installation is part of the `kube-system` namespace, as all other Constellation-managed microservices. -You are free to install more instances of cert-manager into other namespaces. -However, be aware that any new installation needs to use the same version as the one installed with Constellation or rely on the same CRD versions. -Also remember to set the `installCRDs` value to `false` when installing new cert-manager instances. -It will create problems if you have two installations of cert-manager depending on different versions of the installed CRDs. -CRDs are cluster-wide resources and cert-manager depends on specific versions of those CRDs for each release. diff --git a/docs/versioned_docs/version-2.18/workflows/config.md b/docs/versioned_docs/version-2.18/workflows/config.md deleted file mode 100644 index a8a52980e..000000000 --- a/docs/versioned_docs/version-2.18/workflows/config.md +++ /dev/null @@ -1,353 +0,0 @@ -# Configure your cluster - -:::info -This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. 
-::: - - - ---- - -Before you can create your cluster, you need to configure the identity and access management (IAM) for your cloud service provider (CSP) and choose machine types for the nodes. - -## Creating the configuration file - -You can generate a configuration file for your CSP by using the following CLI command: - - - - -```bash -constellation config generate aws -``` - - - - -```bash -constellation config generate azure -``` - - - - -```bash -constellation config generate gcp -``` - - - - -```bash -constellation config generate stackit -``` - - - - -This creates the file `constellation-conf.yaml` in the current directory. - -## Choosing a VM type - -Constellation supports the following VM types: - - - -By default, Constellation uses `m6a.xlarge` VMs (4 vCPUs, 16 GB RAM) to create your cluster. -Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file. -If you are using the default attestation variant `awsSEVSNP`, you can use the instance types described in [AWS's AMD SEV-SNP docs](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/snp-requirements.html). -Please mind the region restrictions mentioned in the [Getting started](../getting-started/first-steps.md#create-a-cluster) section. - -If you are using the attestation variant `awsNitroTPM`, you can choose any of the [nitroTPM-enabled instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enable-nitrotpm-prerequisites.html). - -The Constellation CLI can also print the supported instance types with: `constellation config instance-types`. - - - - -By default, Constellation uses `Standard_DC4as_v5` CVMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file. For CVMs, any VM type with a minimum of 4 vCPUs from the [DCasv5 & DCadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series) or [ECasv5 & ECadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/ecasv5-ecadsv5-series) families is supported. - -You can also run `constellation config instance-types` to get the list of all supported options. - - - - -By default, Constellation uses `n2d-standard-4` VMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file. Supported are all machines with a minimum of 4 vCPUs from the [C2D](https://cloud.google.com/compute/docs/compute-optimized-machines#c2d_machine_types) or [N2D](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines) family. You can run `constellation config instance-types` to get the list of all supported options. - - - - -By default, Constellation uses `m1a.4cd` VMs (4 vCPUs, 30 GB RAM) to create your cluster. -Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file. - -The following instance types are known to be supported: - -| name | vCPUs | GB RAM | -|----------|-------|--------| -| m1a.4cd | 4 | 30 | -| m1a.8cd | 8 | 60 | -| m1a.16cd | 16 | 120 | -| m1a.30cd | 30 | 230 | - -You can choose any of the SEV-enabled instance types. You can find a list of all supported instance types in the [STACKIT documentation](https://docs.stackit.cloud/stackit/en/virtual-machine-flavors-75137231.html). - -The Constellation CLI can also print the supported instance types with: `constellation config instance-types`. 
- - - - -Fill the desired VM type into the `instanceType` fields in the `constellation-conf.yml` file. - -## Creating additional node groups - -By default, Constellation creates the node groups `control_plane_default` and `worker_default` for control-plane nodes and workers, respectively. -If you require additional control-plane or worker groups with different instance types, zone placements, or disk sizes, you can add additional node groups to the `constellation-conf.yml` file. -Each node group can be scaled individually. - -Consider the following example for AWS: - -```yaml -nodeGroups: - control_plane_default: - role: control-plane - instanceType: c6a.xlarge - stateDiskSizeGB: 30 - stateDiskType: gp3 - zone: eu-west-1c - initialCount: 3 - worker_default: - role: worker - instanceType: c6a.xlarge - stateDiskSizeGB: 30 - stateDiskType: gp3 - zone: eu-west-1c - initialCount: 2 - high_cpu: - role: worker - instanceType: c6a.24xlarge - stateDiskSizeGB: 128 - stateDiskType: gp3 - zone: eu-west-1c - initialCount: 1 -``` - -This configuration creates an additional node group `high_cpu` with a larger instance type and disk. - -You can use the field `zone` to specify what availability zone nodes of the group are placed in. -On Azure, this field is empty by default and nodes are automatically spread across availability zones. -STACKIT currently offers SEV-enabled CPUs in the `eu01-1`, `eu01-2`, and `eu01-3` zones. -Consult the documentation of your cloud provider for more information: - -* [AWS](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/) -* [Azure](https://azure.microsoft.com/en-us/explore/global-infrastructure/availability-zones) -* [GCP](https://cloud.google.com/compute/docs/regions-zones) -* [STACKIT](https://docs.stackit.cloud/stackit/en/regions-and-availability-zones-75137212.html) - -## Choosing a Kubernetes version - -To learn which Kubernetes versions can be installed with your current CLI, you can run `constellation config kubernetes-versions`. -See also Constellation's [Kubernetes support policy](../architecture/versions.md#kubernetes-support-policy). - -## Creating an IAM configuration - -You can create an IAM configuration for your cluster automatically using the `constellation iam create` command. -If you already have a Constellation configuration file, you can add the `--update-config` flag to the command. This writes the needed IAM fields into your configuration. Furthermore, the flag updates the zone/region of the configuration if it hasn't been set yet. - - - - -You must be authenticated with the [AWS CLI](https://aws.amazon.com/en/cli/) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). - -```bash -constellation iam create aws --zone=us-east-2a --prefix=constellTest -``` - -This command creates IAM configuration for the AWS zone `us-east-2a` using the prefix `constellTest` for all named resources being created. - -Constellation OS images are currently replicated to the following regions: - -* `eu-central-1` -* `eu-west-1` -* `eu-west-3` -* `us-east-2` -* `ap-south-1` - -If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+AWS+image+region:+xx-xxxx-x). 
- -You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). - -Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - - - -You must be authenticated with the [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). - -```bash -constellation iam create azure --subscriptionID 00000000-0000-0000-0000-000000000000 --region=westus --resourceGroup=constellTest --servicePrincipal=spTest -``` - -This command creates IAM configuration on the Azure region `westus` creating a new resource group `constellTest` and a new service principal `spTest`. - -CVMs are available in several Azure regions. Constellation OS images are currently replicated to the following: - -* `germanywestcentral` -* `westus` -* `eastus` -* `northeurope` -* `westeurope` -* `southeastasia` - -If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+Azure+image+region:+xx-xxxx-x). - -You can find a list of all [regions in Azure's documentation](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=virtual-machines®ions=all). - -Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - - - -You must be authenticated with the [GCP CLI](https://cloud.google.com/sdk/gcloud) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). - -```bash -constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --serviceAccountID=constell-test -``` - -This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. - -Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `N2D`. - -Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - - - -STACKIT requires manual creation and configuration of service accounts. Look at the [first steps](../getting-started/first-steps.md) for more information. - - - - -
-Alternatively, you can manually create the IAM configuration on your CSP. - -The following describes the configuration fields and how you obtain the required information or create the required resources. - - - - -* **region**: The name of your chosen AWS data center region, e.g., `us-east-2`. - - Constellation OS images are currently replicated to the following regions: - * `eu-central-1` - * `eu-west-1` - * `eu-west-3` - * `us-east-2` - * `ap-south-1` - - If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+AWS+image+region:+xx-xxxx-x). - - You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). - -* **zone**: The name of your chosen AWS data center availability zone, e.g., `us-east-2a`. - - Learn more about [availability zones in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-availability-zones). - -* **iamProfileControlPlane**: The name of an IAM instance profile attached to all control-plane nodes. - - You can create the resource with [Terraform](https://www.terraform.io/). For that, use the [provided Terraform script](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam) to generate the necessary profile. The profile name will be provided as Terraform output value: `control_plane_instance_profile_name`. - - Alternatively, you can create the AWS profile with a tool of your choice. Use the JSON policy in [main.tf](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam/main.tf) in the resource `aws_iam_policy.control_plane_policy`. - -* **iamProfileWorkerNodes**: The name of an IAM instance profile attached to all worker nodes. - - You can create the resource with [Terraform](https://www.terraform.io/). For that, use the [provided Terraform script](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam) to generate the necessary profile. The profile name will be provided as Terraform output value: `worker_nodes_instance_profile_name`. - - Alternatively, you can create the AWS profile with a tool of your choice. Use the JSON policy in [main.tf](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam/main.tf) in the resource `aws_iam_policy.worker_node_policy`. - - - - -* **subscription**: The UUID of your Azure subscription, e.g., `8b8bd01f-efd9-4113-9bd1-c82137c32da7`. - - You can view your subscription UUID via `az account show` and read the `id` field. For more information refer to [Azure's documentation](https://docs.microsoft.com/en-us/azure/azure-portal/get-subscription-tenant-id#find-your-azure-subscription). - -* **tenant**: The UUID of your Azure tenant, e.g., `3400e5a2-8fe2-492a-886c-38cb66170f25`. - - You can view your tenant UUID via `az account show` and read the `tenant` field. For more information refer to [Azure's documentation](https://docs.microsoft.com/en-us/azure/azure-portal/get-subscription-tenant-id#find-your-azure-ad-tenant). - -* **location**: The Azure datacenter location you want to deploy your cluster in, e.g., `westus`. - - CVMs are available in several Azure regions. 
Constellation OS images are currently replicated to the following: - - * `germanywestcentral` - * `westus` - * `eastus` - * `northeurope` - * `westeurope` - * `southeastasia` - - If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+Azure+image+region:+xx-xxxx-x). - - You can find a list of all [regions in Azure's documentation](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=virtual-machines®ions=all). - -* **resourceGroup**: [Create a new resource group in Azure](https://learn.microsoft.com/azure/azure-resource-manager/management/manage-resource-groups-portal) for your Constellation cluster. Set this configuration field to the name of the created resource group. - -* **userAssignedIdentity**: [Create a new managed identity in Azure](https://learn.microsoft.com/azure/active-directory/managed-identities-azure-resources/how-manage-user-assigned-managed-identities). You should create the identity in a different resource group as all resources within the cluster resource group will be deleted on cluster termination. - - Add three role assignments to the identity: `Owner`, `Virtual Machine Contributor`, and `Application Insights Component Contributor`. The `scope` of all three should refer to the previously created cluster resource group. - - Set the configuration value to the full ID of the created identity, e.g., `/subscriptions/8b8bd01f-efd9-4113-9bd1-c82137c32da7/resourcegroups/constellation-identity/providers/Microsoft.ManagedIdentity/userAssignedIdentities/constellation-identity`. You can get it by opening the `JSON View` from the `Overview` section of the identity. - - The user-assigned identity is used by instances of the cluster to access other cloud resources. - For more information about managed identities refer to [Azure's documentation](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/how-manage-user-assigned-managed-identities). - - - - -* **project**: The ID of your GCP project, e.g., `constellation-129857`. - - You can find it on the [welcome screen of your GCP project](https://console.cloud.google.com/welcome). For more information refer to [Google's documentation](https://support.google.com/googleapi/answer/7014113). - -* **region**: The GCP region you want to deploy your cluster in, e.g., `us-central1`. - - You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). - -* **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-central1-a`. - - You can find a [list of all zones in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). - -* **serviceAccountKeyPath**: To configure this, you need to create a GCP [service account](https://cloud.google.com/iam/docs/service-accounts) with the following permissions: - - * `Compute Instance Admin (v1) (roles/compute.instanceAdmin.v1)` - * `Compute Network Admin (roles/compute.networkAdmin)` - * `Compute Security Admin (roles/compute.securityAdmin)` - * `Compute Storage Admin (roles/compute.storageAdmin)` - * `Service Account User (roles/iam.serviceAccountUser)` - - Afterward, create and download a new JSON key for this service account. Place the downloaded file in your Constellation workspace, and set the config parameter to the filename, e.g., `constellation-129857-15343dba46cb.json`. 
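-
-As a rough sketch of these steps with the `gcloud` CLI (reusing the example project `constellation-129857` from above; the service account name and key file name are placeholders, and you need to repeat the role binding for each role listed above):
-
-```bash
-# Create the service account in the example project.
-gcloud iam service-accounts create constellation-sa --project=constellation-129857
-
-# Grant one of the required roles (repeat for the remaining roles from the list).
-gcloud projects add-iam-policy-binding constellation-129857 \
-  --member="serviceAccount:constellation-sa@constellation-129857.iam.gserviceaccount.com" \
-  --role="roles/compute.instanceAdmin.v1"
-
-# Create and download a JSON key; reference this file in serviceAccountKeyPath.
-gcloud iam service-accounts keys create constellation-129857-key.json \
-  --iam-account="constellation-sa@constellation-129857.iam.gserviceaccount.com"
-```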
- - - - -STACKIT requires manual creation and configuration of service accounts. Look at the [first steps](../getting-started/first-steps.md) for more information. - - - -
- -Now that you've configured your CSP, you can [create your cluster](./create.md). - -## Deleting an IAM configuration - -You can keep a created IAM configuration and reuse it for new clusters. Alternatively, you can also delete it if you don't want to use it anymore. - -Delete the IAM configuration by executing the following command in the same directory where you executed `constellation iam create` (the directory that contains [`constellation-iam-terraform`](../reference/terraform.md) as a subdirectory): - -```bash -constellation iam destroy -``` - -:::caution -For Azure, deleting the IAM configuration by executing `constellation iam destroy` will delete the whole resource group created by `constellation iam create`. -This also includes any additional resources in the resource group that weren't created by Constellation. -::: diff --git a/docs/versioned_docs/version-2.18/workflows/create.md b/docs/versioned_docs/version-2.18/workflows/create.md deleted file mode 100644 index 6074ebb16..000000000 --- a/docs/versioned_docs/version-2.18/workflows/create.md +++ /dev/null @@ -1,93 +0,0 @@ -# Create your cluster - -:::info -This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. -::: - - - ---- - -Creating your cluster happens through multiple phases. -The most significant ones are: - -1. Creating the necessary resources in your cloud environment -2. Bootstrapping the Constellation cluster and setting up a connection -3. Installing the necessary Kubernetes components - -`constellation apply` handles all this in a single command. -You can use the `--skip-phases` flag to skip specific phases of the process. -For example, if you created the infrastructure manually, you can skip the cloud resource creation phase. - -See the [architecture](../architecture/orchestration.md) section for details on the inner workings of this process. - -:::tip -If you don't have a cloud subscription, you can also set up a [local Constellation cluster using virtualization](../getting-started/first-steps-local.md) for testing. -::: - -Before you create the cluster, make sure to have a [valid configuration file](./config.md). - - - - -```bash -constellation apply -``` - -`apply` stores the state of your cluster's cloud resources in a [`constellation-terraform`](../architecture/orchestration.md#cluster-creation-process) directory in your workspace. - - - - -Self-managed infrastructure allows for more flexibility in the setup, by separating the infrastructure setup from the Constellation cluster management. -This provides flexibility in DevOps and can meet potential regulatory requirements. -It's recommended to use Terraform for infrastructure management, but you can use any tool of your choice. - -:::info - - When using Terraform, you can use the [Constellation Terraform provider](./terraform-provider.md) to manage the entire Constellation cluster lifecycle. - -::: - -You can refer to the Terraform files for the selected CSP from the [Constellation GitHub repository](https://github.com/edgelesssys/constellation/tree/main/terraform/infrastructure) for a minimum Constellation cluster configuration. From this base, you can now add, edit, or substitute resources per your own requirements with the infrastructure -management tooling of your choice. You need to keep the essential functionality of the base configuration in order for your cluster to function correctly. - - - -:::info - - On Azure, a manual update to the MAA provider's policy is necessary. 
- You can apply the update with the following command after creating the infrastructure, with `` being the URL of the MAA provider (i.e., `$(terraform output attestation_url | jq -r)`, when using the minimal Terraform configuration). - - ```bash - constellation maa-patch - ``` - -::: - - - -Make sure all necessary resources are created, e.g., through checking your CSP's portal and retrieve the necessary values, aligned with the outputs (specified in `outputs.tf`) of the base configuration. - -Fill these outputs into the corresponding fields of the `Infrastructure` block inside the `constellation-state.yaml` file. For example, fill the IP or DNS name your cluster can be reached at into the `.Infrastructure.ClusterEndpoint` field. - -With the required cloud resources set up, continue with initializing your cluster. - -```bash -constellation apply --skip-phases=infrastructure -``` - - - - -Finally, configure `kubectl` for your cluster: - -```bash -export KUBECONFIG="$PWD/constellation-admin.conf" -``` - -🏁 That's it. You've successfully created a Constellation cluster. - -### Troubleshooting - -In case `apply` fails, the CLI collects logs from the bootstrapping instance and stores them inside `constellation-cluster.log`. diff --git a/docs/versioned_docs/version-2.18/workflows/lb.md b/docs/versioned_docs/version-2.18/workflows/lb.md deleted file mode 100644 index 868e61076..000000000 --- a/docs/versioned_docs/version-2.18/workflows/lb.md +++ /dev/null @@ -1,28 +0,0 @@ -# Expose a service - -Constellation integrates the native load balancers of each CSP. Therefore, to expose a service simply [create a service of type `LoadBalancer`](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). - -## Internet-facing LB service on AWS - -To expose your application service externally you might want to use a Kubernetes Service of type `LoadBalancer`. On AWS, load-balancing is achieved through the [AWS Load Balancer Controller](https://kubernetes-sigs.github.io/aws-load-balancer-controller) as in the managed EKS. - -Since recent versions, the controller deploy an internal LB by default requiring to set an annotation `service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing` to have an internet-facing LB. For more details, see the [official docs](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.7/guide/service/nlb/). - -For general information on LB with AWS see [Network load balancing on Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/network-load-balancing.html). - -:::caution -Before terminating the cluster, all LB backed services should be deleted, so that the controller can cleanup the related resources. -::: - -## Ingress on AWS - -The AWS Load Balancer Controller also provisions `Ingress` resources of class `alb`. -AWS Application Load Balancers (ALBs) can be configured with a [`target-type`](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.7/guide/ingress/annotations/#target-type). -The target type `ip` requires using the EKS container network solution, which makes it incompatible with Constellation. -If a service can be exposed on a `NodePort`, the target type `instance` can be used. - -See [Application load balancing on Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html) for more information. - -:::caution -Ingress handlers backed by AWS ALBs reside outside the Constellation cluster, so they shouldn't be handling sensitive traffic! 
-::: diff --git a/docs/versioned_docs/version-2.18/workflows/recovery.md b/docs/versioned_docs/version-2.18/workflows/recovery.md deleted file mode 100644 index 592ae247b..000000000 --- a/docs/versioned_docs/version-2.18/workflows/recovery.md +++ /dev/null @@ -1,179 +0,0 @@ -# Recover your cluster - -Recovery of a Constellation cluster means getting it back into a healthy state after too many concurrent node failures in the control plane. -Reasons for an unhealthy cluster can vary from a power outage, or planned reboot, to migration of nodes and regions. -Recovery events are rare, because Constellation is built for high availability and automatically and securely replaces failed nodes. When a node is replaced, Constellation's control plane first verifies the new node before it sends the node the cryptographic keys required to decrypt its [state disk](../architecture/images.md#state-disk). - -Constellation provides a recovery mechanism for cases where the control plane has failed and is unable to replace nodes. -The `constellation recover` command securely connects to all nodes in need of recovery using [attested TLS](../architecture/attestation.md#attested-tls-atls) and provides them with the keys to decrypt their state disks and continue booting. - -## Identify unhealthy clusters - -The first step to recovery is identifying when a cluster becomes unhealthy. -Usually, this can be first observed when the Kubernetes API server becomes unresponsive. - -You can check the health status of the nodes via the cloud service provider (CSP). -Constellation provides logging information on the boot process and status via serial console output. -In the following, you'll find detailed descriptions for identifying clusters stuck in recovery for each CSP. - - - - -First, open the AWS console to view all Auto Scaling Groups (ASGs) in the region of your cluster. Select the ASG of the control plane `--control-plane` and check that enough members are in a *Running* state. - -Second, check the boot logs of these *Instances*. In the ASG's *Instance management* view, select each desired instance. In the upper right corner, select **Action > Monitor and troubleshoot > Get system log**. - -In the serial console output, search for `Waiting for decryption key`. -Similar output to the following means your node was restarted and needs to decrypt the [state disk](../architecture/images.md#state-disk): - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","caller":"cmd/main.go:55","msg":"Starting disk-mapper","version":"2.0.0","cloudProvider":"gcp"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"setupManager","caller":"setup/setup.go:72","msg":"Preparing existing state disk"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:65","msg":"Starting RejoinClient"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"recoveryServer","caller":"recoveryserver/server.go:59","msg":"Starting RecoveryServer"} -``` - -The node will then try to connect to the [*JoinService*](../architecture/microservices.md#joinservice) and obtain the decryption key. 
-If this fails due to an unhealthy control plane, you will see log messages similar to the following: - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:77","msg":"Received list with JoinService endpoints","endpoints":["192.168.178.4:30090","192.168.178.2:30090"]} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.4:30090"} -{"level":"WARN","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.4:30090: connect: connection refused\"","endpoint":"192.168.178.4:30090"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.2:30090"} -{"level":"WARN","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.2:30090: i/o timeout\"","endpoint":"192.168.178.2:30090"} -{"level":"ERROR","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:110","msg":"Failed to rejoin on all endpoints"} -``` - -This means that you have to recover the node manually. - - - - -In the Azure portal, find the cluster's resource group. -Inside the resource group, open the control plane *Virtual machine scale set* `constellation-scale-set-controlplanes-`. -On the left, go to **Settings** > **Instances** and check that enough members are in a *Running* state. - -Second, check the boot logs of these *Instances*. -In the scale set's *Instances* view, open the details page of the desired instance. -On the left, go to **Support + troubleshooting** > **Serial console**. - -In the serial console output, search for `Waiting for decryption key`. -Similar output to the following means your node was restarted and needs to decrypt the [state disk](../architecture/images.md#state-disk): - -```json -{"level":"INFO","ts":"2022-09-08T09:56:41Z","caller":"cmd/main.go:55","msg":"Starting disk-mapper","version":"2.0.0","cloudProvider":"azure"} -{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"setupManager","caller":"setup/setup.go:72","msg":"Preparing existing state disk"} -{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"recoveryServer","caller":"recoveryserver/server.go:59","msg":"Starting RecoveryServer"} -{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"rejoinClient","caller":"rejoinclient/client.go:65","msg":"Starting RejoinClient"} -``` - -The node will then try to connect to the [*JoinService*](../architecture/microservices.md#joinservice) and obtain the decryption key. 
-If this fails due to an unhealthy control plane, you will see log messages similar to the following: - -```json -{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"rejoinClient","caller":"rejoinclient/client.go:77","msg":"Received list with JoinService endpoints","endpoints":["10.9.0.5:30090","10.9.0.6:30090"]} -{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"10.9.0.5:30090"} -{"level":"WARN","ts":"2022-09-08T09:57:03Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 10.9.0.5:30090: i/o timeout\"","endpoint":"10.9.0.5:30090"} -{"level":"INFO","ts":"2022-09-08T09:57:03Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"10.9.0.6:30090"} -{"level":"WARN","ts":"2022-09-08T09:57:23Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 10.9.0.6:30090: i/o timeout\"","endpoint":"10.9.0.6:30090"} -{"level":"ERROR","ts":"2022-09-08T09:57:23Z","logger":"rejoinClient","caller":"rejoinclient/client.go:110","msg":"Failed to rejoin on all endpoints"} -``` - -This means that you have to recover the node manually. - - - - -First, check that the control plane *Instance Group* has enough members in a *Ready* state. -In the GCP Console, go to **Instance Groups** and check the group for the cluster's control plane `-control-plane-`. - -Second, check the status of the *VM Instances*. -Go to **VM Instances** and open the details of the desired instance. -Check the serial console output of that instance by opening the **Logs** > **Serial port 1 (console)** page: - -![GCP portal serial console link](../_media/recovery-gcp-serial-console-link.png) - -In the serial console output, search for `Waiting for decryption key`. -Similar output to the following means your node was restarted and needs to decrypt the [state disk](../architecture/images.md#state-disk): - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","caller":"cmd/main.go:55","msg":"Starting disk-mapper","version":"2.0.0","cloudProvider":"gcp"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"setupManager","caller":"setup/setup.go:72","msg":"Preparing existing state disk"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:65","msg":"Starting RejoinClient"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"recoveryServer","caller":"recoveryserver/server.go:59","msg":"Starting RecoveryServer"} -``` - -The node will then try to connect to the [*JoinService*](../architecture/microservices.md#joinservice) and obtain the decryption key. 
-If this fails due to an unhealthy control plane, you will see log messages similar to the following: - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:77","msg":"Received list with JoinService endpoints","endpoints":["192.168.178.4:30090","192.168.178.2:30090"]} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.4:30090"} -{"level":"WARN","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.4:30090: connect: connection refused\"","endpoint":"192.168.178.4:30090"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.2:30090"} -{"level":"WARN","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.2:30090: i/o timeout\"","endpoint":"192.168.178.2:30090"} -{"level":"ERROR","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:110","msg":"Failed to rejoin on all endpoints"} -``` - -This means that you have to recover the node manually. - - - - -First, open the STACKIT portal to view all servers in your project. Select individual control plane nodes `--control-plane--` and check that enough members are in a *Running* state. - -Second, check the boot logs of these servers. Click on a server name and select **Overview**. Find the **Machine Setup** section and click on **Web console** > **Open console**. - -In the serial console output, search for `Waiting for decryption key`. -Similar output to the following means your node was restarted and needs to decrypt the [state disk](../architecture/images.md#state-disk): - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","caller":"cmd/main.go:55","msg":"Starting disk-mapper","version":"2.0.0","cloudProvider":"gcp"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"setupManager","caller":"setup/setup.go:72","msg":"Preparing existing state disk"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:65","msg":"Starting RejoinClient"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"recoveryServer","caller":"recoveryserver/server.go:59","msg":"Starting RecoveryServer"} -``` - -The node will then try to connect to the [*JoinService*](../architecture/microservices.md#joinservice) and obtain the decryption key. 
-If this fails due to an unhealthy control plane, you will see log messages similar to the following: - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:77","msg":"Received list with JoinService endpoints","endpoints":["192.168.178.4:30090","192.168.178.2:30090"]} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.4:30090"} -{"level":"WARN","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.4:30090: connect: connection refused\"","endpoint":"192.168.178.4:30090"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.2:30090"} -{"level":"WARN","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.2:30090: i/o timeout\"","endpoint":"192.168.178.2:30090"} -{"level":"ERROR","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:110","msg":"Failed to rejoin on all endpoints"} -``` - -This means that you have to recover the node manually. - - - - -## Recover a cluster - -Recovering a cluster requires the following parameters: - -* The `constellation-state.yaml` file in your working directory or the cluster's endpoint -* The master secret of the cluster - -A cluster can be recovered like this: - -```bash -$ constellation recover -Pushed recovery key. -Pushed recovery key. -Pushed recovery key. -Recovered 3 control-plane nodes. -``` - -In the serial console output of the node you'll see a similar output to the following: - -```json -{"level":"INFO","ts":"2022-09-08T10:26:59Z","logger":"recoveryServer","caller":"recoveryserver/server.go:93","msg":"Received recover call"} -{"level":"INFO","ts":"2022-09-08T10:26:59Z","logger":"recoveryServer","caller":"recoveryserver/server.go:125","msg":"Received state disk key and measurement secret, shutting down server"} -{"level":"INFO","ts":"2022-09-08T10:26:59Z","logger":"recoveryServer.gRPC","caller":"zap/server_interceptors.go:61","msg":"finished streaming call with code OK","grpc.start_time":"2022-09-08T10:26:59Z","system":"grpc","span.kind":"server","grpc.service":"recoverproto.API","grpc.method":"Recover","peer.address":"192.0.2.3:41752","grpc.code":"OK","grpc.time_ms":15.701} -{"level":"INFO","ts":"2022-09-08T10:27:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:87","msg":"RejoinClient stopped"} -``` diff --git a/docs/versioned_docs/version-2.18/workflows/s3proxy.md b/docs/versioned_docs/version-2.18/workflows/s3proxy.md deleted file mode 100644 index 121e8a461..000000000 --- a/docs/versioned_docs/version-2.18/workflows/s3proxy.md +++ /dev/null @@ -1,58 +0,0 @@ -# Install s3proxy - -Constellation includes a transparent client-side encryption proxy for [AWS S3](https://aws.amazon.com/de/s3/) and compatible stores. -s3proxy encrypts objects before sending them to S3 and automatically decrypts them on retrieval, without requiring changes to your application. 
-With s3proxy, you can use S3 for storage in a confidential way without having to trust the storage provider. - -## Limitations - -Currently, s3proxy has the following limitations: -- Only `PutObject` and `GetObject` requests are encrypted/decrypted by s3proxy. -By default, s3proxy will block requests that may expose unencrypted data to S3 (e.g. UploadPart). -The `allow-multipart` flag disables request blocking for evaluation purposes. -- Using the [Range](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html#API_GetObject_RequestSyntax) header on `GetObject` is currently not supported and will result in an error. - -These limitations will be removed with future iterations of s3proxy. -If you want to use s3proxy but these limitations stop you from doing so, consider [opening an issue](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&projects=&template=feature_request.yml). - -## Deployment - -You can add the s3proxy to your Constellation cluster as follows: -1. Add the Edgeless Systems chart repository: - ```bash - helm repo add edgeless https://helm.edgeless.systems/stable - helm repo update - ``` -2. Set ACCESS_KEY and ACCESS_SECRET to valid credentials you want s3proxy to use to interact with S3. -3. Deploy s3proxy: - ```bash - helm install s3proxy edgeless/s3proxy --set awsAccessKeyID="$ACCESS_KEY" --set awsSecretAccessKey="$ACCESS_SECRET" - ``` - -If you want to run a demo application, check out the [Filestash with s3proxy](../getting-started/examples/filestash-s3proxy.md) example. - - -## Technical details - -### Encryption - -s3proxy relies on Google's [Tink Cryptographic Library](https://developers.google.com/tink) to implement cryptographic operations securely. -The used cryptographic primitives are [NIST SP 800 38f](https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-38F.pdf) for key wrapping and [AES](https://en.wikipedia.org/wiki/Advanced_Encryption_Standard)-[GCM](https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Galois/counter_(GCM)) with 256 bit keys for data encryption. - -s3proxy uses [envelope encryption](https://cloud.google.com/kms/docs/envelope-encryption) to encrypt objects. -This means s3proxy uses a key encryption key (KEK) issued by the [KeyService](../architecture/microservices.md#keyservice) to encrypt data encryption keys (DEKs). -Each S3 object is encrypted with its own DEK. -The encrypted DEK is then saved as metadata of the encrypted object. -This enables key rotation of the KEK without re-encrypting the data in S3. -The approach also allows access to objects from different locations, as long as each location has access to the KEK. - -### Traffic interception - -To use s3proxy, you have to redirect your outbound S3 traffic to s3proxy. -This can either be done by modifying your client application or by changing the deployment of your application. - -The necessary deployment modifications are to add DNS redirection and a trusted TLS certificate to the client's trust store. -DNS redirection can be defined for each pod, allowing you to use s3proxy for one application without changing other applications in the same cluster. -Adding a trusted TLS certificate is necessary as clients communicate with s3proxy via HTTPS. -To have your client application trust s3proxy's TLS certificate, the certificate has to be added to the client's certificate trust store. -The [Filestash with s3proxy](../getting-started/examples/filestash-s3proxy.md) example shows how to do this. 
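-
-As a minimal sketch of the DNS redirection part, assuming the Helm release exposes a service named `s3proxy` in the same namespace and your client is a deployment named `my-app` that talks to the `eu-central-1` S3 endpoint (all placeholder names), you could add a host alias that points the endpoint at the proxy's cluster IP:
-
-```bash
-# Look up the proxy's cluster IP and redirect the S3 endpoint hostname to it.
-# The proxy's TLS certificate still has to be added to the client's trust store.
-S3PROXY_IP=$(kubectl get svc s3proxy -o jsonpath='{.spec.clusterIP}')
-kubectl patch deployment my-app --type merge -p \
-  "{\"spec\":{\"template\":{\"spec\":{\"hostAliases\":[{\"ip\":\"${S3PROXY_IP}\",\"hostnames\":[\"s3.eu-central-1.amazonaws.com\"]}]}}}}"
-```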
diff --git a/docs/versioned_docs/version-2.18/workflows/sbom.md b/docs/versioned_docs/version-2.18/workflows/sbom.md deleted file mode 100644 index 6c1702dee..000000000 --- a/docs/versioned_docs/version-2.18/workflows/sbom.md +++ /dev/null @@ -1,93 +0,0 @@ -# Consume software bill of materials (SBOMs) - - - ---- - -Constellation builds produce a [software bill of materials (SBOM)](https://www.ntia.gov/SBOM) for each generated [artifact](../architecture/microservices.md). -You can use SBOMs to make informed decisions about dependencies and vulnerabilities in a given application. Enterprises rely on SBOMs to maintain an inventory of used applications, which allows them to take data-driven approaches to managing risks related to vulnerabilities. - -SBOMs for Constellation are generated using [Syft](https://github.com/anchore/syft), signed using [Cosign](https://github.com/sigstore/cosign), and stored with the produced artifact. - -:::note -The public key for Edgeless Systems' long-term code-signing key is: - -``` ------BEGIN PUBLIC KEY----- -MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEf8F1hpmwE+YCFXzjGtaQcrL6XZVT -JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== ------END PUBLIC KEY----- -``` - -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). - -Make sure the key is available in a file named `cosign.pub` to execute the following examples. -::: - -## Verify and download SBOMs - -The following sections detail how to work with each type of artifact to verify and extract the SBOM. - -### Constellation CLI - -The SBOM for Constellation CLI is made available on the [GitHub release page](https://github.com/edgelesssys/constellation/releases). The SBOM (`constellation.spdx.sbom`) and corresponding signature (`constellation.spdx.sbom.sig`) are valid for each Constellation CLI for a given version, regardless of architecture and operating system. - -```bash -curl -LO https://github.com/edgelesssys/constellation/releases/download/v2.2.0/constellation.spdx.sbom -curl -LO https://github.com/edgelesssys/constellation/releases/download/v2.2.0/constellation.spdx.sbom.sig -cosign verify-blob --key cosign.pub --signature constellation.spdx.sbom.sig constellation.spdx.sbom -``` - -### Container Images - -SBOMs for container images are [attached to the image using Cosign](https://docs.sigstore.dev/cosign/signing/other_types/#sboms-software-bill-of-materials) and uploaded to the same registry. 
- -As a consumer, use cosign to download and verify the SBOM: - -```bash -# Verify and download the attestation statement -cosign verify-attestation ghcr.io/edgelesssys/constellation/verification-service@v2.2.0 --type 'https://cyclonedx.org/bom' --key cosign.pub --output-file verification-service.att.json -# Extract SBOM from attestation statement -jq -r .payload verification-service.att.json | base64 -d > verification-service.cyclonedx.sbom -``` - -A successful verification should result in similar output: - -```shell-session -$ cosign verify-attestation ghcr.io/edgelesssys/constellation/verification-service@v2.2.0 --type 'https://cyclonedx.org/bom' --key cosign.pub --output-file verification-service.sbom - -Verification for ghcr.io/edgelesssys/constellation/verification-service@v2.2.0 -- -The following checks were performed on each of these signatures: - - The cosign claims were validated - - The signatures were verified against the specified public key -$ jq -r .payload verification-service.sbom | base64 -d > verification-service.cyclonedx.sbom -``` - -:::note - -This example considers only the `verification-service`. The same approach works for all containers in the [Constellation container registry](https://github.com/orgs/edgelesssys/packages?repo_name=constellation). - -::: - - - -## Vulnerability scanning - -You can use a plethora of tools to consume SBOMs. This section provides suggestions for tools that are popular and known to produce reliable results, but any tool that consumes [SPDX](https://spdx.dev/) or [CycloneDX](https://cyclonedx.org/) files should work. - -Syft is able to [convert between the two formats](https://github.com/anchore/syft#format-conversion-experimental) in case you require a specific type. - -### Grype - -[Grype](https://github.com/anchore/grype) is a CLI tool that lends itself well for integration into CI/CD systems or local developer machines. It's also able to consume the signed attestation statement directly and does the verification in one go. - -```bash -grype att:verification-service.sbom --key cosign.pub --add-cpes-if-none -q -``` - -### Dependency Track - -[Dependency Track](https://dependencytrack.org/) is one of the oldest and most mature solutions when it comes to managing software inventory and vulnerabilities. Once imported, it continuously scans SBOMs for new vulnerabilities. It supports the CycloneDX format and provides direct guidance on how to comply with [U.S. Executive Order 14028](https://docs.dependencytrack.org/usage/executive-order-14028/). diff --git a/docs/versioned_docs/version-2.18/workflows/scale.md b/docs/versioned_docs/version-2.18/workflows/scale.md deleted file mode 100644 index 28f19e3f1..000000000 --- a/docs/versioned_docs/version-2.18/workflows/scale.md +++ /dev/null @@ -1,122 +0,0 @@ -# Scale your cluster - -Constellation provides all features of a Kubernetes cluster including scaling and autoscaling. - -## Worker node scaling - -### Autoscaling - -Constellation comes with autoscaling disabled by default. To enable autoscaling, find the scaling group of -worker nodes: - -```bash -kubectl get scalinggroups -o json | yq '.items | .[] | select(.spec.role == "Worker") | [{"name": .metadata.name, "nodeGoupName": .spec.nodeGroupName}]' -``` - -This will output a list of scaling groups with the corresponding cloud provider name (`name`) and the cloud provider agnostic name of the node group (`nodeGroupName`). 
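If you'd rather capture the scaling group's name programmatically instead of copying it from the output, a small sketch like the following could work (it assumes exactly one worker node group):

```bash
# Sketch: grab the cloud provider name of the (first) worker scaling group.
# Assumes exactly one worker node group; adjust the selection otherwise.
worker_group=$(kubectl get scalinggroups -o json \
  | yq '[.items[] | select(.spec.role == "Worker") | .metadata.name] | .[0]')
echo "Worker scaling group: ${worker_group}"
```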
- -Then, patch the `autoscaling` field of the scaling group resource with the desired `name` to `true`: - -```bash -# Replace with the name of the scaling group you want to enable autoscaling for -worker_group= -kubectl patch scalinggroups $worker_group --patch '{"spec":{"autoscaling": true}}' --type='merge' -kubectl get scalinggroup $worker_group -o jsonpath='{.spec}' | yq -P -``` - -The cluster autoscaler now automatically provisions additional worker nodes so that all pods have a place to run. -You can configure the minimum and maximum number of worker nodes in the scaling group by patching the `min` or -`max` fields of the scaling group resource: - -```bash -kubectl patch scalinggroups $worker_group --patch '{"spec":{"max": 5}}' --type='merge' -kubectl get scalinggroup $worker_group -o jsonpath='{.spec}' | yq -P -``` - -The cluster autoscaler will now never provision more than 5 worker nodes. - -If you want to see the autoscaling in action, try to add a deployment with a lot of replicas, like the -following Nginx deployment. The number of replicas needed to trigger the autoscaling depends on the size of -and count of your worker nodes. Wait for the rollout of the deployment to finish and compare the number of -worker nodes before and after the deployment: - -```bash -kubectl create deployment nginx --image=nginx --replicas 150 -kubectl -n kube-system get nodes -kubectl rollout status deployment nginx -kubectl -n kube-system get nodes -``` - -### Manual scaling - -Alternatively, you can manually scale your cluster up or down: - - - - -1. Go to Auto Scaling Groups and select the worker ASG to scale up. -2. Click **Edit** -3. Set the new (increased) **Desired capacity** and **Update**. - - - - -1. Find your Constellation resource group. -2. Select the `scale-set-workers`. -3. Go to **settings** and **scaling**. -4. Set the new **instance count** and **save**. - - - - -1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). -2. **Edit** the **worker** instance group. -3. Set the new **number of instances** and **save**. - - - - -Dynamic cluster scaling isn't yet supported for STACKIT. -Support will be introduced in one of the upcoming releases. - - - - -## Control-plane node scaling - -Control-plane nodes can **only be scaled manually and only scaled up**! - -To increase the number of control-plane nodes, follow these steps: - - - - -1. Go to Auto Scaling Groups and select the control-plane ASG to scale up. -2. Click **Edit** -3. Set the new (increased) **Desired capacity** and **Update**. - - - - -1. Find your Constellation resource group. -2. Select the `scale-set-controlplanes`. -3. Go to **settings** and **scaling**. -4. Set the new (increased) **instance count** and **save**. - - - - -1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). -2. **Edit** the **control-plane** instance group. -3. Set the new (increased) **number of instances** and **save**. - - - - -Dynamic cluster scaling isn't yet supported for STACKIT. -Support will be introduced in one of the upcoming releases. - - - - -If you scale down the number of control-planes nodes, the removed nodes won't be able to exit the `etcd` cluster correctly. This will endanger the quorum that's required to run a stable Kubernetes control plane. 
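Before changing the number of control-plane nodes, it can help to check how many are currently registered. In kubeadm-based clusters such as Constellation, control-plane nodes typically carry the standard Kubernetes role label, so a quick count could look like this:

```bash
# Count the control-plane nodes currently registered in the cluster
# (assumes the standard node-role label is set).
kubectl get nodes -l node-role.kubernetes.io/control-plane --no-headers | wc -l
```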
diff --git a/docs/versioned_docs/version-2.18/workflows/storage.md b/docs/versioned_docs/version-2.18/workflows/storage.md deleted file mode 100644 index a5c52be90..000000000 --- a/docs/versioned_docs/version-2.18/workflows/storage.md +++ /dev/null @@ -1,281 +0,0 @@ -# Use persistent storage - -Persistent storage in Kubernetes requires cloud-specific configuration. -For abstraction of container storage, Kubernetes offers [volumes](https://kubernetes.io/docs/concepts/storage/volumes/), -allowing users to mount storage solutions directly into containers. -The [Container Storage Interface (CSI)](https://kubernetes-csi.github.io/docs/) is the standard interface for exposing arbitrary block and file storage systems into containers in Kubernetes. -Cloud service providers (CSPs) offer their own CSI-based solutions for cloud storage. - -## Confidential storage - -Most cloud storage solutions support encryption, such as [GCE Persistent Disks (PD)](https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek). -Constellation supports the available CSI-based storage options for Kubernetes engines in AWS, Azure, GCP, and STACKIT. -However, their encryption takes place in the storage backend and is managed by the CSP. -Thus, using the default CSI drivers for these storage types means trusting the CSP with your persistent data. - -To address this, Constellation provides CSI drivers for AWS EBS, Azure Disk, GCE PD, and OpenStack Cinder, offering [encryption on the node level](../architecture/keys.md#storage-encryption). They enable transparent encryption for persistent volumes without needing to trust the cloud backend. Plaintext data never leaves the confidential VM context, offering you confidential storage. - -For more details see [encrypted persistent storage](../architecture/encrypted-storage.md). - -## CSI drivers - -Constellation supports the following drivers, which offer node-level encryption and optional integrity protection. - - - - -**Constellation CSI driver for AWS Elastic Block Store** -Mount [Elastic Block Store](https://aws.amazon.com/ebs/) storage volumes into your Constellation cluster. -Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-aws-ebs-csi-driver) for more information. - - - - -**Constellation CSI driver for Azure Disk**: -Mount Azure [Disk Storage](https://azure.microsoft.com/en-us/services/storage/disks/#overview) into your Constellation cluster. -See the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-azuredisk-csi-driver) for more information. -Since Azure Disks are mounted as `ReadWriteOnce`, they're only available to a single pod. - - - - -**Constellation CSI driver for GCP Persistent Disk**: -Mount [Persistent Disk](https://cloud.google.com/persistent-disk) block storage into your Constellation cluster. -Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-gcp-compute-persistent-disk-csi-driver) for more information. - - - - -**Constellation CSI driver for STACKIT / OpenStack Cinder** -Mount [Cinder](https://docs.openstack.org/cinder/latest/) block storage volumes into your Constellation cluster. 
-Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-cloud-provider-openstack) for more information. - - - - -Note that in case the options above aren't a suitable solution for you, Constellation is compatible with all other CSI-based storage options. For example, you can use [AWS EFS](https://docs.aws.amazon.com/en_en/eks/latest/userguide/efs-csi.html), [Azure Files](https://docs.microsoft.com/en-us/azure/storage/files/storage-files-introduction), or [GCP Filestore](https://cloud.google.com/filestore) with Constellation out of the box. Constellation is just not providing transparent encryption on the node level for these storage types yet. - -## Installation - -The Constellation CLI automatically installs Constellation's CSI driver for the selected CSP in your cluster. -If you don't need a CSI driver or wish to deploy your own, you can disable the automatic installation by setting `deployCSIDriver` to `false` in your Constellation config file. - - - - -AWS comes with two storage classes by default. - -* `encrypted-rwo` - * Uses [SSDs of `gp3` type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html) - * ext-4 filesystem - * Encryption of all data written to disk -* `integrity-encrypted-rwo` - * Uses [SSDs of `gp3` type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html) - * ext-4 filesystem - * Encryption of all data written to disk - * Integrity protection of data written to disk - -For more information on encryption algorithms and key sizes, refer to [cryptographic algorithms](../architecture/encrypted-storage.md#cryptographic-algorithms). - -:::info - -The default storage class is set to `encrypted-rwo` for performance reasons. -If you want integrity-protected storage, set the `storageClassName` parameter of your persistent volume claim to `integrity-encrypted-rwo`. - -Alternatively, you can create your own storage class with integrity protection enabled by adding `csi.storage.k8s.io/fstype: ext4-integrity` to the class `parameters`. -Or use another filesystem by specifying another file system type with the suffix `-integrity`, e.g., `csi.storage.k8s.io/fstype: xfs-integrity`. - -Note that volume expansion isn't supported for integrity-protected disks. - -::: - - - - -Azure comes with two storage classes by default. - -* `encrypted-rwo` - * Uses [Standard SSDs](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#standard-ssds) - * ext-4 filesystem - * Encryption of all data written to disk -* `integrity-encrypted-rwo` - * Uses [Premium SSDs](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#premium-ssds) - * ext-4 filesystem - * Encryption of all data written to disk - * Integrity protection of data written to disk - -For more information on encryption algorithms and key sizes, refer to [cryptographic algorithms](../architecture/encrypted-storage.md#cryptographic-algorithms). - -:::info - -The default storage class is set to `encrypted-rwo` for performance reasons. -If you want integrity-protected storage, set the `storageClassName` parameter of your persistent volume claim to `integrity-encrypted-rwo`. - -Alternatively, you can create your own storage class with integrity protection enabled by adding `csi.storage.k8s.io/fstype: ext4-integrity` to the class `parameters`. 
-Or use another filesystem by specifying another file system type with the suffix `-integrity`, e.g., `csi.storage.k8s.io/fstype: xfs-integrity`. - -Note that volume expansion isn't supported for integrity-protected disks. - -::: - - - - -GCP comes with two storage classes by default. - -* `encrypted-rwo` - * Uses [standard persistent disks](https://cloud.google.com/compute/docs/disks#pdspecs) - * ext-4 filesystem - * Encryption of all data written to disk -* `integrity-encrypted-rwo` - * Uses [performance (SSD) persistent disks](https://cloud.google.com/compute/docs/disks#pdspecs) - * ext-4 filesystem - * Encryption of all data written to disk - * Integrity protection of data written to disk - -For more information on encryption algorithms and key sizes, refer to [cryptographic algorithms](../architecture/encrypted-storage.md#cryptographic-algorithms). - -:::info - -The default storage class is set to `encrypted-rwo` for performance reasons. -If you want integrity-protected storage, set the `storageClassName` parameter of your persistent volume claim to `integrity-encrypted-rwo`. - -Alternatively, you can create your own storage class with integrity protection enabled by adding `csi.storage.k8s.io/fstype: ext4-integrity` to the class `parameters`. -Or use another filesystem by specifying another file system type with the suffix `-integrity`, e.g., `csi.storage.k8s.io/fstype: xfs-integrity`. - -Note that volume expansion isn't supported for integrity-protected disks. - -::: - - - - -STACKIT comes with two storage classes by default. - -* `encrypted-rwo` - * Uses [disks of `storage_premium_perf1` type](https://docs.stackit.cloud/stackit/en/service-plans-blockstorage-75137974.html) - * ext-4 filesystem - * Encryption of all data written to disk -* `integrity-encrypted-rwo` - * Uses [disks of `storage_premium_perf1` type](https://docs.stackit.cloud/stackit/en/service-plans-blockstorage-75137974.html) - * ext-4 filesystem - * Encryption of all data written to disk - * Integrity protection of data written to disk - -For more information on encryption algorithms and key sizes, refer to [cryptographic algorithms](../architecture/encrypted-storage.md#cryptographic-algorithms). - -:::info - -The default storage class is set to `encrypted-rwo` for performance reasons. -If you want integrity-protected storage, set the `storageClassName` parameter of your persistent volume claim to `integrity-encrypted-rwo`. - -Alternatively, you can create your own storage class with integrity protection enabled by adding `csi.storage.k8s.io/fstype: ext4-integrity` to the class `parameters`. -Or use another filesystem by specifying another file system type with the suffix `-integrity`, e.g., `csi.storage.k8s.io/fstype: xfs-integrity`. - -Note that volume expansion isn't supported for integrity-protected disks. - -::: - - - - -1. Create a [persistent volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) - - A [persistent volume claim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) is a request for storage with certain properties. - It can refer to a storage class. - The following creates a persistent volume claim, requesting 20 GB of storage via the `encrypted-rwo` storage class: - - ```bash - cat < - ---- - -You can terminate your cluster using the CLI. For this, you need the Terraform state directory named [`constellation-terraform`](../reference/terraform.md) in the current directory. 
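A quick sanity check before terminating is to confirm that the Terraform state directory is actually present in your working directory; a small sketch:

```bash
# Sketch: make sure the Terraform state directory exists before terminating.
if [[ -d constellation-terraform ]]; then
  echo "Found constellation-terraform; 'constellation terminate' can be run from here."
else
  echo "No constellation-terraform directory in $(pwd); change into your cluster's workspace first." >&2
fi
```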
- -:::danger - -All ephemeral storage and state of your cluster will be lost. Make sure any data is safely stored in persistent storage. Constellation can recreate your cluster and the associated encryption keys, but won't backup your application data automatically. - -::: - - - -Terminate the cluster by running: - -```bash -constellation terminate -``` - -Or without confirmation (e.g., for automation purposes): - -```bash -constellation terminate --yes -``` - -This deletes all resources created by Constellation in your cloud environment. -All local files created by the `apply` command are deleted as well, except for `constellation-mastersecret.json` and the configuration file. - -:::caution - -Termination can fail if additional resources have been created that depend on the ones managed by Constellation. In this case, you need to delete these additional -resources manually. Just run the `terminate` command again afterward to continue the termination process of the cluster. - -::: - - - -Terminate the cluster by running: - -```bash -terraform destroy -``` - -Delete all files that are no longer needed: - -```bash -rm constellation-state.yaml constellation-admin.conf -``` - -Only the `constellation-mastersecret.json` and the configuration file remain. - - - diff --git a/docs/versioned_docs/version-2.18/workflows/terraform-provider.md b/docs/versioned_docs/version-2.18/workflows/terraform-provider.md deleted file mode 100644 index ed8f46eda..000000000 --- a/docs/versioned_docs/version-2.18/workflows/terraform-provider.md +++ /dev/null @@ -1,129 +0,0 @@ -# Use the Terraform provider - -The Constellation Terraform provider allows to manage the full lifecycle of a Constellation cluster (namely creation, upgrades, and deletion) via Terraform. -The provider is available through the [Terraform registry](https://registry.terraform.io/providers/edgelesssys/constellation/latest) and is released in lock-step with Constellation releases. - -## Prerequisites - -- a Linux / Mac operating system (ARM64/AMD64) -- a Terraform installation of version `v1.4.4` or above - -## Quick setup - -This example shows how to set up a Constellation cluster with the reference IAM and infrastructure setup. This setup is also used when creating a Constellation cluster through the Constellation CLI. You can either consume the IAM / infrastructure modules through a remote source (recommended) or local files. The latter requires downloading the infrastructure and IAM modules for the corresponding CSP from `terraform-modules.zip` on the [Constellation release page](https://github.com/edgelesssys/constellation/releases/latest) and placing them in the Terraform workspace directory. - -1. Create a directory (workspace) for your Constellation cluster. - - ```bash - mkdir constellation-workspace - cd constellation-workspace - ``` - -2. Use one of the [example configurations for using the Constellation Terraform provider](https://github.com/edgelesssys/constellation/tree/main/terraform-provider-constellation/examples/full) or create a `main.tf` file and fill it with the resources you want to create. The [Constellation Terraform provider documentation](https://registry.terraform.io/providers/edgelesssys/constellation/latest) offers thorough documentation on the resources and their attributes. -3. Initialize and apply the Terraform configuration. - - - - Initialize the providers and apply the configuration. 
- - ```bash - terraform init - terraform apply - ``` - - Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. - - - When creating a cluster on Azure, you need to manually patch the policy of the MAA provider before creating the Constellation cluster, as this feature isn't available in Azure's Terraform provider yet. The Constellation CLI provides a utility for patching, but you - can also do it manually. - - ```bash - terraform init - terraform apply -target module.azure_iam # adjust resource path if not using the example configuration - terraform apply -target module.azure_infrastructure # adjust resource path if not using the example configuration - constellation maa-patch $(terraform output -raw maa_url) # adjust output path / input if not using the example configuration or manually patch the resource - terraform apply -target constellation_cluster.azure_example # adjust resource path if not using the example configuration - ``` - - Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. - - Use the following policy if manually performing the patch. - - ``` - version= 1.0; - authorizationrules - { - [type=="x-ms-azurevm-default-securebootkeysvalidated", value==false] => deny(); - [type=="x-ms-azurevm-debuggersdisabled", value==false] => deny(); - // The line below was edited to use the MAA provider within Constellation. Do not edit manually. - //[type=="secureboot", value==false] => deny(); - [type=="x-ms-azurevm-signingdisabled", value==false] => deny(); - [type=="x-ms-azurevm-dbvalidated", value==false] => deny(); - [type=="x-ms-azurevm-dbxvalidated", value==false] => deny(); - => permit(); - }; - issuancerules - { - }; - ``` - - - - Initialize the providers and apply the configuration. - - ```bash - terraform init - terraform apply - ``` - - Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. - - - Initialize the providers and apply the configuration. - - ```bash - terraform init - terraform apply - ``` - - Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. - - - -4. Connect to the cluster. - - ```bash - terraform output -raw kubeconfig > constellation-admin.conf - export KUBECONFIG=$(realpath constellation-admin.conf) - ``` - -## Bringing your own infrastructure - -Instead of using the example infrastructure used in the [quick setup](#quick-setup), you can also provide your own infrastructure. -If you need a starting point for a custom infrastructure setup, you can download the infrastructure / IAM Terraform modules for the respective CSP from the Constellation [GitHub releases](https://github.com/edgelesssys/constellation/releases). You can modify and extend the modules per your requirements, while keeping the basic functionality intact. 
-The module contains: - -- `{csp}`: cloud resources the cluster runs on -- `iam/{csp}`: IAM resources used within the cluster - -When upgrading your cluster, make sure to check the Constellation release notes for potential breaking changes in the reference infrastructure / IAM modules that need to be considered. - -## Cluster upgrades - -:::tip -Also see the [general documentation on cluster upgrades](./upgrade.md). -::: - -The steps for applying the upgrade are as follows: - -1. Update the version constraint of the Constellation Terraform provider in the `required_providers` block in your Terraform configuration. -2. If you explicitly set any of the version attributes of the provider's resources and data sources (e.g. `image_version` or `constellation_microservice_version`), make sure to update them too. Refer to Constellation's [version support policy](https://github.com/edgelesssys/constellation/blob/main/dev-docs/workflows/versions-support.md) for more information on how each Constellation version and its dependencies are supported. -3. Update the IAM / infrastructure configuration. - - For [remote addresses as module sources](https://developer.hashicorp.com/terraform/language/modules/sources#fetching-archives-over-http), update the version number inside the address of the `source` field of the infrastructure / IAM module to the target version. - - For [local paths as module sources](https://developer.hashicorp.com/terraform/language/modules/sources#local-paths) or when [providing your own infrastructure](#bringing-your-own-infrastructure), see the changes made in the reference modules since the upgrade's origin version and adjust your infrastructure / IAM configuration accordingly. -4. Upgrade the Terraform module and provider dependencies and apply the targeted configuration. - -```bash - terraform init -upgrade - terraform apply -``` diff --git a/docs/versioned_docs/version-2.18/workflows/troubleshooting.md b/docs/versioned_docs/version-2.18/workflows/troubleshooting.md deleted file mode 100644 index 195bce1cc..000000000 --- a/docs/versioned_docs/version-2.18/workflows/troubleshooting.md +++ /dev/null @@ -1,151 +0,0 @@ -# Troubleshooting - -This section aids you in finding problems when working with Constellation. - -## Common issues - -### Issues with creating new clusters - -When you create a new cluster, you should always use the [latest release](https://github.com/edgelesssys/constellation/releases/latest). -If something doesn't work, check out the [known issues](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22). - -### Azure: Resource Providers can't be registered - -On Azure, you may receive the following error when running `apply` or `terminate` with limited IAM permissions: - -```shell-session -Error: Error ensuring Resource Providers are registered. - -Terraform automatically attempts to register the Resource Providers it supports to -ensure it's able to provision resources. - -If you don't have permission to register Resource Providers you may wish to use the -"skip_provider_registration" flag in the Provider block to disable this functionality. - -[...] -``` - -To continue, please ensure that the [required resource providers](../getting-started/install.md#required-permissions) have been registered in your subscription by your administrator. - -Afterward, set `ARM_SKIP_PROVIDER_REGISTRATION=true` as an environment variable and either run `apply` or `terminate` again. 
-For example: - -```bash -ARM_SKIP_PROVIDER_REGISTRATION=true constellation apply -``` - -Or alternatively, for `terminate`: - -```bash -ARM_SKIP_PROVIDER_REGISTRATION=true constellation terminate -``` - -### Azure: Can't update attestation policy - -On Azure, you may receive the following error when running `apply` from within an Azure environment, e.g., an Azure VM: - -```shell-session -An error occurred: patching policies: updating attestation policy: unexpected status code: 403 Forbidden -``` - -The problem occurs because the Azure SDK we use internally attempts to [authenticate towards the Azure API with the managed identity of your current environment instead of the Azure CLI token](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DefaultAzureCredential). - -We decided not to deviate from this behavior and comply with the ordering of credentials. - -A solution is to add the [required permissions](../getting-started/install.md#required-permissions) to the managed identity of your environment. For example, the managed identity of your Azure VM, instead of the account that you've authenticated with in the Azure CLI. - -If your setup requires a change in the ordering of credentials, please open an issue and explain your desired behavior. - - - -### Nodes fail to join with error `untrusted measurement value` - -This error indicates that a node's [attestation statement](../architecture/attestation.md) contains measurements that don't match the trusted values expected by the [JoinService](../architecture/microservices.md#joinservice). -This may for example happen if the cloud provider updates the VM's firmware such that it influences the [runtime measurements](../architecture/attestation.md#runtime-measurements) in an unforeseen way. -A failed upgrade due to an erroneous attestation config can also cause this error. -You can change the expected measurements to resolve the failure. - -:::caution - -Attestation and trusted measurements are crucial for the security of your cluster. -Be extra careful when manually changing these settings. -When in doubt, check if the encountered [issue is known](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22) or [contact support](https://github.com/edgelesssys/constellation#support). - -::: - -:::tip - -During an upgrade with modified attestation config, a backup of the current configuration is stored in the `join-config` config map in the `kube-system` namespace under the `attestationConfig_backup` key. To restore the old attestation config after a failed upgrade, replace the value of `attestationConfig` with the value from `attestationConfig_backup`: - -```bash -kubectl patch configmaps -n kube-system join-config -p "{\"data\":{\"attestationConfig\":\"$(kubectl get configmaps -n kube-system join-config -o "jsonpath={.data.attestationConfig_backup}")\"}}" -``` - -::: - -You can use the `apply` command to change measurements of a running cluster: - -1. Modify the `measurements` key in your local `constellation-conf.yaml` to the expected values. -2. Run `constellation apply`. - -Keep in mind that running `apply` also applies any version changes from your config to the cluster. 
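To see which versions your local config would apply alongside the measurement change, you can print the relevant fields first. This is a sketch assuming the standard top-level field names (`image`, `kubernetesVersion`, `microserviceVersion`):

```bash
# Sketch: show the version fields your local config would apply on the next 'constellation apply'.
yq '.image, .kubernetesVersion, .microserviceVersion' constellation-conf.yaml
```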
- -You can run these commands to learn about the versions currently configured in the cluster: - -- Kubernetes API server version: `kubectl get nodeversion constellation-version -o json -n kube-system | jq .spec.kubernetesClusterVersion` -- image version: `kubectl get nodeversion constellation-version -o json -n kube-system | jq .spec.imageVersion` -- microservices versions: `helm list --filter 'constellation-services' -n kube-system` - -### Upgrading Kubernetes resources fails - -Constellation manages its Kubernetes resources using Helm. -When applying an upgrade, the charts that are about to be installed, and a values override file `overrides.yaml`, -are saved to disk in your current workspace under `constellation-upgrade/upgrade-/helm-charts/`. -If upgrading the charts using the Constellation CLI fails, you can review these charts and try to manually apply the upgrade. - -:::caution - -Changing and manually applying the charts may destroy cluster resources and can lead to broken Constellation deployments. -Proceed with caution and when in doubt, -check if the encountered [issue is known](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22) or [contact support](https://github.com/edgelesssys/constellation#support). - -::: - -## Diagnosing issues - -### Logs - -To get started on diagnosing issues with Constellation, it's often helpful to collect logs from nodes, pods, or other resources in the cluster. Most logs are available through Kubernetes' standard -[logging interfaces](https://kubernetes.io/docs/concepts/cluster-administration/logging/). - -To debug issues occurring at boot time of the nodes, you can use the serial console interface of the CSP while the machine boots to get a read-only view of the boot logs. - -Apart from that, Constellation also offers further [observability integrations](../architecture/observability.md). - -### Node shell access - -Debugging via a shell on a node is [directly supported by Kubernetes](https://kubernetes.io/docs/tasks/debug/debug-application/debug-running-pod/#node-shell-session). - -1. Figure out which node to connect to: - - ```bash - kubectl get nodes - # or to see more information, such as IPs: - kubectl get nodes -o wide - ``` - -2. Connect to the node: - - ```bash - kubectl debug node/constell-worker-xksa0-000000 -it --image=busybox - ``` - - You will be presented with a prompt. - - The nodes file system is mounted at `/host`. - -3. Once finished, clean up the debug pod: - - ```bash - kubectl delete pod node-debugger-constell-worker-xksa0-000000-bjthj - ``` diff --git a/docs/versioned_docs/version-2.18/workflows/trusted-launch.md b/docs/versioned_docs/version-2.18/workflows/trusted-launch.md deleted file mode 100644 index d6d01d8eb..000000000 --- a/docs/versioned_docs/version-2.18/workflows/trusted-launch.md +++ /dev/null @@ -1,54 +0,0 @@ -# Use Azure trusted launch VMs - -Constellation also supports [trusted launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch) on Microsoft Azure. Trusted launch VMs don't offer the same level of security as Confidential VMs, but are available in more regions and in larger quantities. The main difference between trusted launch VMs and normal VMs is that the former offer vTPM-based remote attestation. When used with trusted launch VMs, Constellation relies on vTPM-based remote attestation to verify nodes. 
- -:::caution - -Trusted launch VMs don't provide runtime encryption and don't keep the cloud service provider (CSP) out of your trusted computing base. - -::: - -Constellation supports trusted launch VMs with instance types `Standard_D*_v4` and `Standard_E*_v4`. Run `constellation config instance-types` for a list of all supported instance types. - -## VM images - -Azure currently doesn't support [community galleries for trusted launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/share-gallery-community). Thus, you need to manually import the Constellation node image into your cloud subscription. - -The latest image is available at `https://cdn.confidential.cloud/constellation/images/azure/trusted-launch/v2.2.0/constellation.img`. Simply adjust the version number to download a newer version. - -After you've downloaded the image, create a resource group `constellation-images` in your Azure subscription and import the image. -You can use a script to do this: - -```bash -wget https://raw.githubusercontent.com/edgelesssys/constellation/main/hack/importAzure.sh -chmod +x importAzure.sh -AZURE_IMAGE_VERSION=2.2.0 AZURE_RESOURCE_GROUP_NAME=constellation-images AZURE_IMAGE_FILE=./constellation.img ./importAzure.sh -``` - -The script creates the following resources: - -1. A new image gallery with the default name `constellation-import` -2. A new image definition with the default name `constellation` -3. The actual image with the provided version. In this case `2.2.0` - -Once the import is completed, use the `ID` of the image version in your `constellation-conf.yaml` for the `image` field. Set `confidentialVM` to `false`. - -Fetch the image measurements: - -```bash -IMAGE_VERSION=2.2.0 -URL=https://public-edgeless-constellation.s3.us-east-2.amazonaws.com//communitygalleries/constellationcvm-b3782fa0-0df7-4f2f-963e-fc7fc42663df/images/constellation/versions/$IMAGE_VERSION/measurements.yaml -constellation config fetch-measurements -u$URL -s$URL.sig -``` - -:::info - -The [`constellation apply`](create.md) command will issue a warning because manually imported images aren't recognized as production grade images: - -```shell-session -Configured image doesn't look like a released production image. Double check image before deploying to production. -``` - -Please ignore this warning. - -::: diff --git a/docs/versioned_docs/version-2.18/workflows/upgrade.md b/docs/versioned_docs/version-2.18/workflows/upgrade.md deleted file mode 100644 index 7348c0dbc..000000000 --- a/docs/versioned_docs/version-2.18/workflows/upgrade.md +++ /dev/null @@ -1,110 +0,0 @@ -# Upgrade your cluster - -Constellation provides an easy way to upgrade all components of your cluster, without disrupting it's availability. -Specifically, you can upgrade the Kubernetes version, the nodes' image, and the Constellation microservices. -You configure the desired versions in your local Constellation configuration and trigger upgrades with the `apply` command. -To learn about available versions you use the `upgrade check` command. -Which versions are available depends on the CLI version you are using. - -## Update the CLI - -Each CLI comes with a set of supported microservice and Kubernetes versions. -Most importantly, a given CLI version can only upgrade a cluster of the previous minor version, but not older ones. -This means that you have to upgrade your CLI and cluster one minor version at a time. 
- -For example, if you are currently on CLI version v2.6 and the latest version is v2.8, you should - -* upgrade the CLI to v2.7, -* upgrade the cluster to v2.7, -* and only then continue upgrading the CLI (and the cluster) to v2.8 after. - -Also note that if your current Kubernetes version isn't supported by the next CLI version, use your current CLI to upgrade to a newer Kubernetes version first. - -To learn which Kubernetes versions are supported by a particular CLI, run [constellation config kubernetes-versions](../reference/cli.md#constellation-config-kubernetes-versions). - -## Migrate the configuration - -The Constellation configuration file is located in the file `constellation-conf.yaml` in your workspace. -Refer to the [migration reference](../reference/migration.md) to check if you need to update fields in your configuration file. -Use [`constellation config migrate`](../reference/cli.md#constellation-config-migrate) to automatically update an old config file to a new format. - -## Check for upgrades - -To learn which versions the current CLI can upgrade to and what's installed in your cluster, run: - -```bash -# Show possible upgrades -constellation upgrade check - -# Show possible upgrades and write them to config file -constellation upgrade check --update-config -``` - -You can either enter the reported target versions into your config manually or run the above command with the `--update-config` flag. -When using this flag, the `kubernetesVersion`, `image`, `microserviceVersion`, and `attestation` fields are overwritten with the smallest available upgrade. - -## Apply the upgrade - -Once you updated your config with the desired versions, you can trigger the upgrade with this command: - -```bash -constellation apply -``` - -Microservice upgrades will be finished within a few minutes, depending on the cluster size. -If you are interested, you can monitor pods restarting in the `kube-system` namespace with your tool of choice. - -Image and Kubernetes upgrades take longer. -For each node in your cluster, a new node has to be created and joined. -The process usually takes up to ten minutes per node. - -When applying an upgrade, the Helm charts for the upgrade as well as backup files of Constellation-managed Custom Resource Definitions, Custom Resources, and Terraform state are created. -You can use the Terraform state backup to restore previous resources in case an upgrade misconfigured or erroneously deleted a resource. -You can use the Custom Resource (Definition) backup files to restore Custom Resources and Definitions manually (e.g., via `kubectl apply`) if the automatic migration of those resources fails. -You can use the Helm charts to manually apply upgrades to the Kubernetes resources, should an upgrade fail. - -:::note - -For advanced users: the upgrade consists of several phases that can be individually skipped through the `--skip-phases` flag. -The phases are `infrastracture` for the cloud resource management through Terraform, `helm` for the chart management of the microservices, `image` for OS image upgrades, and `k8s` for Kubernetes version upgrades. - -::: - -## Check the status - -Upgrades are asynchronous operations. -After you run `apply`, it will take a while until the upgrade has completed. 
-To understand if an upgrade is finished, you can run: - -```bash -constellation status -``` - -This command displays the following information: - -* The installed services and their versions -* The image and Kubernetes version the cluster is expecting on each node -* How many nodes are up to date - -Here's an example output: - -```shell-session -Target versions: - Image: v2.6.0 - Kubernetes: v1.25.8 -Service versions: - Cilium: v1.12.1 - cert-manager: v1.10.0 - constellation-operators: v2.6.0 - constellation-services: v2.6.0 -Cluster status: Some node versions are out of date - Image: 23/25 - Kubernetes: 25/25 -``` - -This output indicates that the cluster is running Kubernetes version `1.25.8`, and all nodes have the appropriate binaries installed. -23 out of 25 nodes have already upgraded to the targeted image version of `2.6.0`, while two are still in progress. - -## Apply further upgrades - -After the upgrade is finished, you can run `constellation upgrade check` again to see if there are more upgrades available. If so, repeat the process. diff --git a/docs/versioned_docs/version-2.18/workflows/verify-cli.md b/docs/versioned_docs/version-2.18/workflows/verify-cli.md deleted file mode 100644 index e33569d37..000000000 --- a/docs/versioned_docs/version-2.18/workflows/verify-cli.md +++ /dev/null @@ -1,129 +0,0 @@ -# Verify the CLI - -:::info -This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. -::: - - - ---- - -Edgeless Systems uses [sigstore](https://www.sigstore.dev/) and [SLSA](https://slsa.dev) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/cosign/signing/overview/), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at `https://rekor.sigstore.dev`. - -:::note -The public key for Edgeless Systems' long-term code-signing key is: - -``` ------BEGIN PUBLIC KEY----- -MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEf8F1hpmwE+YCFXzjGtaQcrL6XZVT -JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== ------END PUBLIC KEY----- -``` - -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). -::: - -The Rekor transparency log is a public append-only ledger that verifies and records signatures and associated metadata. The Rekor transparency log enables everyone to observe the sequence of (software) signatures issued by Edgeless Systems and many other parties. The transparency log allows for the public identification of dubious or malicious signatures. - -You should always ensure that (1) your CLI executable was signed with the private key corresponding to the above public key and that (2) there is a corresponding entry in the Rekor transparency log. Both can be done as described in the following. - -:::info -You don't need to verify the Constellation node images. This is done automatically by your CLI and the rest of Constellation. -::: - -## Verify the signature - -:::info -This guide assumes Linux on an amd64 processor. The exact steps for other platforms differ slightly. -::: - -First, [install the Cosign CLI](https://docs.sigstore.dev/cosign/system_config/installation/). 
Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example: - -```shell-session -$ cosign verify-blob --key https://edgeless.systems/es.pub --signature constellation-linux-amd64.sig constellation-linux-amd64 - -Verified OK -``` - -The above performs an offline verification of the provided public key, signature, and executable. To also verify that a corresponding entry exists in the public Rekor transparency log, add the variable `COSIGN_EXPERIMENTAL=1`: - -```shell-session -$ COSIGN_EXPERIMENTAL=1 cosign verify-blob --key https://edgeless.systems/es.pub --signature constellation-linux-amd64.sig constellation-linux-amd64 - -tlog entry verified with uuid: afaba7f6635b3e058888692841848e5514357315be9528474b23f5dcccb82b13 index: 3477047 -Verified OK -``` - -🏁 You now know that your CLI executable was officially released and signed by Edgeless Systems. - -### Optional: Manually inspect the transparency log - -To further inspect the public Rekor transparency log, [install the Rekor CLI](https://docs.sigstore.dev/logging/installation). A search for the CLI executable should give a single UUID. (Note that this UUID contains the UUID from the previous `cosign` command.) - -```shell-session -$ rekor-cli search --artifact constellation-linux-amd64 - -Found matching entries (listed by UUID): -362f8ecba72f4326afaba7f6635b3e058888692841848e5514357315be9528474b23f5dcccb82b13 -``` - -With this UUID you can get the full entry from the transparency log: - -```shell-session -$ rekor-cli get --uuid=362f8ecba72f4326afaba7f6635b3e058888692841848e5514357315be9528474b23f5dcccb82b13 - -LogID: c0d23d6ad406973f9559f3ba2d1ca01f84147d8ffc5b8445c224f98b9591801d -Index: 3477047 -IntegratedTime: 2022-09-12T22:28:16Z -UUID: afaba7f6635b3e058888692841848e5514357315be9528474b23f5dcccb82b13 -Body: { - "HashedRekordObj": { - "data": { - "hash": { - "algorithm": "sha256", - "value": "40e137b9b9b8204d672642fd1e181c6d5ccb50cfc5cc7fcbb06a8c2c78f44aff" - } - }, - "signature": { - "content": "MEUCIQCSER3mGj+j5Pr2kOXTlCIHQC3gT30I7qkLr9Awt6eUUQIgcLUKRIlY50UN8JGwVeNgkBZyYD8HMxwC/LFRWoMn180=", - "publicKey": { - "content": "LS0tLS1CRUdJTiBQVUJMSUMgS0VZLS0tLS0KTUZrd0V3WUhLb1pJemowQ0FRWUlLb1pJemowREFRY0RRZ0FFZjhGMWhwbXdFK1lDRlh6akd0YVFjckw2WFpWVApKbUVlNWlTTHZHMVN5UVNBZXc3V2RNS0Y2bzl0OGUyVEZ1Q2t6bE9oaGx3czJPSFdiaUZabkZXQ0Z3PT0KLS0tLS1FTkQgUFVCTElDIEtFWS0tLS0tCg==" - } - } - } -} -``` - -The field `publicKey` should contain Edgeless Systems' public key in Base64 encoding. - -You can get an exhaustive list of artifact signatures issued by Edgeless Systems via the following command: - -```bash -rekor-cli search --public-key https://edgeless.systems/es.pub --pki-format x509 -``` - -Edgeless Systems monitors this list to detect potential unauthorized use of its private key. - -## Verify the provenance - -Provenance attests that a software artifact was produced by a specific repository and build system invocation. For more information on provenance visit [slsa.dev](https://slsa.dev/provenance/v0.2) and learn about the [adoption of SLSA for Constellation](../reference/slsa.md). - -Just as checking its signature proves that the CLI hasn't been manipulated, checking the provenance proves that the artifact was produced by the expected build process and hasn't been tampered with. - -To verify the provenance, first install the [slsa-verifier](https://github.com/slsa-framework/slsa-verifier). 
Then make sure you have the provenance file (`constellation.intoto.jsonl`) and Constellation CLI downloaded. Both are available on the [GitHub release page](https://github.com/edgelesssys/constellation/releases). - -:::info -The same provenance file is valid for all Constellation CLI executables of a given version independent of the target platform. -::: - -Use the verifier to perform the check: - -```shell-session -$ slsa-verifier verify-artifact constellation-linux-amd64 \ - --provenance-path constellation.intoto.jsonl \ - --source-uri github.com/edgelesssys/constellation - -Verified signature against tlog entry index 7771317 at URL: https://rekor.sigstore.dev/api/v1/log/entries/24296fb24b8ad77af2c04c8b4ae0d5bc5... -Verified build using builder https://github.com/slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@refs/tags/v1.2.2 at commit 18e9924b416323c37b9cdfd6cc728de8a947424a -PASSED: Verified SLSA provenance -``` diff --git a/docs/versioned_docs/version-2.18/workflows/verify-cluster.md b/docs/versioned_docs/version-2.18/workflows/verify-cluster.md deleted file mode 100644 index b6595ebf2..000000000 --- a/docs/versioned_docs/version-2.18/workflows/verify-cluster.md +++ /dev/null @@ -1,97 +0,0 @@ -# Verify your cluster - -Constellation's [attestation feature](../architecture/attestation.md) allows you, or a third party, to verify the integrity and confidentiality of your Constellation cluster. - -## Fetch measurements - -To verify the integrity of Constellation you need trusted measurements to verify against. For each node image released by Edgeless Systems, there are signed measurements, which you can download using the CLI: - -```bash -constellation config fetch-measurements -``` - -This command performs the following steps: - -1. Download the signed measurements for the configured image. By default, this will use Edgeless Systems' public measurement registry. -2. Verify the signature of the measurements. This will use Edgeless Systems' [public key](https://edgeless.systems/es.pub). -3. Write measurements into configuration file. - -The configuration file then contains a list of `measurements` similar to the following: - -```yaml -# ... -measurements: - 0: - expected: "0f35c214608d93c7a6e68ae7359b4a8be5a0e99eea9107ece427c4dea4e439cf" - warnOnly: false - 4: - expected: "02c7a67c01ec70ffaf23d73a12f749ab150a8ac6dc529bda2fe1096a98bf42ea" - warnOnly: false - 5: - expected: "e6949026b72e5045706cd1318889b3874480f7a3f7c5c590912391a2d15e6975" - warnOnly: true - 8: - expected: "0000000000000000000000000000000000000000000000000000000000000000" - warnOnly: false - 9: - expected: "f0a6e8601b00e2fdc57195686cd4ef45eb43a556ac1209b8e25d993213d68384" - warnOnly: false - 11: - expected: "0000000000000000000000000000000000000000000000000000000000000000" - warnOnly: false - 12: - expected: "da99eb6cf7c7fbb692067c87fd5ca0b7117dc293578e4fea41f95d3d3d6af5e2" - warnOnly: false - 13: - expected: "0000000000000000000000000000000000000000000000000000000000000000" - warnOnly: false - 14: - expected: "d7c4cc7ff7933022f013e03bdee875b91720b5b86cf1753cad830f95e791926f" - warnOnly: true - 15: - expected: "0000000000000000000000000000000000000000000000000000000000000000" - warnOnly: false -# ... -``` - -Each entry specifies the expected value of the Constellation node, and whether the measurement should be enforced (`warnOnly: false`), or only a warning should be logged (`warnOnly: true`). 
-By default, the subset of the [available measurements](../architecture/attestation.md#runtime-measurements) that can be locally reproduced and verified is enforced. - -During attestation, the validating side (CLI or [join service](../architecture/microservices.md#joinservice)) compares each measurement reported by the issuing side (first node or joining node) individually. -For mismatching measurements that have set `warnOnly` to `true` only a warning is emitted. -For mismatching measurements that have set `warnOnly` to `false` an error is emitted and attestation fails. -If attestation fails for a new node, it isn't permitted to join the cluster. - -## The *verify* command - -:::note -The steps below are purely optional. They're automatically executed by `constellation apply` when you initialize your cluster. The `constellation verify` command mostly has an illustrative purpose. -::: - -The `verify` command obtains and verifies an attestation statement from a running Constellation cluster. - -```bash -constellation verify [--cluster-id ...] -``` - -From the attestation statement, the command verifies the following properties: - -* The cluster is using the correct Confidential VM (CVM) type. -* Inside the CVMs, the correct node images are running. The node images are identified through the measurements obtained in the previous step. -* The unique ID of the cluster matches the one from your `constellation-state.yaml` file or passed in via `--cluster-id`. - -Once the above properties are verified, you know that you are talking to the right Constellation cluster and it's in a good and trustworthy shape. - -### Custom arguments - -The `verify` command also allows you to verify any Constellation deployment that you have network access to. For this you need the following: - -* The IP address of a running Constellation cluster's [VerificationService](../architecture/microservices.md#verificationservice). The `VerificationService` is exposed via a `NodePort` service using the external IP address of your cluster. Run `kubectl get nodes -o wide` and look for `EXTERNAL-IP`. -* The cluster's *clusterID*. See [cluster identity](../architecture/keys.md#cluster-identity) for more details. -* A `constellation-conf.yaml` file with the expected measurements of the cluster in your working directory. 
- -For example: - -```shell-session -constellation verify -e 192.0.2.1 --cluster-id Q29uc3RlbGxhdGlvbkRvY3VtZW50YXRpb25TZWNyZXQ= -``` diff --git a/docs/versioned_docs/version-2.19/_media/SLSA-Badge-full-level3.svg b/docs/versioned_docs/version-2.19/_media/SLSA-Badge-full-level3.svg deleted file mode 100644 index 7154d4a13..000000000 --- a/docs/versioned_docs/version-2.19/_media/SLSA-Badge-full-level3.svg +++ /dev/null @@ -1,47 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/docs/versioned_docs/version-2.19/_media/benchmark_fio_azure_bw.png b/docs/versioned_docs/version-2.19/_media/benchmark_fio_azure_bw.png deleted file mode 100644 index a82ebe2d0..000000000 Binary files a/docs/versioned_docs/version-2.19/_media/benchmark_fio_azure_bw.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.19/_media/benchmark_fio_azure_iops.png b/docs/versioned_docs/version-2.19/_media/benchmark_fio_azure_iops.png deleted file mode 100644 index 1723257a8..000000000 Binary files a/docs/versioned_docs/version-2.19/_media/benchmark_fio_azure_iops.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.19/_media/benchmark_fio_gcp_bw.png b/docs/versioned_docs/version-2.19/_media/benchmark_fio_gcp_bw.png deleted file mode 100644 index 4f0ecc94b..000000000 Binary files a/docs/versioned_docs/version-2.19/_media/benchmark_fio_gcp_bw.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.19/_media/benchmark_fio_gcp_iops.png b/docs/versioned_docs/version-2.19/_media/benchmark_fio_gcp_iops.png deleted file mode 100644 index 571086da2..000000000 Binary files a/docs/versioned_docs/version-2.19/_media/benchmark_fio_gcp_iops.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.19/_media/benchmark_net_p2p_azure.png b/docs/versioned_docs/version-2.19/_media/benchmark_net_p2p_azure.png deleted file mode 100644 index 9130349c7..000000000 Binary files a/docs/versioned_docs/version-2.19/_media/benchmark_net_p2p_azure.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.19/_media/benchmark_net_p2p_gcp.png b/docs/versioned_docs/version-2.19/_media/benchmark_net_p2p_gcp.png deleted file mode 100644 index a41557e96..000000000 Binary files a/docs/versioned_docs/version-2.19/_media/benchmark_net_p2p_gcp.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.19/_media/benchmark_net_p2svc_azure.png b/docs/versioned_docs/version-2.19/_media/benchmark_net_p2svc_azure.png deleted file mode 100644 index d83e17f5a..000000000 Binary files a/docs/versioned_docs/version-2.19/_media/benchmark_net_p2svc_azure.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.19/_media/benchmark_net_p2svc_gcp.png b/docs/versioned_docs/version-2.19/_media/benchmark_net_p2svc_gcp.png deleted file mode 100644 index 55916a1de..000000000 Binary files a/docs/versioned_docs/version-2.19/_media/benchmark_net_p2svc_gcp.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.19/_media/benchmark_vault/5replicas/max_latency.png b/docs/versioned_docs/version-2.19/_media/benchmark_vault/5replicas/max_latency.png deleted file mode 100644 index 696250181..000000000 Binary files a/docs/versioned_docs/version-2.19/_media/benchmark_vault/5replicas/max_latency.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.19/_media/benchmark_vault/5replicas/mean_latency.png b/docs/versioned_docs/version-2.19/_media/benchmark_vault/5replicas/mean_latency.png deleted file mode 100644 index 
3b43298ac..000000000 Binary files a/docs/versioned_docs/version-2.19/_media/benchmark_vault/5replicas/mean_latency.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.19/_media/benchmark_vault/5replicas/min_latency.png b/docs/versioned_docs/version-2.19/_media/benchmark_vault/5replicas/min_latency.png deleted file mode 100644 index 1046df67e..000000000 Binary files a/docs/versioned_docs/version-2.19/_media/benchmark_vault/5replicas/min_latency.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.19/_media/benchmark_vault/5replicas/p99_latency.png b/docs/versioned_docs/version-2.19/_media/benchmark_vault/5replicas/p99_latency.png deleted file mode 100644 index 0190118b2..000000000 Binary files a/docs/versioned_docs/version-2.19/_media/benchmark_vault/5replicas/p99_latency.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.19/_media/concept-constellation.svg b/docs/versioned_docs/version-2.19/_media/concept-constellation.svg deleted file mode 100644 index 30d32bf6d..000000000 --- a/docs/versioned_docs/version-2.19/_media/concept-constellation.svg +++ /dev/null @@ -1,460 +0,0 @@ - - diff --git a/docs/versioned_docs/version-2.19/_media/concept-managed.svg b/docs/versioned_docs/version-2.19/_media/concept-managed.svg deleted file mode 100644 index 5645a608f..000000000 --- a/docs/versioned_docs/version-2.19/_media/concept-managed.svg +++ /dev/null @@ -1,591 +0,0 @@ - - diff --git a/docs/versioned_docs/version-2.19/_media/constellation_oneline.svg b/docs/versioned_docs/version-2.19/_media/constellation_oneline.svg deleted file mode 100644 index 4e354958a..000000000 --- a/docs/versioned_docs/version-2.19/_media/constellation_oneline.svg +++ /dev/null @@ -1,52 +0,0 @@ - - - - - - - - diff --git a/docs/versioned_docs/version-2.19/_media/example-emojivoto.jpg b/docs/versioned_docs/version-2.19/_media/example-emojivoto.jpg deleted file mode 100644 index 4be0d5b26..000000000 Binary files a/docs/versioned_docs/version-2.19/_media/example-emojivoto.jpg and /dev/null differ diff --git a/docs/versioned_docs/version-2.19/_media/example-online-boutique.jpg b/docs/versioned_docs/version-2.19/_media/example-online-boutique.jpg deleted file mode 100644 index 026f0d865..000000000 Binary files a/docs/versioned_docs/version-2.19/_media/example-online-boutique.jpg and /dev/null differ diff --git a/docs/versioned_docs/version-2.19/_media/recovery-gcp-serial-console-link.png b/docs/versioned_docs/version-2.19/_media/recovery-gcp-serial-console-link.png deleted file mode 100644 index eb67f0e99..000000000 Binary files a/docs/versioned_docs/version-2.19/_media/recovery-gcp-serial-console-link.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.19/_media/tcb.svg b/docs/versioned_docs/version-2.19/_media/tcb.svg deleted file mode 100644 index e5bcb5b95..000000000 --- a/docs/versioned_docs/version-2.19/_media/tcb.svg +++ /dev/null @@ -1,535 +0,0 @@ - - diff --git a/docs/versioned_docs/version-2.19/architecture/attestation.md b/docs/versioned_docs/version-2.19/architecture/attestation.md deleted file mode 100644 index 9bd157460..000000000 --- a/docs/versioned_docs/version-2.19/architecture/attestation.md +++ /dev/null @@ -1,409 +0,0 @@ -# Attestation - -This page explains Constellation's attestation process and highlights the cornerstones of its trust model. - -## Terms - -The following lists terms and concepts that help to understand the attestation concept of Constellation. 
- -### Trusted Platform Module (TPM) - -A TPM chip is a dedicated tamper-resistant crypto-processor. -It can securely store artifacts such as passwords, certificates, encryption keys, or *runtime measurements* (more on this below). -When a TPM is implemented in software, it's typically called a *virtual* TPM (vTPM). - -### Runtime measurement - -A runtime measurement is a cryptographic hash of the memory pages of a so called *runtime component*. Runtime components of interest typically include a system's bootloader or OS kernel. - -### Platform Configuration Register (PCR) - -A Platform Configuration Register (PCR) is a memory location in the TPM that has some unique properties. -To store a new value in a PCR, the existing value is extended with a new value as follows: - -``` -PCR[N] = HASHalg( PCR[N] || ArgumentOfExtend ) -``` - -The PCRs are typically used to store runtime measurements. -The new value of a PCR is always an extension of the existing value. -Thus, storing the measurements of multiple components into the same PCR irreversibly links them together. - -### Measured boot - -Measured boot builds on the concept of chained runtime measurements. -Each component in the boot chain loads and measures the next component into the PCR before executing it. -By comparing the resulting PCR values against trusted reference values, the integrity of the entire boot chain and thereby the running system can be ensured. - -### Remote attestation (RA) - -Remote attestation is the process of verifying certain properties of an application or platform, such as integrity and confidentiality, from a remote location. -In the case of a measured boot, the goal is to obtain a signed attestation statement on the PCR values of the boot measurements. -The statement can then be verified and compared to a set of trusted reference values. -This way, the integrity of the platform can be ensured before sharing secrets with it. - -### Confidential virtual machine (CVM) - -Confidential computing (CC) is the protection of data in-use with hardware-based trusted execution environments (TEEs). -With CVMs, TEEs encapsulate entire virtual machines and isolate them against the hypervisor, other VMs, and direct memory access. -After loading the initial VM image into encrypted memory, the hypervisor calls for a secure processor to measure these initial memory pages. -The secure processor locks these pages and generates an attestation report on the initial page measurements. -CVM memory pages are encrypted with a key that resides inside the secure processor, which makes sure only the guest VM can access them. -The attestation report is signed by the secure processor and can be verified using remote attestation via the certificate authority of the hardware vendor. -Such an attestation statement guarantees the confidentiality and integrity of a CVM. - -### Attested TLS (aTLS) - -In a CC environment, attested TLS (aTLS) can be used to establish secure connections between two parties using the remote attestation features of the CC components. - -aTLS modifies the TLS handshake by embedding an attestation statement into the TLS certificate. -Instead of relying on a certificate authority, aTLS uses this attestation statement to establish trust in the certificate. - -The protocol can be used by clients to verify a server certificate, by a server to verify a client certificate, or for mutual verification (mutual aTLS). 
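To make the extension formula above concrete, the following is a minimal, self-contained Go sketch of the extend operation (an illustration, not code from Constellation), assuming SHA-256 as the PCR's hash algorithm:

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// extendPCR models the TPM extend operation:
// PCR[N] = HASHalg( PCR[N] || ArgumentOfExtend ).
// The result depends on every value extended so far and on their order,
// which is what irreversibly links chained measurements together.
func extendPCR(pcr, measurement []byte) []byte {
	h := sha256.New()
	h.Write(pcr)
	h.Write(measurement)
	return h.Sum(nil)
}

func main() {
	pcr := make([]byte, sha256.Size) // a PCR starts out as all zeroes

	// Illustrative boot chain: each stage is hashed and extended into the
	// PCR before it's executed.
	for _, component := range []string{"bootloader", "kernel", "initramfs"} {
		digest := sha256.Sum256([]byte(component))
		pcr = extendPCR(pcr, digest[:])
	}

	fmt.Printf("final PCR value: %x\n", pcr)
}
```

Replaying the same measurements in the same order is the only way to reproduce the final value, which is why PCR values can be compared against reference values computed offline.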
-
-## Overview
-
-The challenge for Constellation is to lift a CVM's attestation statement to the Kubernetes software layer and make it end-to-end verifiable.
-From there, Constellation needs to expand the attestation from a single CVM to the entire cluster.
-
-The [*JoinService*](microservices.md#joinservice) and [*VerificationService*](microservices.md#verificationservice) are where everything comes together.
-Internally, the *JoinService* uses remote attestation to securely join CVM nodes to the cluster.
-Externally, the *VerificationService* provides an attestation statement for the cluster's CVMs and configuration.
-
-The following explains the details of both steps.
-
-## Node attestation
-
-The idea is that Constellation nodes should have verifiable integrity from the CVM hardware measurement up to the Kubernetes software layer.
-The solution is a verifiable boot chain and an integrity-protected runtime environment.
-
-Constellation uses measured boot within CVMs, measuring each component in the boot process before executing it.
-Outside of CC, this is usually implemented via TPMs.
-CVM technologies differ in how they implement runtime measurements, but the general concepts are similar to those of a TPM.
-For simplicity, TPM terminology like *PCR* is used in the following.
-
-When a Constellation node image boots inside a CVM, measured boot is used for all stages and components of the boot chain.
-This process goes up to the root filesystem.
-The root filesystem is mounted read-only with integrity protection.
-For the details on the image and boot stages, see the [image architecture](../architecture/images.md) documentation.
-Any changes to the image will inevitably also change the corresponding PCR values.
-To create a node attestation statement, the Constellation image obtains a CVM attestation statement from the hardware.
-This includes the runtime measurements and thereby binds the measured boot results to the CVM hardware measurement.
-
-In addition to the image measurements, Constellation extends a PCR during the [initialization phase](../workflows/create.md) that irrevocably marks the node as initialized.
-The measurement is created using the [*clusterID*](../architecture/keys.md#cluster-identity), tying all future attestation statements to this ID.
-Thereby, an attestation statement is unique for every cluster and a node can be identified unambiguously as being initialized.
-
-To verify an attestation, the hardware's signature and the statement are verified first to establish trust in the contained runtime measurements.
-If successful, the measurements are verified against the trusted values of the particular Constellation release version.
-Finally, the measurement of the *clusterID* can be compared by calculating it with the [master secret](keys.md#master-secret).
-
-### Runtime measurements
-
-Constellation uses runtime measurements to implement the measured boot approach.
-As stated above, the underlying hardware technology and guest firmware differ in their implementations of runtime measurements.
-The following gives a detailed description of the available measurements in the different cloud environments.
-
-The runtime measurements consist of two types of values:
-
-* **Measurements produced by the cloud infrastructure and firmware of the CVM**:
-These are measurements of closed-source firmware and other values controlled by the cloud provider.
-While not being reproducible for the user, some of them can be compared against previously observed values.
-Others may change frequently and aren't suitable for verification. -The [signed image measurements](#chain-of-trust) include measurements that are known, previously observed values. - -* **Measurements produced by the Constellation bootloader and boot chain**: -The Constellation Bootloader takes over from the CVM firmware and [measures the rest of the boot chain](images.md). -The Constellation [Bootstrapper](microservices.md#bootstrapper) is the first user mode component that runs in a Constellation image. -It extends PCR registers with the [IDs](keys.md#cluster-identity) of the cluster marking a node as initialized. - -Constellation allows to specify in the config which measurements should be enforced during the attestation process. -Enforcing non-reproducible measurements controlled by the cloud provider means that changes in these values require manual updates to the cluster's config. -By default, Constellation only enforces measurements that are stable values produced by the infrastructure or by Constellation directly. - - - - -Constellation uses the [vTPM](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html) (NitroTPM) feature of the [AWS Nitro System](http://aws.amazon.com/ec2/nitro/) on AWS for runtime measurements. - -The vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. -The VMs are attested by obtaining signed PCR values over the VM's boot configuration from the TPM and comparing them to a known, good state (measured boot). - -The following table lists all PCR values of the vTPM and the measured components. -It also lists what components of the boot chain did the measurements and if the value is reproducible and verifiable. -The latter means that the value can be generated offline and compared to the one in the vTPM. - -| PCR | Components | Measured by | Reproducible and verifiable | -| ----------- | ---------------------------------------------------------------- | -------------------------------------- | --------------------------- | -| 0 | Firmware | AWS | No | -| 1 | Firmware | AWS | No | -| 2 | Firmware | AWS | No | -| 3 | Firmware | AWS | No | -| 4 | Constellation Bootloader, Kernel, initramfs, Kernel command line | AWS, Constellation Bootloader | Yes | -| 5 | Firmware | AWS | No | -| 6 | Firmware | AWS | No | -| 7 | Secure Boot Policy | AWS, Constellation Bootloader | No | -| 8 | - | - | - | -| 9 | initramfs, Kernel command line | Linux Kernel | Yes | -| 10 | User space | Linux IMA | No[^1] | -| 11 | Unified Kernel Image components | Constellation Bootloader | Yes | -| 12 | Reserved | (User space, Constellation Bootloader) | Yes | -| 13 | Reserved | (Constellation Bootloader) | Yes | -| 14 | Secure Boot State | Constellation Bootloader | No | -| 15 | ClusterID | Constellation Bootstrapper | Yes | -| 16–23 | Unused | - | - | - - - - -Constellation uses the [vTPM](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch#vtpm) feature of Azure CVMs for runtime measurements. -This vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. -It provides a [measured boot](https://docs.microsoft.com/en-us/azure/security/fundamentals/measured-boot-host-attestation#measured-boot) verification that's based on the trusted launch feature of [Trusted Launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch). - -The following table lists all PCR values of the vTPM and the measured components. 
-It also lists what components of the boot chain did the measurements and if the value is reproducible and verifiable. -The latter means that the value can be generated offline and compared to the one in the vTPM. - -| PCR | Components | Measured by | Reproducible and verifiable | -| ----------- | ---------------------------------------------------------------- | -------------------------------------- | --------------------------- | -| 0 | Firmware | Azure | No | -| 1 | Firmware | Azure | No | -| 2 | Firmware | Azure | No | -| 3 | Firmware | Azure | No | -| 4 | Constellation Bootloader, Kernel, initramfs, Kernel command line | Azure, Constellation Bootloader | Yes | -| 5 | Reserved | Azure | No | -| 6 | VM Unique ID | Azure | No | -| 7 | Secure Boot State | Azure, Constellation Bootloader | No | -| 8 | - | - | - | -| 9 | initramfs, Kernel command line | Linux Kernel | Yes | -| 10 | User space | Linux IMA | No[^1] | -| 11 | Unified Kernel Image components | Constellation Bootloader | Yes | -| 12 | Reserved | (User space, Constellation Bootloader) | Yes | -| 13 | Reserved | (Constellation Bootloader) | Yes | -| 14 | Secure Boot State | Constellation Bootloader | No | -| 15 | ClusterID | Constellation Bootstrapper | Yes | -| 16–23 | Unused | - | - | - - - - -Constellation uses the [vTPM](https://cloud.google.com/compute/confidential-vm/docs/about-cvm) feature of CVMs on GCP for runtime measurements. -Note that this vTPM doesn't run inside the hardware-protected CVM context, but is emulated by the hypervisor. - -The vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. -It provides a [launch attestation report](https://cloud.google.com/compute/confidential-vm/docs/monitoring#about_launch_attestation_report_events) that's based on the measured boot feature of [Shielded VMs](https://cloud.google.com/compute/shielded-vm/docs/shielded-vm#measured-boot). - -The following table lists all PCR values of the vTPM and the measured components. -It also lists what components of the boot chain did the measurements and if the value is reproducible and verifiable. -The latter means that the value can be generated offline and compared to the one in the vTPM. - -| PCR | Components | Measured by | Reproducible and verifiable | -| ----------- | ---------------------------------------------------------------- | -------------------------------------- | --------------------------- | -| 0 | CVM version and technology | GCP | No | -| 1 | Firmware | GCP | No | -| 2 | Firmware | GCP | No | -| 3 | Firmware | GCP | No | -| 4 | Constellation Bootloader, Kernel, initramfs, Kernel command line | GCP, Constellation Bootloader | Yes | -| 5 | Disk GUID partition table | GCP | No | -| 6 | Disk GUID partition table | GCP | No | -| 7 | GCP Secure Boot Policy | GCP, Constellation Bootloader | No | -| 8 | - | - | - | -| 9 | initramfs, Kernel command line | Linux Kernel | Yes | -| 10 | User space | Linux IMA | No[^1] | -| 11 | Unified Kernel Image components | Constellation Bootloader | Yes | -| 12 | Reserved | (User space, Constellation Bootloader) | Yes | -| 13 | Reserved | (Constellation Bootloader) | Yes | -| 14 | Secure Boot State | Constellation Bootloader | No | -| 15 | ClusterID | Constellation Bootstrapper | Yes | -| 16–23 | Unused | - | - | - - - - -Constellation uses a hypervisor-based vTPM for runtime measurements. - -The vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. 
-The VMs are attested by obtaining signed PCR values over the VM's boot configuration from the TPM and comparing them to a known, good state (measured boot). - -The following table lists all PCR values of the vTPM and the measured components. -It also lists what components of the boot chain did the measurements and if the value is reproducible and verifiable. -The latter means that the value can be generated offline and compared to the one in the vTPM. - -| PCR | Components | Measured by | Reproducible and verifiable | -| ----------- | ---------------------------------------------------------------- | -------------------------------------- | --------------------------- | -| 0 | Firmware | STACKIT | No | -| 1 | Firmware | STACKIT | No | -| 2 | Firmware | STACKIT | No | -| 3 | Firmware | STACKIT | No | -| 4 | Constellation Bootloader, Kernel, initramfs, Kernel command line | STACKIT, Constellation Bootloader | Yes | -| 5 | Firmware | STACKIT | No | -| 6 | Firmware | STACKIT | No | -| 7 | Secure Boot Policy | STACKIT, Constellation Bootloader | No | -| 8 | - | - | - | -| 9 | initramfs, Kernel command line | Linux Kernel | Yes | -| 10 | User space | Linux IMA | No[^1] | -| 11 | Unified Kernel Image components | Constellation Bootloader | Yes | -| 12 | Reserved | (User space, Constellation Bootloader) | Yes | -| 13 | Reserved | (Constellation Bootloader) | Yes | -| 14 | Secure Boot State | Constellation Bootloader | No | -| 15 | ClusterID | Constellation Bootstrapper | Yes | -| 16–23 | Unused | - | - | - - - - -### CVM verification - -To verify the integrity of the received attestation statement, a chain of trust from the CVM technology to the interface providing the statement has to be established. -For verification of the CVM technology, Constellation may expose additional options in its config file. - - - - -On AWS, AMD SEV-SNP is used to provide runtime encryption to the VMs. -An SEV-SNP attestation report is used to establish trust in the VM. -You may customize certain parameters for verification of the attestation statement using the Constellation config file. - -* TCB versions - - You can set the minimum version numbers of components in the SEV-SNP TCB. - Use the latest versions to enforce that only machines with the most recent firmware updates are allowed to join the cluster. - Alternatively, you can set a lower minimum version to allow slightly out-of-date machines to still be able to join the cluster. - -* AMD Root Key Certificate - - This certificate is the root of trust for verifying the SEV-SNP certificate chain. - -* AMD Signing Key Certificate - - This is the intermediate certificate for verifying the SEV-SNP report's signature. - If it's not specified, the CLI fetches it from the AMD key distribution server. - - - - -On Azure, AMD SEV-SNP is used to provide runtime encryption to the VMs. -An SEV-SNP attestation report is used to establish trust in the vTPM running inside the VM. -You may customize certain parameters for verification of the attestation statement using the Constellation config file. - -* TCB versions - - You can set the minimum version numbers of components in the SEV-SNP TCB. - Use the latest versions to enforce that only machines with the most recent firmware updates are allowed to join the cluster. - Alternatively, you can set a lower minimum version to allow slightly out-of-date machines to still be able to join the cluster. - -* AMD Root Key Certificate - - This certificate is the root of trust for verifying the SEV-SNP certificate chain. 
- -* Firmware Signer - - This config option allows you to specify how the firmware signer should be verified. - More explicitly, it controls the verification of the `IDKeyDigest` value in the SEV-SNP attestation report. - You can provide a list of accepted key digests and specify a policy on how this list is compared against the reported `IDKeyDigest`. - - - - -On GCP, AMD SEV-SNP is used to provide runtime encryption to the VMs. -An SEV-SNP attestation report is used to establish trust in the VM. -You may customize certain parameters for verification of the attestation statement using the Constellation config file. - -* TCB versions - - You can set the minimum version numbers of components in the SEV-SNP TCB. - Use the latest versions to enforce that only machines with the most recent firmware updates are allowed to join the cluster. - Alternatively, you can set a lower minimum version to allow slightly out-of-date machines to still be able to join the cluster. - -* AMD Root Key Certificate - - This certificate is the root of trust for verifying the SEV-SNP certificate chain. - -* AMD Signing Key Certificate - - This is the intermediate certificate for verifying the SEV-SNP report's signature. - If it's not specified, the CLI fetches it from the AMD key distribution server. - - - - -On STACKIT, AMD SEV-ES is used to provide runtime encryption to the VMs. -The hypervisor-based vTPM is used to establish trust in the VM via [runtime measurements](#runtime-measurements). -There is no additional configuration available for STACKIT. - - - - -## Cluster attestation - -Cluster-facing, Constellation's [*JoinService*](microservices.md#joinservice) verifies each node joining the cluster given the configured ground truth runtime measurements. -User-facing, the [*VerificationService*](microservices.md#verificationservice) provides an interface to verify a node using remote attestation. -By verifying the first node during the [initialization](microservices.md#bootstrapper) and configuring the ground truth measurements that are subsequently enforced by the *JoinService*, the whole cluster is verified in a transitive way. - -### Cluster-facing attestation - -The *JoinService* is provided with the runtime measurements of the whitelisted Constellation image version as the ground truth. -During the initialization and the cluster bootstrapping, each node connects to the *JoinService* using [aTLS](#attested-tls-atls). -During the handshake, the node transmits an attestation statement including its runtime measurements. -The *JoinService* verifies that statement and compares the measurements against the ground truth. -For details of the initialization process check the [microservice descriptions](microservices.md). - -After the initialization, every node updates its runtime measurements with the *clusterID* value, marking it irreversibly as initialized. -When an initialized node tries to join another cluster, its measurements inevitably mismatch the measurements of an uninitialized node and it will be declined. - -### User-facing attestation - -The [*VerificationService*](microservices.md#verificationservice) provides an endpoint for obtaining its hardware-based remote attestation statement, which includes the runtime measurements. -A user can [verify](../workflows/verify-cluster.md) this statement and compare the measurements against the configured ground truth and, thus, verify the identity and integrity of all Constellation components and the cluster configuration. 
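Conceptually, this comparison is a per-register check of the reported runtime measurements against the configured reference values, honoring each measurement's enforcement policy (the `warnOnly` setting). The following Go sketch illustrates that logic only; the type and function names are hypothetical simplifications and don't mirror Constellation's actual implementation:

```go
package measurements

import (
	"bytes"
	"fmt"
)

// Measurement is a simplified stand-in for an expected PCR value and its
// enforcement policy.
type Measurement struct {
	Expected []byte
	WarnOnly bool
}

// Compare checks reported PCR values against the expected reference values.
// A mismatch in an enforced measurement fails verification, while a mismatch
// in a warn-only measurement is merely reported.
func Compare(expected map[uint32]Measurement, reported map[uint32][]byte) ([]string, error) {
	var warnings []string
	for idx, want := range expected {
		got, ok := reported[idx]
		if ok && bytes.Equal(want.Expected, got) {
			continue
		}
		if want.WarnOnly {
			warnings = append(warnings, fmt.Sprintf("PCR[%d] doesn't match the expected value", idx))
			continue
		}
		return warnings, fmt.Errorf("PCR[%d] doesn't match the expected value", idx)
	}
	return warnings, nil
}
```

If all enforced measurements match, verification succeeds.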
Subsequently, the user knows that the entire cluster is in the expected state and is trustworthy.
-
-## Putting it all together
-
-This section puts the aforementioned concepts together and illustrates how trust in a Constellation cluster is established and maintained.
-
-### CLI and node images
-
-It all starts with the CLI executable. The CLI is signed by Edgeless Systems. To ensure non-repudiability for CLI releases, Edgeless Systems publishes corresponding signatures to the public ledger of the [sigstore project](https://www.sigstore.dev/). There's a [step-by-step guide](../workflows/verify-cli.md) on how to verify CLI signatures based on sigstore.
-
-The CLI contains the latest runtime measurements of the Constellation node image for all supported cloud platforms. In case a different version of the node image is to be used, the corresponding runtime measurements can be fetched using the CLI's [fetch-measurements command](../reference/cli.md#constellation-config-fetch-measurements). This command downloads the runtime measurements and the corresponding signature from cdn.confidential.cloud. See, for example, the following files corresponding to node image v2.16.3:
-
-* [Measurements](https://cdn.confidential.cloud/constellation/v2/ref/-/stream/stable/v2.16.3/image/measurements.json)
-* [Signature](https://cdn.confidential.cloud/constellation/v2/ref/-/stream/stable/v2.16.3/image/measurements.json.sig)
-
-The CLI contains the long-term public key of Edgeless Systems to verify the signature of downloaded runtime measurements.
-
-### Cluster creation
-
-When a cluster is [created](../workflows/create.md), the CLI automatically verifies the runtime measurements of the *first node* using remote attestation. Based on this, the CLI and the first node set up a temporary TLS connection. This [aTLS](#attested-tls-atls) connection is used for two things:
-
-1. The CLI sends the [master secret](../architecture/keys.md#master-secret) of the to-be-created cluster to the first node. The master secret is generated by the CLI.
-2. The first node sends a [kubeconfig file](https://www.redhat.com/sysadmin/kubeconfig) with Kubernetes credentials to the CLI.
-
-After this, the aTLS connection is closed and the first node bootstraps the Kubernetes cluster. All subsequent interactions between the CLI and the cluster go via the [Kubernetes API](https://kubernetes.io/docs/concepts/overview/kubernetes-api/) server running inside the cluster. The CLI (and other tools like kubectl) use the credentials referenced by the kubeconfig file to authenticate themselves to the Kubernetes API server and to establish an mTLS connection.
-
-The CLI connects to the Kubernetes API to write the runtime measurements for the applicable node image to etcd. The JoinService uses these runtime measurements to verify all nodes that join the cluster subsequently.
-
-### Chain of trust
-
-In summary, there's a chain of trust based on cryptographic signatures that goes from the user to the cluster via the CLI. This is illustrated in the following diagram.
- -```mermaid -flowchart LR - A[User]-- "verifies" -->B[CLI] - B[CLI]-- "verifies" -->C([Runtime measurements]) - D[Edgeless Systems]-- "signs" -->B[CLI] - D[Edgeless Systems]-- "signs" -->C([Runtime measurements]) - B[CLI]-- "verifies (remote attestation)" -->E[First node] - E[First node]-- "verifies (remote attestation)" -->F[Other nodes] - C([Runtime measurements]) -.-> E[First node] - C([Runtime measurements]) -.-> F[Other nodes] -``` - -### Upgrades - -Whenever a cluster is [upgraded](../workflows/upgrade.md) to a new version of the node image, the CLI sends the corresponding runtime measurements via the Kubernetes API server. The new runtime measurements are stored in etcd within the cluster and replace any previous runtime measurements. The new runtime measurements are then used automatically by the JoinService for the verification of new nodes. - -## References - -[^1]: Linux IMA produces runtime measurements of user-space binaries. -However, these measurements aren't deterministic and thus, PCR\[10] can't be compared to a constant value. -Instead, a policy engine must be used to verify the TPM event log against a policy. diff --git a/docs/versioned_docs/version-2.19/architecture/encrypted-storage.md b/docs/versioned_docs/version-2.19/architecture/encrypted-storage.md deleted file mode 100644 index f047fa4a9..000000000 --- a/docs/versioned_docs/version-2.19/architecture/encrypted-storage.md +++ /dev/null @@ -1,62 +0,0 @@ -# Encrypted persistent storage - -Confidential VMs provide runtime memory encryption to protect data in use. -In the context of Kubernetes, this is sufficient for the confidentiality and integrity of stateless services. -Consider a front-end web server, for example, that keeps all connection information cached in main memory. -No sensitive data is ever written to an insecure medium. -However, many real-world applications need some form of state or data-lake service that's connected to a persistent storage device and requires encryption at rest. -As described in [Use persistent storage](../workflows/storage.md), cloud service providers (CSPs) use the container storage interface (CSI) to make their storage solutions available to Kubernetes workloads. -These CSI storage solutions often support some sort of encryption. -For example, Google Cloud [encrypts data at rest by default](https://cloud.google.com/security/encryption/default-encryption), without any action required by the customer. - -## Cloud provider-managed encryption - -CSP-managed storage solutions encrypt the data in the cloud backend before writing it physically to disk. -In the context of confidential computing and Constellation, the CSP and its managed services aren't trusted. -Hence, cloud provider-managed encryption protects your data from offline hardware access to physical storage devices. -It doesn't protect it from anyone with infrastructure-level access to the storage backend or a malicious insider in the cloud platform. -Even with "bring your own key" or similar concepts, the CSP performs the encryption process with access to the keys and plaintext data. - -In the security model of Constellation, securing persistent storage and thereby data at rest requires that all cryptographic operations are performed inside a trusted execution environment. -Consequently, using CSP-managed encryption of persistent storage usually isn't an option. - -## Constellation-managed encryption - -Constellation provides CSI drivers for storage solutions in all major clouds with built-in encryption support. 
-Block storage provisioned by the CSP is [mapped](https://guix.gnu.org/manual/en/html_node/Mapped-Devices.html) using the [dm-crypt](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/dm-crypt.html), and optionally the [dm-integrity](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/dm-integrity.html), kernel modules, before it's formatted and accessed by the Kubernetes workloads. -All cryptographic operations happen inside the trusted environment of the confidential Constellation node. - -Note that for integrity-protected disks, [volume expansion](https://kubernetes.io/blog/2018/07/12/resizing-persistent-volumes-using-kubernetes/) isn't supported. - -By default the driver uses data encryption keys (DEKs) issued by the Constellation [*KeyService*](microservices.md#keyservice). -The DEKs are in turn derived from the Constellation's key encryption key (KEK), which is directly derived from the [master secret](keys.md#master-secret). -This is the recommended mode of operation, and also requires the least amount of setup by the cluster administrator. - -Alternatively, the driver can be configured to use a key management system to store and access KEKs and DEKs. - -Refer to [keys and cryptography](keys.md) for more details on key management in Constellation. - -Once deployed and configured, the CSI driver ensures transparent encryption and integrity of all persistent volumes provisioned via its storage class. -Data at rest is secured without any additional actions required by the developer. - -## Cryptographic algorithms - -This section gives an overview of the libraries, cryptographic algorithms, and their configurations, used in Constellation's CSI drivers. - -### dm-crypt - -To interact with the dm-crypt kernel module, Constellation uses [libcryptsetup](https://gitlab.com/cryptsetup/cryptsetup/). -New devices are formatted as [LUKS2](https://gitlab.com/cryptsetup/LUKS2-docs/-/tree/master) partitions with a sector size of 4096 bytes. -The used key derivation function is [Argon2id](https://datatracker.ietf.org/doc/html/rfc9106) with the [recommended parameters for memory-constrained environments](https://datatracker.ietf.org/doc/html/rfc9106#section-7.4) of 3 iterations and 64 MiB of memory, utilizing 4 parallel threads. -For encryption Constellation uses AES in XTS-Plain64. The key size is 512 bit. - -### dm-integrity - -To interact with the dm-integrity kernel module, Constellation uses [libcryptsetup](https://gitlab.com/cryptsetup/cryptsetup/). -When enabled, the used data integrity algorithm is [HMAC](https://datatracker.ietf.org/doc/html/rfc2104) with SHA256 as the hash function. -The tag size is 32 Bytes. - -## Encrypted S3 object storage - -Constellation comes with a service that you can use to transparently retrofit client-side encryption to existing applications that use S3 (AWS or compatible) for storage. -To learn more, check out the [s3proxy documentation](../workflows/s3proxy.md). diff --git a/docs/versioned_docs/version-2.19/architecture/images.md b/docs/versioned_docs/version-2.19/architecture/images.md deleted file mode 100644 index 8a9c51d36..000000000 --- a/docs/versioned_docs/version-2.19/architecture/images.md +++ /dev/null @@ -1,49 +0,0 @@ -# Constellation images - -Constellation uses a minimal version of Fedora as the operating system running inside confidential VMs. This Linux distribution is optimized for containers and designed to be stateless. -The Constellation images provide measured boot and an immutable filesystem. 
- -## Measured boot - -```mermaid -flowchart LR - Firmware --> Bootloader - Bootloader --> uki - subgraph uki[Unified Kernel Image] - Kernel[Kernel] - initramfs[Initramfs] - cmdline[Kernel Command Line] - end - uki --> rootfs[Root Filesystem] -``` - -Measured boot uses a Trusted Platform Module (TPM) to measure every part of the boot process. This allows for verification of the integrity of a running system at any point in time. To ensure correct measurements of every stage, each stage is responsible to measure the next stage before transitioning. - -### Firmware - -With confidential VMs, the firmware is the root of trust and is measured automatically at boot. After initialization, the firmware will load and measure the bootloader before executing it. - -### Bootloader - -The bootloader is the first modifiable part of the boot chain. The bootloader is tasked with loading the kernel, initramfs and setting the kernel command line. The Constellation bootloader measures these components before starting the kernel. - -### initramfs - -The initramfs is a small filesystem loaded to prepare the actual root filesystem. The Constellation initramfs maps the block device containing the root filesystem with [dm-verity](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/verity.html). The initramfs then mounts the root filesystem from the mapped block device. - -dm-verity provides integrity checking using a cryptographic hash tree. When a block is read, its integrity is checked by verifying the tree against a trusted root hash. The initramfs reads this root hash from the previously measured kernel command line. Thus, if any block of the root filesystem's device is modified on disk, trying to read the modified block will result in a kernel panic at runtime. - -After mounting the root filesystem, the initramfs will switch over and start the `init` process of the integrity-protected root filesystem. - -## State disk - -In addition to the read-only root filesystem, each Constellation node has a disk for storing state data. -This disk is mounted readable and writable by the initramfs and contains data that should persist across reboots. -Such data can contain sensitive information and, therefore, must be stored securely. -To that end, the state disk is protected by authenticated encryption. -See the section on [keys and encryption](keys.md#storage-encryption) for more information on the cryptographic primitives in use. - -## Kubernetes components - -During initialization, the [*Bootstrapper*](microservices.md#bootstrapper) downloads and verifies the [Kubernetes components](https://kubernetes.io/docs/concepts/overview/components/) as configured by the user. -They're stored on the state partition and can be updated once new releases need to be installed. diff --git a/docs/versioned_docs/version-2.19/architecture/keys.md b/docs/versioned_docs/version-2.19/architecture/keys.md deleted file mode 100644 index 553d9d4e2..000000000 --- a/docs/versioned_docs/version-2.19/architecture/keys.md +++ /dev/null @@ -1,131 +0,0 @@ -# Key management and cryptographic primitives - -Constellation protects and isolates your cluster and workloads. -To that end, cryptography is the foundation that ensures the confidentiality and integrity of all components. -Evaluating the security and compliance of Constellation requires a precise understanding of the cryptographic primitives and keys used. -The following gives an overview of the architecture and explains the technical details. 
- -## Confidential VMs - -Confidential VM (CVM) technology comes with hardware and software components for memory encryption, isolation, and remote attestation. -For details on the implementations and cryptographic soundness, refer to the hardware vendors' documentation and advisories. - -## Master secret - -The master secret is the cryptographic material used for deriving the [*clusterID*](#cluster-identity) and the *key encryption key (KEK)* for [storage encryption](#storage-encryption). -It's generated during the bootstrapping of a Constellation cluster. -It can either be managed by [Constellation](#constellation-managed-key-management) or an [external key management system](#user-managed-key-management). -In case of [recovery](#recovery-and-migration), the master secret allows to decrypt the state and recover a Constellation cluster. - -## Cluster identity - -The identity of a Constellation cluster is represented by cryptographic [measurements](attestation.md#runtime-measurements): - -The **base measurements** represent the identity of a valid, uninitialized Constellation node. -They depend on the node image, but are otherwise the same for every Constellation cluster. -On node boot, they're determined using the CVM's attestation mechanism and [measured boot up to the read-only root filesystem](images.md). - -The **clusterID** represents the identity of a single initialized Constellation cluster. -It's derived from the master secret and a cryptographically random salt and unique for every Constellation cluster. -The [Bootstrapper](microservices.md#bootstrapper) measures the *clusterID* into its own PCR before executing any code not measured as part of the *base measurements*. -See [Node attestation](attestation.md#node-attestation) for details. - -The remote attestation statement of a Constellation cluster combines the *base measurements* and the *clusterID* for a verifiable, unspoofable, unique identity. - -## Network encryption - -Constellation encrypts all cluster network communication using the [container network interface (CNI)](https://github.com/containernetworking/cni). -See [network encryption](networking.md) for more details. - -The Cilium agent running on each node establishes a secure [WireGuard](https://www.wireguard.com/) tunnel between it and all other known nodes in the cluster. -Each node creates its own [Curve25519](http://cr.yp.to/ecdh.html) encryption key pair and distributes its public key via Kubernetes. -A node uses another node's public key to decrypt and encrypt traffic from and to Cilium-managed endpoints running on that node. -Connections are always encrypted peer-to-peer using [ChaCha20](http://cr.yp.to/chacha.html) with [Poly1305](http://cr.yp.to/mac.html). -WireGuard implements [forward secrecy with key rotation every 2 minutes](https://lists.zx2c4.com/pipermail/wireguard/2017-December/002141.html). -Cilium supports [key rotation](https://docs.cilium.io/en/stable/security/network/encryption-ipsec/#key-rotation) for the long-term node keys via Kubernetes secrets. - -## Storage encryption - -Constellation supports transparent encryption of persistent storage. -The Linux kernel's device mapper-based encryption features are used to encrypt the data on the block storage level. 
-Currently, the following primitives are used for block storage encryption:
-
-* [dm-crypt](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/dm-crypt.html)
-* [dm-integrity](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/dm-integrity.html)
-
-Additional primitives for integrity protection in the CVM attacker model are under active development and will be available in a future version of Constellation.
-See [encrypted storage](encrypted-storage.md) for more details.
-
-As a cluster administrator, when creating a cluster, you can use the Constellation [installation program](orchestration.md) to select one of the following methods for key management:
-
-* Constellation-managed key management
-* User-managed key management
-
-### Constellation-managed key management
-
-#### Key material and key derivation
-
-During the creation of a Constellation cluster, the cluster's master secret is used to derive a KEK.
-This means creating two clusters with the same master secret will yield the same KEK.
-Any data encryption key (DEK) is derived from the KEK via HKDF.
-Note that the master secret is recommended to be unique for every cluster and shouldn't be reused (except in case of [recovering](../workflows/recovery.md) a cluster).
-
-#### State and storage
-
-The KEK is derived from the master secret during the initialization.
-Subsequently, all other key material is derived from the KEK.
-Given the same KEK, any DEK can be derived deterministically from a given identifier.
-Hence, there is no need to store DEKs. They can be derived on demand.
-After the KEK is derived, it's stored in memory only and never leaves the CVM context.
-
-#### Availability
-
-Constellation-managed key management has the same availability as the underlying Kubernetes cluster.
-Therefore, the KEK is stored in the [distributed Kubernetes etcd storage](https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/) to allow for unexpected but non-fatal (control-plane) node failure.
-The etcd storage is backed by the encrypted and integrity-protected [state disk](images.md#state-disk) of the nodes.
-
-#### Recovery
-
-Constellation clusters can be recovered in the event of a disaster, even when all node machines have been stopped and need to be rebooted.
-For details on the process, see the [recovery workflow](../workflows/recovery.md).
-
-### User-managed key management
-
-User-managed key management is under active development and will be available soon.
-In scenarios where Constellation-managed key management isn't an option, this mode allows you to keep full control of your keys.
-For example, compliance requirements may force you to keep your KEKs in an on-prem key management system (KMS).
-
-During the creation of a Constellation cluster, you specify a KEK present in a remote KMS.
-This follows the common scheme of "bring your own key" (BYOK).
-Constellation will support several KMSs for managing the storage and access of your KEK.
-Initially, it will support the following KMSs:
-
-* [AWS KMS](https://aws.amazon.com/kms/)
-* [GCP KMS](https://cloud.google.com/security-key-management)
-* [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/#product-overview)
-* [KMIP-compatible KMS](https://www.oasis-open.org/committees/tc_home.php?wg_abbrev=kmip)
-
-Storing the keys in Cloud KMS of AWS, Azure, or GCP binds the key usage to the particular cloud identity access management (IAM).
-In the future, Constellation will support remote attestation-based access policies for Cloud KMS once available. -Note that using a Cloud KMS limits the isolation and protection to the guarantees of the particular offering. - -KMIP support allows you to use your KMIP-compatible on-prem KMS and keep full control over your keys. -This follows the common scheme of "hold your own key" (HYOK). - -The KEK is used to encrypt per-data "data encryption keys" (DEKs). -DEKs are generated to encrypt your data before storing it on persistent storage. -After being encrypted by the KEK, the DEKs are stored on dedicated cloud storage for persistence. -Currently, Constellation supports the following cloud storage options: - -* [AWS S3](https://aws.amazon.com/s3/) -* [GCP Cloud Storage](https://cloud.google.com/storage) -* [Azure Blob Storage](https://azure.microsoft.com/en-us/services/storage/blobs/#overview) - -The DEKs are only present in plaintext form in the encrypted main memory of the CVMs. -Similarly, the cryptographic operations for encrypting data before writing it to persistent storage are performed in the context of the CVMs. - -#### Recovery and migration - -In the case of a disaster, the KEK can be used to decrypt the DEKs locally and subsequently use them to decrypt and retrieve the data. -In case of migration, configuring the same KEK will provide seamless migration of data. -Thus, only the DEK storage needs to be transferred to the new cluster alongside the encrypted data for seamless migration. diff --git a/docs/versioned_docs/version-2.19/architecture/microservices.md b/docs/versioned_docs/version-2.19/architecture/microservices.md deleted file mode 100644 index 90bae783b..000000000 --- a/docs/versioned_docs/version-2.19/architecture/microservices.md +++ /dev/null @@ -1,73 +0,0 @@ -# Microservices - -Constellation takes care of bootstrapping and initializing a Confidential Kubernetes cluster. -During the lifetime of the cluster, it handles day 2 operations such as key management, remote attestation, and updates. -These features are provided by several microservices: - -* The [Bootstrapper](microservices.md#bootstrapper) initializes a Constellation node and bootstraps the cluster -* The [JoinService](microservices.md#joinservice) joins new nodes to an existing cluster -* The [VerificationService](microservices.md#verificationservice) provides remote attestation functionality -* The [KeyService](microservices.md#keyservice) manages Constellation-internal keys - -The relations between microservices are shown in the following diagram: - -```mermaid -flowchart LR - subgraph admin [Admin's machine] - A[Constellation CLI] - end - subgraph img [Constellation OS image] - B[Constellation OS] - C[Bootstrapper] - end - subgraph Kubernetes - D[JoinService] - E[KeyService] - F[VerificationService] - end - A -- deploys --> - B -- starts --> C - C -- deploys --> D - C -- deploys --> E - C -- deploys --> F -``` - -## Bootstrapper - -The *Bootstrapper* is the first microservice launched after booting a Constellation node image. -It sets up that machine as a Kubernetes node and integrates that node into the Kubernetes cluster. -To this end, the *Bootstrapper* first downloads and verifies the [Kubernetes components](https://kubernetes.io/docs/concepts/overview/components/) at the configured versions. -The *Bootstrapper* tries to find an existing cluster and if successful, communicates with the [JoinService](microservices.md#joinservice) to join the node. 
-Otherwise, it waits for an initialization request to create a new Kubernetes cluster.
-
-## JoinService
-
-The *JoinService* runs as a [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) on each control-plane node.
-New nodes (at cluster start, or later through autoscaling) send a request to the service over [attested TLS (aTLS)](attestation.md#attested-tls-atls).
-The *JoinService* verifies the new node's certificate and attestation statement.
-If attestation is successful, the new node is supplied with an encryption key from the [*KeyService*](microservices.md#keyservice) for its state disk, and a Kubernetes bootstrap token.
-
-
-```mermaid
-sequenceDiagram
-    participant New node
-    participant JoinService
-    New node->>JoinService: aTLS handshake (server side verification)
-    JoinService-->>New node: #
-    New node->>+JoinService: IssueJoinTicket(DiskUUID, NodeName, IsControlPlane)
-    JoinService->>+KeyService: GetDataKey(DiskUUID)
-    KeyService-->>-JoinService: DiskEncryptionKey
-    JoinService-->>-New node: DiskEncryptionKey, KubernetesJoinToken, ...
-```
-
-## VerificationService
-
-The *VerificationService* runs as a DaemonSet on each node.
-It provides user-facing functionality for remote attestation during the cluster's lifetime via an endpoint for [verifying the cluster](attestation.md#cluster-attestation).
-Read more about the hardware-based [attestation feature](attestation.md) of Constellation and how to [verify](../workflows/verify-cluster.md) a cluster on the client side.
-
-## KeyService
-
-The *KeyService* runs as a DaemonSet on each control-plane node.
-It implements the key management for the [storage encryption keys](keys.md#storage-encryption) in Constellation. These keys are used for the [state disk](images.md#state-disk) of each node and the [transparently encrypted storage](encrypted-storage.md) for Kubernetes.
-Depending on whether the [constellation-managed](keys.md#constellation-managed-key-management) or [user-managed](keys.md#user-managed-key-management) mode is used, the *KeyService* holds the key encryption key (KEK) directly or calls an external key management service (KMS) for key derivation, respectively.
diff --git a/docs/versioned_docs/version-2.19/architecture/networking.md b/docs/versioned_docs/version-2.19/architecture/networking.md
deleted file mode 100644
index e9cbdf029..000000000
--- a/docs/versioned_docs/version-2.19/architecture/networking.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# Network encryption
-
-Constellation encrypts all pod communication using the [container network interface (CNI)](https://github.com/containernetworking/cni).
-To that end, Constellation deploys, configures, and operates the [Cilium](https://cilium.io/) CNI plugin.
-Cilium provides [transparent encryption](https://docs.cilium.io/en/stable/security/network/encryption) for all cluster traffic using either IPSec or [WireGuard](https://www.wireguard.com/).
-Currently, Constellation only supports WireGuard as the encryption engine.
-You can read more about the cryptographic soundness of WireGuard [in their white paper](https://www.wireguard.com/papers/wireguard.pdf).
-
-Cilium is actively working on implementing a feature called [`host-to-host`](https://github.com/cilium/cilium/pull/19401) encryption mode for WireGuard.
-With `host-to-host`, all traffic between nodes will be tunneled via WireGuard (host-to-host, host-to-pod, pod-to-host, pod-to-pod).
-Until the `host-to-host` feature is released, Constellation enables `pod-to-pod` encryption.
-This mode encrypts all traffic between Kubernetes pods using WireGuard tunnels.
-
-When using Cilium in the default setup but with encryption enabled, there is a [known issue](https://docs.cilium.io/en/v1.12/gettingstarted/encryption/#egress-traffic-to-not-yet-discovered-remote-endpoints-may-be-unencrypted)
-that can cause pod-to-pod traffic to be unencrypted.
-To mitigate this issue, Constellation adds a *strict* mode to Cilium's `pod-to-pod` encryption.
-This mode changes the default behavior so that traffic destined for an unknown endpoint isn't sent out in plaintext but is dropped instead.
-The strict mode distinguishes traffic that's sent to a pod from traffic that's destined for a cluster-external endpoint by considering the pod's CIDR range.
-
-Traffic originating from hosts isn't encrypted yet.
-This mainly includes health checks from the Kubernetes API server.
-Also, traffic proxied over the API server via e.g. `kubectl port-forward` isn't encrypted.
diff --git a/docs/versioned_docs/version-2.19/architecture/observability.md b/docs/versioned_docs/version-2.19/architecture/observability.md
deleted file mode 100644
index 0f4daffd4..000000000
--- a/docs/versioned_docs/version-2.19/architecture/observability.md
+++ /dev/null
@@ -1,74 +0,0 @@
-# Observability
-
-In Kubernetes, observability is the ability to gain insight into the behavior and performance of applications.
-It helps identify and resolve issues more effectively, ensuring stability and performance of Kubernetes workloads, reducing downtime and outages, and improving efficiency.
-The "three pillars of observability" are logs, metrics, and traces.
-
-In the context of Confidential Computing, observability is a delicate subject and needs to be applied such that it doesn't leak any sensitive information.
-The following gives an overview of where and how you can apply standard observability tools in Constellation.
-
-## Cloud resource monitoring
-
-While inaccessible, Constellation's nodes are still visible as black box VMs to the hypervisor.
-Resource consumption, such as memory and CPU utilization, can be monitored from the outside and observed via the cloud platforms directly.
-Similarly, other resources, such as storage and network and their respective metrics, are visible via the cloud platform.
-
-## Metrics
-
-Metrics are numeric representations of data measured over intervals of time. They're essential for understanding system health and gaining insights using telemetry signals.
-
-By default, Constellation exposes the [metrics for Kubernetes system components](https://kubernetes.io/docs/concepts/cluster-administration/system-metrics/) inside the cluster.
-Similarly, the [etcd metrics](https://etcd.io/docs/v3.5/metrics/) endpoints are exposed inside the cluster.
-These [metrics endpoints can be disabled](https://kubernetes.io/docs/concepts/cluster-administration/system-metrics/#disabling-metrics).
-
-You can collect these cluster-internal metrics via tools such as [Prometheus](https://prometheus.io/) or the [Elastic Stack](https://www.elastic.co/de/elastic-stack/).
-
-Constellation's CNI Cilium also supports [metrics via Prometheus endpoints](https://docs.cilium.io/en/latest/observability/metrics/).
-However, in Constellation, they're disabled by default and must be enabled first.
-
-## Logs
-
-Logs represent discrete events that usually describe what's happening with your service.
-The payload is an actual message emitted from your system along with a metadata section containing a timestamp, labels, and tracking identifiers. - -### System logs - -Detailed system-level logs are accessible via `/var/log` and [journald](https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html) on the nodes directly. -They can be collected from there, for example, via [Filebeat and Logstash](https://www.elastic.co/guide/en/beats/filebeat/current/logstash-output.html), which are tools of the [Elastic Stack](https://www.elastic.co/de/elastic-stack/). - -In case of an error during the initialization, the CLI automatically collects the [Bootstrapper](./microservices.md#bootstrapper) logs and returns these as a file for [troubleshooting](../workflows/troubleshooting.md). Here is an example of such an event: - -```shell-session -Cluster initialization failed. This error is not recoverable. -Terminate your cluster and try again. -Fetched bootstrapper logs are stored in "constellation-cluster.log" -``` - -### Kubernetes logs - -Constellation supports the [Kubernetes logging architecture](https://kubernetes.io/docs/concepts/cluster-administration/logging/). -By default, logs are written to the nodes' encrypted state disks. -These include the Pod and container logs and the [system component logs](https://kubernetes.io/docs/concepts/cluster-administration/logging/#system-component-logs). - -[Constellation services](microservices.md) run as Pods inside the `kube-system` namespace and use the standard container logging mechanism. -The same applies for the [Cilium Pods](https://docs.cilium.io/en/latest/operations/troubleshooting/#logs). - -You can collect logs from within the cluster via tools such as [Fluentd](https://github.com/fluent/fluentd), [Loki](https://github.com/grafana/loki), or the [Elastic Stack](https://www.elastic.co/de/elastic-stack/). - -## Traces - -Modern systems are implemented as interconnected complex and distributed microservices. Understanding request flows and system communications is challenging, mainly because all systems in a chain need to be modified to propagate tracing information. Distributed tracing is a new approach to increasing observability and understanding performance bottlenecks. A trace represents consecutive events that reflect an end-to-end request path in a distributed system. - -Constellation supports [traces for Kubernetes system components](https://kubernetes.io/docs/concepts/cluster-administration/system-traces/). -By default, they're disabled and need to be enabled first. - -Similarly, Cilium can be enabled to [export traces](https://cilium.io/use-cases/metrics-export/). - -You can collect these traces via tools such as [Jaeger](https://www.jaegertracing.io/) or [Zipkin](https://zipkin.io/). - -## Integrations - -Platforms and SaaS solutions such as Datadog, logz.io, Dynatrace, or New Relic facilitate the observability challenge for Kubernetes and provide all-in-one SaaS solutions. -They install agents into the cluster that collect metrics, logs, and tracing information and upload them into the data lake of the platform. -Technically, the agent-based approach is compatible with Constellation, and attaching these platforms is straightforward. -However, you need to evaluate if the exported data might violate Constellation's compliance and privacy guarantees by uploading them to a third-party platform. 
diff --git a/docs/versioned_docs/version-2.19/architecture/orchestration.md b/docs/versioned_docs/version-2.19/architecture/orchestration.md deleted file mode 100644 index 3c8d529e7..000000000 --- a/docs/versioned_docs/version-2.19/architecture/orchestration.md +++ /dev/null @@ -1,83 +0,0 @@ -# Orchestrating Constellation clusters - -You can use the CLI to create a cluster on the supported cloud platforms. -The CLI provisions the resources in your cloud environment and initiates the initialization of your cluster. -It uses a set of parameters and an optional configuration file to manage your cluster installation. -The CLI is also used for updating your cluster. - -## Workspaces - -Each Constellation cluster has an associated *workspace*. -The workspace is where data such as the Constellation state and config files are stored. -Each workspace is associated with a single cluster and configuration. -The CLI stores state in the local filesystem making the current directory the active workspace. -Multiple clusters require multiple workspaces, hence, multiple directories. -Note that every operation on a cluster always has to be performed from the directory associated with its workspace. - -You may copy files from the workspace to other locations, -but you shouldn't move or delete them while the cluster is still being used. -The Constellation CLI takes care of managing the workspace. -Only when a cluster was terminated, and you are sure the files aren't needed anymore, should you remove a workspace. - -## Cluster creation process - -To allow for fine-grained configuration of your cluster and cloud environment, Constellation supports an extensive configuration file with strong defaults. [Generating the configuration file](../workflows/config.md) is typically the first thing you do in the workspace. - -Altogether, the following files are generated during the creation of a Constellation cluster and stored in the current workspace: - -* a configuration file -* a state file -* a Base64-encoded master secret -* [Terraform artifacts](../reference/terraform.md), stored in subdirectories -* a Kubernetes `kubeconfig` file. - -After the initialization of your cluster, the CLI will provide you with a Kubernetes `kubeconfig` file. -This file grants you access to your Kubernetes cluster and configures the [kubectl](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) tool. -In addition, the cluster's [identifier](orchestration.md#post-installation-configuration) is returned and stored in the state file. - -### Creation process details - -1. The CLI `apply` command first creates the confidential VM (CVM) resources in your cloud environment and configures the network -2. Each CVM boots the Constellation node image and measures every component in the boot chain -3. The first microservice launched in each node is the [*Bootstrapper*](microservices.md#bootstrapper) -4. The *Bootstrapper* waits until it either receives an initialization request or discovers an initialized cluster -5. The CLI then connects to the *Bootstrapper* of a selected node, sends the configuration, and initiates the initialization of the cluster -6. The *Bootstrapper* of **that** node [initializes the Kubernetes cluster](microservices.md#bootstrapper) and deploys the other Constellation [microservices](microservices.md) including the [*JoinService*](microservices.md#joinservice) -7. 
Subsequently, the *Bootstrappers* of the other nodes discover the initialized cluster and send join requests to the *JoinService* -8. As part of the join request each node includes an attestation statement of its boot measurements as authentication -9. The *JoinService* verifies the attestation statements and joins the nodes to the Kubernetes cluster -10. This process is repeated for every node joining the cluster later (e.g., through autoscaling) - -## Post-installation configuration - -Post-installation the CLI provides a configuration for [accessing the cluster using the Kubernetes API](https://kubernetes.io/docs/tasks/administer-cluster/access-cluster-api/). -The `kubeconfig` file provides the credentials and configuration for connecting and authenticating to the API server. -Once configured, orchestrate the Kubernetes cluster via `kubectl`. - -After the initialization, the CLI will present you with a couple of tokens: - -* The [*master secret*](keys.md#master-secret) (stored in the `constellation-mastersecret.json` file by default) -* The [*clusterID*](keys.md#cluster-identity) of your cluster in Base64 encoding - -You can read more about these values and their meaning in the guide on [cluster identity](keys.md#cluster-identity). - -The *master secret* must be kept secret and can be used to [recover your cluster](../workflows/recovery.md). -Instead of managing this secret manually, you can [use your key management solution of choice](keys.md#user-managed-key-management) with Constellation. - -The *clusterID* uniquely identifies a cluster and can be used to [verify your cluster](../workflows/verify-cluster.md). - -## Upgrades - -Constellation images and microservices may need to be upgraded to new versions during the lifetime of a cluster. -Constellation implements a rolling update mechanism ensuring no downtime of the control or data plane. -You can upgrade a Constellation cluster with a single operation by using the CLI. -For step-by-step instructions on how to do this, refer to [Upgrade your cluster](../workflows/upgrade.md). - -### Attestation of upgrades - -With every new image, corresponding measurements are released. -During an update procedure, the CLI provides new measurements to the [JoinService](microservices.md#joinservice) securely. -New measurements for an updated image are automatically pulled and verified by the CLI following the [supply chain security concept](attestation.md#chain-of-trust) of Constellation. -The [attestation section](attestation.md#cluster-facing-attestation) describes in detail how these measurements are then used by the JoinService for the attestation of nodes. - - diff --git a/docs/versioned_docs/version-2.19/architecture/overview.md b/docs/versioned_docs/version-2.19/architecture/overview.md deleted file mode 100644 index 386f93b2f..000000000 --- a/docs/versioned_docs/version-2.19/architecture/overview.md +++ /dev/null @@ -1,30 +0,0 @@ -# Overview - -Constellation is a cloud-based confidential orchestration platform. -The foundation of Constellation is Kubernetes and therefore shares the same technology stack and architecture principles. -To learn more about Constellation and Kubernetes, see [product overview](../overview/product.md). - -## About orchestration and updates - -As a cluster administrator, you can use the [Constellation CLI](orchestration.md) to install and deploy a cluster. -Updates are provided in accordance with the [support policy](versions.md). 
-
-## About microservices and attestation
-
-Constellation manages the nodes and network in your cluster. All nodes are bootstrapped by the [*Bootstrapper*](microservices.md#bootstrapper). They're verified and authenticated by the [*JoinService*](microservices.md#joinservice) before being added to the cluster and the network. Finally, the entire cluster can be verified via the [*VerificationService*](microservices.md#verificationservice) using [remote attestation](attestation.md).
-
-## About node images and verified boot
-
-Constellation comes with operating system images for Kubernetes control-plane and worker nodes.
-They're highly optimized for running containerized workloads and specifically prepared for running inside confidential VMs.
-You can learn more about [the images](images.md) and how verified boot ensures their integrity during boot and beyond.
-
-## About key management and cryptographic primitives
-
-Encryption of data at-rest, in-transit, and in-use is the fundamental building block for confidential computing and Constellation. Learn more about the [keys and cryptographic primitives](keys.md) used in Constellation, [encrypted persistent storage](encrypted-storage.md), and [network encryption](networking.md).
-
-## About observability
-
-Observability in Kubernetes refers to the capability to troubleshoot issues using telemetry signals such as logs, metrics, and traces.
-In the realm of Confidential Computing, it's crucial that observability aligns with confidentiality, necessitating careful implementation.
-Learn more about the [observability capabilities in Constellation](./observability.md).
diff --git a/docs/versioned_docs/version-2.19/architecture/versions.md b/docs/versioned_docs/version-2.19/architecture/versions.md
deleted file mode 100644
index 9d5a064e0..000000000
--- a/docs/versioned_docs/version-2.19/architecture/versions.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# Versions and support policy
-
-All components of Constellation use a three-digit version number of the form `v<MAJOR>.<MINOR>.<PATCH>`.
-The components are released in lock step, usually on the first Tuesday of every month. This release primarily introduces new features, but may also include security or performance improvements. The `MINOR` version will be incremented as part of this release.
-
-Additional `PATCH` releases may be created on demand, to fix security issues or bugs before the next `MINOR` release window.
-
-New releases are published on [GitHub](https://github.com/edgelesssys/constellation/releases).
-
-## Kubernetes support policy
-
-Constellation is aligned to the [version support policy of Kubernetes](https://kubernetes.io/releases/version-skew-policy/#supported-versions), and therefore usually supports the most recent three minor versions.
-When a new minor version of Kubernetes is released, support is added to the next Constellation release, and that version then supports four Kubernetes versions.
-Subsequent Constellation releases drop support for the oldest (and deprecated) Kubernetes version.
- -The following Kubernetes versions are currently supported: - - -* v1.28.15 -* v1.29.11 -* v1.30.7 diff --git a/docs/versioned_docs/version-2.19/getting-started/examples.md b/docs/versioned_docs/version-2.19/getting-started/examples.md deleted file mode 100644 index fded84980..000000000 --- a/docs/versioned_docs/version-2.19/getting-started/examples.md +++ /dev/null @@ -1,6 +0,0 @@ -# Examples - -After you [installed the CLI](install.md) and [created your first cluster](first-steps.md), you're ready to deploy applications. Why not start with one of the following examples? -* [Emojivoto](examples/emojivoto.md): a simple but fun web application -* [Online Boutique](examples/online-boutique.md): an e-commerce demo application by Google consisting of 11 separate microservices -* [Horizontal Pod Autoscaling](examples/horizontal-scaling.md): an example demonstrating Constellation's autoscaling capabilities diff --git a/docs/versioned_docs/version-2.19/getting-started/examples/emojivoto.md b/docs/versioned_docs/version-2.19/getting-started/examples/emojivoto.md deleted file mode 100644 index 2bbe27917..000000000 --- a/docs/versioned_docs/version-2.19/getting-started/examples/emojivoto.md +++ /dev/null @@ -1,22 +0,0 @@ -# Emojivoto -[Emojivoto](https://github.com/BuoyantIO/emojivoto) is a simple and fun application that's well suited to test the basic functionality of your cluster. - - - -emojivoto - Web UI - - - -1. Deploy the application: - ```bash - kubectl apply -k github.com/BuoyantIO/emojivoto/kustomize/deployment - ``` -2. Wait until it becomes available: - ```bash - kubectl wait --for=condition=available --timeout=60s -n emojivoto --all deployments - ``` -3. Forward the web service to your machine: - ```bash - kubectl -n emojivoto port-forward svc/web-svc 8080:80 - ``` -4. Visit [http://localhost:8080](http://localhost:8080) diff --git a/docs/versioned_docs/version-2.19/getting-started/examples/filestash-s3proxy.md b/docs/versioned_docs/version-2.19/getting-started/examples/filestash-s3proxy.md deleted file mode 100644 index b9a394256..000000000 --- a/docs/versioned_docs/version-2.19/getting-started/examples/filestash-s3proxy.md +++ /dev/null @@ -1,107 +0,0 @@ - -# Deploying Filestash - -Filestash is a web frontend for different storage backends, including S3. -It's a useful application to showcase s3proxy in action. - -1. Deploy s3proxy as described in [Deployment](../../workflows/s3proxy.md#deployment). -2. 
Create a deployment file for Filestash with one pod: - -```sh -cat << EOF > "deployment-filestash.yaml" -apiVersion: apps/v1 -kind: Deployment -metadata: - name: filestash -spec: - replicas: 1 - selector: - matchLabels: - app: filestash - template: - metadata: - labels: - app: filestash - spec: - hostAliases: - - ip: $(kubectl get svc s3proxy-service -o=jsonpath='{.spec.clusterIP}') - hostnames: - - "s3.us-east-1.amazonaws.com" - - "s3.us-east-2.amazonaws.com" - - "s3.us-west-1.amazonaws.com" - - "s3.us-west-2.amazonaws.com" - - "s3.eu-north-1.amazonaws.com" - - "s3.eu-south-1.amazonaws.com" - - "s3.eu-south-2.amazonaws.com" - - "s3.eu-west-1.amazonaws.com" - - "s3.eu-west-2.amazonaws.com" - - "s3.eu-west-3.amazonaws.com" - - "s3.eu-central-1.amazonaws.com" - - "s3.eu-central-2.amazonaws.com" - - "s3.ap-northeast-1.amazonaws.com" - - "s3.ap-northeast-2.amazonaws.com" - - "s3.ap-northeast-3.amazonaws.com" - - "s3.ap-east-1.amazonaws.com" - - "s3.ap-southeast-1.amazonaws.com" - - "s3.ap-southeast-2.amazonaws.com" - - "s3.ap-southeast-3.amazonaws.com" - - "s3.ap-southeast-4.amazonaws.com" - - "s3.ap-south-1.amazonaws.com" - - "s3.ap-south-2.amazonaws.com" - - "s3.me-south-1.amazonaws.com" - - "s3.me-central-1.amazonaws.com" - - "s3.il-central-1.amazonaws.com" - - "s3.af-south-1.amazonaws.com" - - "s3.ca-central-1.amazonaws.com" - - "s3.sa-east-1.amazonaws.com" - containers: - - name: filestash - image: machines/filestash:latest - ports: - - containerPort: 8334 - volumeMounts: - - name: ca-cert - mountPath: /etc/ssl/certs/kube-ca.crt - subPath: kube-ca.crt - volumes: - - name: ca-cert - secret: - secretName: s3proxy-tls - items: - - key: ca.crt - path: kube-ca.crt -EOF -``` - -The pod spec includes the `hostAliases` key, which adds an entry to the pod's `/etc/hosts`. -The entry forwards all requests for any of the currently defined AWS regions to the Kubernetes service `s3proxy-service`. -If you followed the s3proxy [Deployment](../../workflows/s3proxy.md#deployment) guide, this service points to a s3proxy pod. - -The deployment specifies all regions explicitly to prevent accidental data leaks. -If one of your buckets were located in a region that's not part of the `hostAliases` key, traffic towards those buckets would not be redirected to s3proxy. -Similarly, if you want to exclude data for specific regions from going through s3proxy you can remove those regions from the deployment. - -The spec also includes a volume mount for the TLS certificate and adds it to the pod's certificate trust store. -The volume is called `ca-cert`. -The key `ca.crt` of that volume is mounted to `/etc/ssl/certs/kube-ca.crt`, which is the default certificate trust store location for that container's OpenSSL library. -Not adding the CA certificate will result in TLS authentication errors. - -3. Apply the file: `kubectl apply -f deployment-filestash.yaml` - -Afterward, you can use a port forward to access the Filestash pod: -`kubectl port-forward pod/$(kubectl get pod --selector='app=filestash' -o=jsonpath='{.items[*].metadata.name}') 8334:8334` - -4. After browsing to `localhost:8443`, Filestash will ask you to set an administrator password. -After setting it, you can directly leave the admin area by clicking the blue cloud symbol in the top left corner. -Subsequently, you can select S3 as storage backend and enter your credentials. -This will bring you to an overview of your buckets. -If you want to deploy Filestash in production, take a look at its [documentation](https://www.filestash.app/docs/). - -5. 
To see the logs of s3proxy intercepting requests made to S3, run: `kubectl logs -f pod/$(kubectl get pod --selector='app=s3proxy' -o=jsonpath='{.items[*].metadata.name}')` -Look out for log messages labeled `intercepting`. -There is one such log message for each message that's encrypted, decrypted, or blocked. - -6. Once you have uploaded a file with Filestash, you should be able to view the file in Filestash. -However, if you go to the AWS S3 [Web UI](https://s3.console.aws.amazon.com/s3/home) and download the file you just uploaded in Filestash, you won't be able to read it. -Another way to spot encrypted files without downloading them is to click on a file, scroll to the Metadata section, and look for the header named `x-amz-meta-constellation-encryption`. -This header holds the encrypted data encryption key of the object and is only present on objects that are encrypted by s3proxy. diff --git a/docs/versioned_docs/version-2.19/getting-started/examples/horizontal-scaling.md b/docs/versioned_docs/version-2.19/getting-started/examples/horizontal-scaling.md deleted file mode 100644 index dfaf9e742..000000000 --- a/docs/versioned_docs/version-2.19/getting-started/examples/horizontal-scaling.md +++ /dev/null @@ -1,98 +0,0 @@ -# Horizontal Pod Autoscaling -This example demonstrates Constellation's autoscaling capabilities. It's based on the Kubernetes [HorizontalPodAutoscaler Walkthrough](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/). During the following steps, Constellation will spawn new VMs on demand, verify them, add them to the cluster, and delete them again when the load has settled down. - -## Requirements -The cluster needs to be initialized with Kubernetes 1.23 or later. In addition, [autoscaling must be enabled](../../workflows/scale.md) to enable Constellation to assign new nodes dynamically. - -Just for this example specifically, the cluster should have as few worker nodes in the beginning as possible. Start with a small cluster with only *one* low-powered node for the control-plane node and *one* low-powered worker node. - -:::info -We tested the example using instances of types `Standard_DC4as_v5` on Azure and `n2d-standard-4` on GCP. -::: - -## Setup - -1. Install the Kubernetes Metrics Server: - ```bash - kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml - ``` - -2. Deploy the HPA example server that's supposed to be scaled under load. - - This manifest is similar to the one from the Kubernetes HPA walkthrough, but with increased CPU limits and requests to facilitate the triggering of node scaling events. - ```bash - cat < - -Online Boutique - Web UI - - - -1. Create a namespace: - ```bash - kubectl create ns boutique - ``` -2. Deploy the application: - ```bash - kubectl apply -n boutique -f https://github.com/GoogleCloudPlatform/microservices-demo/raw/main/release/kubernetes-manifests.yaml - ``` -3. Wait for all services to become available: - ```bash - kubectl wait --for=condition=available --timeout=300s -n boutique --all deployments - ``` -4. Get the frontend's external IP address: - ```shell-session - $ kubectl get service frontend-external -n boutique | awk '{print $4}' - EXTERNAL-IP - - ``` - (`` is a placeholder for the IP assigned by your CSP.) -5. Enter the IP from the result in your browser to browse the online shop. 
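-If the `EXTERNAL-IP` column from step 4 still shows `<pending>`, the cloud load balancer hasn't finished provisioning yet. The following polling loop is a sketch (not part of the original walkthrough) that waits until an address is assigned; note that some CSPs report a hostname instead of an IP:
-
-```bash
-# Wait until the LoadBalancer service has an external IP or hostname assigned
-until kubectl get service frontend-external -n boutique \
-    -o jsonpath='{.status.loadBalancer.ingress[0]}' | grep -q .; do
-  echo "Waiting for the external address..."
-  sleep 10
-done
-kubectl get service frontend-external -n boutique
-```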
diff --git a/docs/versioned_docs/version-2.19/getting-started/first-steps-local.md b/docs/versioned_docs/version-2.19/getting-started/first-steps-local.md deleted file mode 100644 index 98f0302de..000000000 --- a/docs/versioned_docs/version-2.19/getting-started/first-steps-local.md +++ /dev/null @@ -1,277 +0,0 @@ -# First steps with a local cluster - -A local cluster lets you deploy and test Constellation without a cloud subscription. -You have two options: - -* Use MiniConstellation to automatically deploy a two-node cluster. -* For more fine-grained control, create the cluster using the QEMU provider. - -Both options use virtualization to create a local cluster with control-plane nodes and worker nodes. They **don't** require hardware with Confidential VM (CVM) support. For attestation, they currently use a software-based vTPM provided by KVM/QEMU. - -You need an x64 machine with a Linux OS. -You can use a VM, but it needs nested virtualization. - -## Prerequisites - -* Machine requirements: - * An x86-64 CPU with at least 4 cores (6 cores are recommended) - * At least 4 GB RAM (6 GB are recommended) - * 20 GB of free disk space - * Hardware virtualization enabled in the BIOS/UEFI (often referred to as Intel VT-x or AMD-V/SVM) / nested-virtualization support when using a VM -* Software requirements: - * Linux OS with [KVM kernel module](https://www.linux-kvm.org/page/Main_Page) - * Recommended: Ubuntu 22.04 LTS - * [Docker](https://docs.docker.com/engine/install/) - * [xsltproc](https://gitlab.gnome.org/GNOME/libxslt/-/wikis/home) - * (Optional) [virsh](https://www.libvirt.org/manpages/virsh.html) to observe and access your nodes - -### Software installation on Ubuntu - -```bash -# install Docker -curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg -echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null -sudo apt update -sudo apt install docker-ce -# install other dependencies -sudo apt install xsltproc -sudo snap install kubectl --classic -# install Constellation CLI -curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-linux-amd64 -sudo install constellation-linux-amd64 /usr/local/bin/constellation -# do not drop forwarded packages -sudo iptables -P FORWARD ACCEPT -``` - -## Create a cluster - - - - - -With the `constellation mini` command, you can deploy and test Constellation locally. This mode is called MiniConstellation. Conceptually, MiniConstellation is similar to [MicroK8s](https://microk8s.io/), [K3s](https://k3s.io/), and [minikube](https://minikube.sigs.k8s.io/docs/). - - -:::caution - -MiniConstellation has specific soft- and hardware requirements such as a Linux OS running on an x86-64 CPU. Pay attention to all [prerequisites](#prerequisites) when setting up. - -::: - -:::note - -Since MiniConstellation runs on your local system, cloud features such as load balancing, -attaching persistent storage, or autoscaling aren't available. - -::: - -The following creates your MiniConstellation cluster (may take up to 10 minutes to complete): - -```bash -constellation mini up -``` - -This will configure your current directory as the [workspace](../architecture/orchestration.md#workspaces) for this cluster. -All `constellation` commands concerning this cluster need to be issued from this directory. 
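-Once `constellation mini up` has finished, the admin `kubeconfig` for the cluster is placed in the workspace. As a quick sanity check (a sketch; the file name `constellation-admin.conf` matches the output shown for the QEMU flow below), point `kubectl` at it and list the nodes:
-
-```bash
-# Use the kubeconfig written to the current workspace and list the cluster's nodes
-export KUBECONFIG="$PWD/constellation-admin.conf"
-kubectl get nodes
-```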
- - - - -With the QEMU provider, you can create a local Constellation cluster as if it were in the cloud. The provider uses [QEMU](https://www.qemu.org/) to create multiple VMs for the cluster nodes, which interact with each other. - -:::caution - -Constellation on QEMU has specific soft- and hardware requirements such as a Linux OS running on an x86-64 CPU. Pay attention to all [prerequisites](#prerequisites) when setting up. - -::: - -:::note - -Since Constellation on QEMU runs on your local system, cloud features such as load balancing, -attaching persistent storage, or autoscaling aren't available. - -::: - -1. To set up your local cluster, you need to create a configuration file for Constellation first. - - ```bash - constellation config generate qemu - ``` - - This creates a [configuration file](../workflows/config.md) for QEMU called `constellation-conf.yaml`. After that, your current folder also becomes your [workspace](../architecture/orchestration.md#workspaces). All `constellation` commands for your cluster need to be executed from this directory. - -2. Now you can create your cluster and its nodes. `constellation apply` uses the options set in `constellation-conf.yaml`. - - ```bash - constellation apply -y - ``` - - The Output should look like the following: - - ```shell-session - $ constellation apply -y - Checking for infrastructure changes - The following Constellation cluster will be created: - 3 control-plane nodes of type 2-vCPUs will be created. - 1 worker node of type 2-vCPUs will be created. - Creating - Cloud infrastructure created successfully. - Your Constellation master secret was successfully written to ./constellation-mastersecret.json - Connecting - Initializing cluster - Installing Kubernetes components - Your Constellation cluster was successfully initialized. - - Constellation cluster identifier g6iMP5wRU1b7mpOz2WEISlIYSfdAhB0oNaOg6XEwKFY= - Kubernetes configuration constellation-admin.conf - - You can now connect to your cluster by executing: - export KUBECONFIG="$PWD/constellation-admin.conf" - ``` - - The cluster's identifier will be different in your output. - Keep `constellation-mastersecret.json` somewhere safe. - This will allow you to [recover your cluster](../workflows/recovery.md) in case of a disaster. - - :::info - - Depending on your setup, `constellation apply` may take 10+ minutes to complete. - - ::: - -3. Configure kubectl - - ```bash - export KUBECONFIG="$PWD/constellation-admin.conf" - ``` - - - - -## Connect to the cluster - -Your cluster initially consists of a single control-plane node: - -```shell-session -$ kubectl get nodes -NAME STATUS ROLES AGE VERSION -control-plane-0 Ready control-plane 66s v1.24.6 -``` - -Additional nodes will request to join the cluster shortly. Before each additional node is allowed to join the cluster, its state is verified using remote attestation by the [JoinService](../architecture/microservices.md#joinservice). -If verification passes successfully, the new node receives keys and certificates to join the cluster. - -You can follow this process by viewing the logs of the JoinService: - -```shell-session -$ kubectl logs -n kube-system daemonsets/join-service -f -{"level":"INFO","ts":"2022-10-14T09:32:20Z","caller":"cmd/main.go:48","msg":"Constellation Node Join Service","version":"2.1.0","cloudProvider":"qemu"} -{"level":"INFO","ts":"2022-10-14T09:32:20Z","logger":"validator","caller":"watcher/validator.go:96","msg":"Updating expected measurements"} -... 
-``` - -Once all nodes have joined your cluster, it may take a couple of minutes for all resources to become available. -You can check on the state of your cluster by running the following: - -```shell-session -$ kubectl get nodes -NAME STATUS ROLES AGE VERSION -control-plane-0 Ready control-plane 2m59s v1.24.6 -worker-0 Ready 32s v1.24.6 -``` - -## Deploy a sample application - -1. Deploy the [emojivoto app](https://github.com/BuoyantIO/emojivoto) - - ```bash - kubectl apply -k github.com/BuoyantIO/emojivoto/kustomize/deployment - ``` - -2. Expose the frontend service locally - - ```bash - kubectl wait --for=condition=available --timeout=60s -n emojivoto --all deployments - kubectl -n emojivoto port-forward svc/web-svc 8080:80 & - curl http://localhost:8080 - kill %1 - ``` - -## Terminate your cluster - - - - -Once you are done, you can clean up the created resources using the following command: - -```bash -constellation mini down -``` - -This will destroy your cluster and clean up your workspace. -The VM image and cluster configuration file (`constellation-conf.yaml`) will be kept and may be reused to create new clusters. - - - - -Once you are done, you can clean up the created resources using the following command: - -```bash -constellation terminate -``` - -This should give the following output: - -```shell-session -$ constellation terminate -You are about to terminate a Constellation cluster. -All of its associated resources will be DESTROYED. -This action is irreversible and ALL DATA WILL BE LOST. -Do you want to continue? [y/n]: -``` - -Confirm with `y` to terminate the cluster: - -```shell-session -Terminating ... -Your Constellation cluster was terminated successfully. -``` - -This will destroy your cluster and clean up your workspace. -The VM image and cluster configuration file (`constellation-conf.yaml`) will be kept and may be reused to create new clusters. - - - - -## Troubleshooting - -Make sure to use the [latest release](https://github.com/edgelesssys/constellation/releases/latest) and check out the [known issues](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22). - -### VMs have no internet access / CLI remains in "Initializing cluster" state - -`iptables` rules may prevent your VMs from accessing the internet. -Make sure your rules aren't dropping forwarded packages. - -List your rules: - -```bash -sudo iptables -S -``` - -The output may look similar to the following: - -```shell-session --P INPUT ACCEPT --P FORWARD DROP --P OUTPUT ACCEPT --N DOCKER --N DOCKER-ISOLATION-STAGE-1 --N DOCKER-ISOLATION-STAGE-2 --N DOCKER-USER -``` - -If your `FORWARD` chain is set to `DROP`, you need to update your rules: - -```bash -sudo iptables -P FORWARD ACCEPT -``` diff --git a/docs/versioned_docs/version-2.19/getting-started/first-steps.md b/docs/versioned_docs/version-2.19/getting-started/first-steps.md deleted file mode 100644 index 2afe95635..000000000 --- a/docs/versioned_docs/version-2.19/getting-started/first-steps.md +++ /dev/null @@ -1,235 +0,0 @@ -# First steps with Constellation - -The following steps guide you through the process of creating a cluster and deploying a sample app. This example assumes that you have successfully [installed and set up Constellation](install.md), -and have access to a cloud subscription. - -:::tip -If you don't have a cloud subscription, you can also set up a [local Constellation cluster using virtualization](../getting-started/first-steps-local.md) for testing. 
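-If you want to check and fix the policy in one step, the following sketch only changes the rule when it's actually set to `DROP`. Note that Docker may reset the policy when the daemon restarts, so you might have to reapply it:
-
-```bash
-# Switch the FORWARD policy to ACCEPT only if it's currently set to DROP
-if sudo iptables -S FORWARD | grep -q -- '-P FORWARD DROP'; then
-  sudo iptables -P FORWARD ACCEPT
-fi
-```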
-::: - -:::note -If you encounter any problem with the following steps, make sure to use the [latest release](https://github.com/edgelesssys/constellation/releases/latest) and check out the [known issues](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22). -::: - -## Create a cluster - -1. Create the [configuration file](../workflows/config.md) and state file for your cloud provider. If you are following the steps of this guide, there is no need to edit the file. - - - - - ```bash - constellation config generate aws - ``` - - - - - ```bash - constellation config generate azure - ``` - - - - - ```bash - constellation config generate gcp - ``` - - - - - ```bash - constellation config generate stackit - ``` - - - - -2. Create your [IAM configuration](../workflows/config.md#creating-an-iam-configuration). - - - - - ```bash - constellation iam create aws --zone=us-east-2a --prefix=constellTest --update-config - ``` - - This command creates IAM configuration for the AWS zone `us-east-2a` using the prefix `constellTest` for all named resources being created. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. - - Depending on the attestation variant selected on config generation, different regions are available. - AMD SEV-SNP machines (requires the default attestation variant `awsSEVSNP`) are currently available in the following regions: - * `eu-west-1` - * `us-east-2` - - You can find a list of regions that support AMD SEV-SNP in [AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/snp-requirements.html). - - NitroTPM machines (requires the attestation variant `awsNitroTPM`) are available in all regions. - Constellation OS images are currently replicated to the following regions: - * `eu-central-1` - * `eu-west-1` - * `eu-west-3` - * `us-east-2` - * `ap-south-1` - - If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+AWS+image+region:+xx-xxxx-x). - - You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). - - - - - ```bash - constellation iam create azure --subscriptionID 00000000-0000-0000-0000-000000000000 --region=westus --resourceGroup=constellTest --servicePrincipal=spTest --update-config - ``` - - This command creates IAM configuration on the Azure region `westus` creating a new resource group `constellTest` and a new service principal `spTest`. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. - - CVMs are available in several Azure regions. Constellation OS images are currently replicated to the following: - - * `germanywestcentral` - * `westus` - * `eastus` - * `northeurope` - * `westeurope` - * `southeastasia` - - If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+Azure+image+region:+xx-xxxx-x). - - You can find a list of all [regions in Azure's documentation](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=virtual-machines®ions=all). 
- - - - - ```bash - constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --serviceAccountID=constell-test --update-config - ``` - - This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. - - Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `C2D` or `N2D`. - - - - - To use Constellation on STACKIT, the cluster will use the User Access Token (UAT) that's generated [during the install step](./install.md). - After creating the accounts, fill in the STACKIT details in `constellation-conf.yaml` under `provider.openstack`: - - * `stackitProjectID`: STACKIT project id (can be found after login on the [STACKIT portal](https://portal.stackit.cloud)) - - :::caution - - `stackitProjectID` refers to the ID of your STACKIT project. The STACKIT portal also shows the OpenStack ID that's associated with your project in some places. Make sure you insert the STACKIT project ID in the `constellation-conf.yaml` file. It's of the format `XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX`. - - ::: - - - - - :::tip - To learn about all options you have for managing IAM resources and Constellation configuration, see the [Configuration workflow](../workflows/config.md). - ::: - - - -3. Create the cluster. `constellation apply` uses options set in `constellation-conf.yaml`. - If you want to manually manage your cloud resources, for example by using [Terraform](../reference/terraform.md), follow the corresponding instructions in the [Create workflow](../workflows/create.md). - - :::tip - - On Azure, you may need to wait 15+ minutes at this point for role assignments to propagate. - - ::: - - ```bash - constellation apply -y - ``` - - This should look similar to the following: - - ```shell-session - $ constellation apply -y - Checking for infrastructure changes - The following Constellation cluster will be created: - 3 control-plane nodes of type n2d-standard-4 will be created. - 1 worker node of type n2d-standard-4 will be created. - Creating - Cloud infrastructure created successfully - Your Constellation master secret was successfully written to ./constellation-mastersecret.json - Connecting - Initializing cluster - Installing Kubernetes components - Your Constellation cluster was successfully initialized. - - Constellation cluster identifier g6iMP5wRU1b7mpOz2WEISlIYSfdAhB0oNaOg6XEwKFY= - Kubernetes configuration constellation-admin.conf - - You can now connect to your cluster by executing: - export KUBECONFIG="$PWD/constellation-admin.conf" - ``` - - The cluster's identifier will be different in your output. - Keep `constellation-mastersecret.json` somewhere safe. - This will allow you to [recover your cluster](../workflows/recovery.md) in case of a disaster. - - :::info - - Depending on your CSP and region, `constellation apply` may take 10+ minutes to complete. - - ::: - -4. Configure kubectl. - - ```bash - export KUBECONFIG="$PWD/constellation-admin.conf" - ``` - -## Deploy a sample application - -1. Deploy the [emojivoto app](https://github.com/BuoyantIO/emojivoto) - - ```bash - kubectl apply -k github.com/BuoyantIO/emojivoto/kustomize/deployment - ``` - -2. 
Expose the frontend service locally - - ```bash - kubectl wait --for=condition=available --timeout=60s -n emojivoto --all deployments - kubectl -n emojivoto port-forward svc/web-svc 8080:80 & - curl http://localhost:8080 - kill %1 - ``` - -## Terminate your cluster - -Use the CLI to terminate your cluster. If you manually used [Terraform](../reference/terraform.md) to manage your cloud resources, follow the corresponding instructions in the [Terminate workflow](../workflows/terminate.md). - -```bash -constellation terminate -``` - -This should give the following output: - -```shell-session -$ constellation terminate -You are about to terminate a Constellation cluster. -All of its associated resources will be DESTROYED. -This action is irreversible and ALL DATA WILL BE LOST. -Do you want to continue? [y/n]: -``` - -Confirm with `y` to terminate the cluster: - -```shell-session -Terminating ... -Your Constellation cluster was terminated successfully. -``` - -Optionally, you can also [delete your IAM resources](../workflows/config.md#deleting-an-iam-configuration). diff --git a/docs/versioned_docs/version-2.19/getting-started/install.md b/docs/versioned_docs/version-2.19/getting-started/install.md deleted file mode 100644 index 29be1e7f6..000000000 --- a/docs/versioned_docs/version-2.19/getting-started/install.md +++ /dev/null @@ -1,439 +0,0 @@ -# Installation and setup - -Constellation runs entirely in your cloud environment and can be controlled via a dedicated [command-line interface (CLI)](../reference/cli.md) or a [Terraform provider](../workflows/terraform-provider.md). - -## Prerequisites - -Make sure the following requirements are met: - -* Your machine is running Linux, macOS, or Windows -* You have admin rights on your machine -* [kubectl](https://kubernetes.io/docs/tasks/tools/) is installed -* Your CSP is Amazon Web Services (AWS), Microsoft Azure, Google Cloud Platform (GCP), or STACKIT - -## Install the Constellation CLI - -:::tip - -If you prefer to use Terraform, you can alternatively use the [Terraform provider](../workflows/terraform-provider.md) to manage the cluster's lifecycle. - -::: - -The CLI executable is available at [GitHub](https://github.com/edgelesssys/constellation/releases). -Install it with the following commands: - - - - -1. Download the CLI: - -```bash -curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-linux-amd64 -``` - -2. [Verify the signature](../workflows/verify-cli.md) (optional) - -3. Install the CLI to your PATH: - -```bash -sudo install constellation-linux-amd64 /usr/local/bin/constellation -``` - - - - -1. Download the CLI: - -```bash -curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-linux-arm64 -``` - -2. [Verify the signature](../workflows/verify-cli.md) (optional) - -3. Install the CLI to your PATH: - -```bash -sudo install constellation-linux-arm64 /usr/local/bin/constellation -``` - - - - - -1. Download the CLI: - -```bash -curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-darwin-arm64 -``` - -2. [Verify the signature](../workflows/verify-cli.md) (optional) - -3. Install the CLI to your PATH: - -```bash -sudo install constellation-darwin-arm64 /usr/local/bin/constellation -``` - - - - - -1. Download the CLI: - -```bash -curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-darwin-amd64 -``` - -2. [Verify the signature](../workflows/verify-cli.md) (optional) - -3. 
Install the CLI to your PATH: - -```bash -sudo install constellation-darwin-amd64 /usr/local/bin/constellation -``` - - - - - -1. Download the CLI: - -```bash -Invoke-WebRequest -OutFile ./constellation.exe -Uri 'https://github.com/edgelesssys/constellation/releases/latest/download/constellation-windows-amd64.exe' -``` - -2. [Verify the signature](../workflows/verify-cli.md) (optional) - -3. Install the CLI under `C:\Program Files\Constellation\bin\constellation.exe` - -3. Add the CLI to your PATH: - - 1. Open `Advanced system settings` by searching for the App in the Windows search - 2. Go to the `Advanced` tab - 3. Click `Environment Variables…` - 4. Click variable called `Path` and click `Edit…` - 5. Click `New` - 6. Enter the path to the folder containing the binary you want on your PATH: `C:\Program Files\Constellation\bin` - - - - -:::tip -The CLI supports autocompletion for various shells. To set it up, run `constellation completion` and follow the given steps. -::: - -## Set up cloud credentials - -Constellation makes authenticated calls to the CSP API. Therefore, you need to set up Constellation with the credentials for your CSP. - -:::tip -If you don't have a cloud subscription, you can also set up a [local Constellation cluster using virtualization](../getting-started/first-steps-local.md) for testing. -::: - -### Required permissions - - - - -To set up a Constellation cluster, you need to perform two tasks that require permissions: create the infrastructure and create roles for cluster nodes. Both of these actions can be performed by different users, e.g., an administrator to create roles and a DevOps engineer to create the infrastructure. - -To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "ec2:DescribeAccountAttributes", - "iam:AddRoleToInstanceProfile", - "iam:AttachRolePolicy", - "iam:CreateInstanceProfile", - "iam:CreatePolicy", - "iam:CreateRole", - "iam:DeleteInstanceProfile", - "iam:DeletePolicy", - "iam:DeletePolicyVersion", - "iam:DeleteRole", - "iam:DetachRolePolicy", - "iam:GetInstanceProfile", - "iam:GetPolicy", - "iam:GetPolicyVersion", - "iam:GetRole", - "iam:ListAttachedRolePolicies", - "iam:ListInstanceProfilesForRole", - "iam:ListPolicyVersions", - "iam:ListRolePolicies", - "iam:PassRole", - "iam:RemoveRoleFromInstanceProfile", - "sts:GetCallerIdentity" - ], - "Resource": "*" - } - ] -} -``` - -The built-in `AdministratorAccess` policy is a superset of these permissions. - -To [create a Constellation cluster](../workflows/create.md), see the permissions of [main.tf](https://github.com/edgelesssys/constellation/blob/main/terraform/infrastructure/iam/aws/main.tf). - -The built-in `PowerUserAccess` policy is a superset of these permissions. - -Follow Amazon's guide on [understanding](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) and [managing policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html). 
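-If you prefer not to attach the broad built-in policies, you can wrap the JSON document above in a dedicated customer-managed policy. The following AWS CLI sketch assumes you saved the document as `constellation-iam-policy.json`; the policy name, user name, and account ID are placeholders of your choosing:
-
-```bash
-# Create a customer-managed policy from the JSON document above
-aws iam create-policy \
-  --policy-name ConstellationIAMCreation \
-  --policy-document file://constellation-iam-policy.json
-
-# Attach the policy to the user that will run `constellation iam create aws`
-aws iam attach-user-policy \
-  --user-name constellation-admin \
-  --policy-arn "arn:aws:iam::<ACCOUNT_ID>:policy/ConstellationIAMCreation"
-```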
- - - - -The following [resource providers need to be registered](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/resource-providers-and-types#register-resource-provider) in your subscription: - -* `Microsoft.Attestation` -* `Microsoft.Compute` -* `Microsoft.Insights` -* `Microsoft.ManagedIdentity` -* `Microsoft.Network` - -By default, Constellation tries to register these automatically if they haven't been registered before. - -To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: - -* `*/register/action` \[1] -* `Microsoft.Authorization/roleAssignments/*` -* `Microsoft.Authorization/roleDefinitions/*` -* `Microsoft.ManagedIdentity/userAssignedIdentities/*` -* `Microsoft.Resources/subscriptions/resourcegroups/*` - -The built-in `Owner` role is a superset of these permissions. - -To [create a Constellation cluster](../workflows/create.md), you need the following permissions: - -* `Microsoft.Attestation/attestationProviders/*` -* `Microsoft.Compute/virtualMachineScaleSets/*` -* `Microsoft.Insights/components/*` -* `Microsoft.ManagedIdentity/userAssignedIdentities/*` -* `Microsoft.Network/loadBalancers/*` -* `Microsoft.Network/loadBalancers/backendAddressPools/*` -* `Microsoft.Network/networkSecurityGroups/*` -* `Microsoft.Network/publicIPAddresses/*` -* `Microsoft.Network/virtualNetworks/*` -* `Microsoft.Network/virtualNetworks/subnets/*` -* `Microsoft.Network/natGateways/*` - -The built-in `Contributor` role is a superset of these permissions. - -Follow Microsoft's guide on [understanding](https://learn.microsoft.com/en-us/azure/role-based-access-control/role-definitions) and [assigning roles](https://learn.microsoft.com/en-us/azure/role-based-access-control/role-assignments). - -1: You can omit `*/register/Action` if the resource providers mentioned above are already registered and the `ARM_SKIP_PROVIDER_REGISTRATION` environment variable is set to `true` when creating the IAM configuration. - - - - -Create a new project for Constellation or use an existing one. -Enable the [Compute Engine API](https://console.cloud.google.com/apis/library/compute.googleapis.com) on it. - -To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: - -* `iam.serviceAccountKeys.create` -* `iam.serviceAccountKeys.delete` -* `iam.serviceAccountKeys.get` -* `iam.serviceAccounts.create` -* `iam.serviceAccounts.delete` -* `iam.serviceAccounts.get` -* `resourcemanager.projects.getIamPolicy` -* `resourcemanager.projects.setIamPolicy` - -Together, the built-in roles `roles/editor` and `roles/resourcemanager.projectIamAdmin` form a superset of these permissions. 
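-As a sketch, granting these two built-in roles to the account that creates the IAM configuration looks like this with the `gcloud` CLI (the project ID and member are placeholders):
-
-```bash
-# Grant the roles required for `constellation iam create gcp` to a user
-gcloud projects add-iam-policy-binding yourproject-12345 \
-  --member="user:admin@example.com" --role="roles/editor"
-gcloud projects add-iam-policy-binding yourproject-12345 \
-  --member="user:admin@example.com" --role="roles/resourcemanager.projectIamAdmin"
-```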
- -To [create a Constellation cluster](../workflows/create.md), you need the following permissions: - -* `compute.addresses.createInternal` -* `compute.addresses.deleteInternal` -* `compute.addresses.get` -* `compute.addresses.useInternal` -* `compute.backendServices.create` -* `compute.backendServices.delete` -* `compute.backendServices.get` -* `compute.backendServices.use` -* `compute.disks.create` -* `compute.firewalls.create` -* `compute.firewalls.delete` -* `compute.firewalls.get` -* `compute.firewalls.update` -* `compute.globalAddresses.create` -* `compute.globalAddresses.delete` -* `compute.globalAddresses.get` -* `compute.globalAddresses.use` -* `compute.globalForwardingRules.create` -* `compute.globalForwardingRules.delete` -* `compute.globalForwardingRules.get` -* `compute.globalForwardingRules.setLabels` -* `compute.globalOperations.get` -* `compute.healthChecks.create` -* `compute.healthChecks.delete` -* `compute.healthChecks.get` -* `compute.healthChecks.useReadOnly` -* `compute.instanceGroupManagers.create` -* `compute.instanceGroupManagers.delete` -* `compute.instanceGroupManagers.get` -* `compute.instanceGroupManagers.update` -* `compute.instanceGroups.create` -* `compute.instanceGroups.delete` -* `compute.instanceGroups.get` -* `compute.instanceGroups.update` -* `compute.instanceGroups.use` -* `compute.instances.create` -* `compute.instances.setLabels` -* `compute.instances.setMetadata` -* `compute.instances.setTags` -* `compute.instanceTemplates.create` -* `compute.instanceTemplates.delete` -* `compute.instanceTemplates.get` -* `compute.instanceTemplates.useReadOnly` -* `compute.networks.create` -* `compute.networks.delete` -* `compute.networks.get` -* `compute.networks.updatePolicy` -* `compute.routers.create` -* `compute.routers.delete` -* `compute.routers.get` -* `compute.routers.update` -* `compute.subnetworks.create` -* `compute.subnetworks.delete` -* `compute.subnetworks.get` -* `compute.subnetworks.use` -* `compute.targetTcpProxies.create` -* `compute.targetTcpProxies.delete` -* `compute.targetTcpProxies.get` -* `compute.targetTcpProxies.use` -* `iam.serviceAccounts.actAs` - -Together, the built-in roles `roles/editor`, `roles/compute.instanceAdmin` and `roles/resourcemanager.projectIamAdmin` form a superset of these permissions. - -Follow Google's guide on [understanding](https://cloud.google.com/iam/docs/understanding-roles) and [assigning roles](https://cloud.google.com/iam/docs/granting-changing-revoking-access). - - - - -Constellation on STACKIT requires a User Access Token (UAT) for the OpenStack API and a STACKIT service account. -The UAT already has all required permissions by default. -The STACKIT service account needs the `editor` role to create STACKIT LoadBalancers. -Look at the [STACKIT documentation](https://docs.stackit.cloud/stackit/en/getting-started-in-service-accounts-134415831.html) on how to create the service account and assign the role. - - - - -### Authentication - -You need to authenticate with your CSP. The following lists the required steps for *testing* and *production* environments. - -:::note -The steps for a *testing* environment are simpler. However, they may expose secrets to the CSP. If in doubt, follow the *production* steps. -::: - - - - -**Testing** - -You can use the [AWS CloudShell](https://console.aws.amazon.com/cloudshell/home). Make sure you are [authorized to use it](https://docs.aws.amazon.com/cloudshell/latest/userguide/sec-auth-with-identities.html). 
- -**Production** - -Use the latest version of the [AWS CLI](https://aws.amazon.com/cli/) on a trusted machine: - -```bash -aws configure -``` - -Options and first steps are described in the [AWS CLI documentation](https://docs.aws.amazon.com/cli/index.html). - - - - -**Testing** - -Simply open the [Azure Cloud Shell](https://docs.microsoft.com/en-us/azure/cloud-shell/overview). - -**Production** - -Use the latest version of the [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/) on a trusted machine: - -```bash -az login -``` - -Other options are described in Azure's [authentication guide](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli). - - - - -**Testing** - -You can use the [Google Cloud Shell](https://cloud.google.com/shell). Make sure your [session is authorized](https://cloud.google.com/shell/docs/auth). For example, execute `gsutil` and accept the authorization prompt. - -**Production** - -Use one of the following options on a trusted machine: - -* Use the [`gcloud` CLI](https://cloud.google.com/sdk/gcloud) - - ```bash - gcloud auth application-default login - ``` - - This will ask you to log-in to your Google account and create your credentials. - The Constellation CLI will automatically load these credentials when needed. - -* Set up a service account and pass the credentials manually - - Follow [Google's guide](https://cloud.google.com/docs/authentication/production#manually) for setting up your credentials. - - - - -You need to authenticate with the infrastructure API (OpenStack) and create a service account (STACKIT API). - -1. [Follow the STACKIT documentation](https://docs.stackit.cloud/stackit/en/step-1-generating-of-user-access-token-11763726.html) for obtaining a User Access Token (UAT) to use the infrastructure API -2. Create a configuration file with the credentials from the User Access Token under: - * Linux: `~/.config/openstack/clouds.yaml` - * macOS: `/Users//Library/Application Support/openstack/clouds.yaml` or `/etc/openstack/clouds.yaml` - * Windows: `%AppData%\openstack\clouds.yaml` - - - ```yaml - clouds: - stackit: - auth: - auth_url: https://keystone.api.iaas.eu01.stackit.cloud/v3 - username: REPLACE_WITH_UAT_USERNAME - password: REPLACE_WITH_UAT_PASSWORD - project_id: REPLACE_WITH_STACKIT_PROJECT_ID - project_name: REPLACE_WITH_STACKIT_PROJECT_NAME - user_domain_name: portal_mvp - project_domain_name: portal_mvp - region_name: RegionOne - identity_api_version: 3 - ``` - -:::caution - -`project_id` refers to the ID of your OpenStack project. The STACKIT portal also shows the STACKIT ID that's associated with your project in some places. Make sure you insert the OpenStack project ID in the `clouds.yaml` file. - -::: - -3. [Follow the STACKIT documentation](https://docs.stackit.cloud/stackit/en/getting-started-in-service-accounts-134415831.html) for creating a service account and an access token -4. Assign the `editor` role to the service account by [following the documentation](https://docs.stackit.cloud/stackit/en/getting-started-in-service-accounts-134415831.html) -5. Create a configuration file under `~/.stackit/credentials.json` (`%USERPROFILE%\.stackit\credentials.json` on Windows) - - ```json - {"STACKIT_SERVICE_ACCOUNT_TOKEN":"REPLACE_WITH_TOKEN"} - ``` - - - - - -## Next steps - -You are now ready to [deploy your first confidential Kubernetes cluster and application](first-steps.md). 
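-Before moving on to cluster creation, you can optionally sanity-check that the credentials you just configured are picked up correctly. These are standard CSP CLI calls, shown here only as a sketch; run the one matching your provider:
-
-```bash
-# AWS: print the identity the CLI is authenticated as
-aws sts get-caller-identity
-
-# Azure: show the currently selected subscription
-az account show
-
-# GCP: confirm that application-default credentials can mint a token
-gcloud auth application-default print-access-token > /dev/null && echo "GCP credentials OK"
-
-# STACKIT / OpenStack: request a token using the "stackit" entry from clouds.yaml
-openstack --os-cloud stackit token issue
-```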
diff --git a/docs/versioned_docs/version-2.19/getting-started/marketplaces.md b/docs/versioned_docs/version-2.19/getting-started/marketplaces.md deleted file mode 100644 index a6763a42a..000000000 --- a/docs/versioned_docs/version-2.19/getting-started/marketplaces.md +++ /dev/null @@ -1,56 +0,0 @@ -# Using Constellation via Cloud Marketplaces - -Constellation is available through the Marketplaces of AWS, Azure, GCP, and STACKIT. This allows you to create self-managed Constellation clusters that are billed on a pay-per-use basis (hourly, per vCPU) with your CSP account. You can still get direct support by Edgeless Systems. For more information, please [contact us](https://www.edgeless.systems/enterprise-support/). - -This document explains how to run Constellation with the dynamically billed cloud marketplace images. - - - - -To use Constellation's marketplace images, ensure that you are subscribed to the [marketplace offering](https://aws.amazon.com/marketplace/pp/prodview-2mbn65nv57oys) through the web portal. - -Then, enable the use of marketplace images in your Constellation `constellation-conf.yaml` [config file](../workflows/config.md): - -```bash -yq eval -i ".provider.aws.useMarketplaceImage = true" constellation-conf.yaml -``` - - - - -Constellation has a private marketplace plan. Please [contact us](https://www.edgeless.systems/enterprise-support/) to gain access. - -To use a marketplace image, you need to accept the marketplace image's terms once for your subscription with the [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/vm/image/terms?view=azure-cli-latest): - -```bash -az vm image terms accept --publisher edgelesssystems --offer constellation --plan constellation -``` - -Then, enable the use of marketplace images in your Constellation `constellation-conf.yaml` [config file](../workflows/config.md): - -```bash -yq eval -i ".provider.azure.useMarketplaceImage = true" constellation-conf.yaml -``` - - - - -To use a marketplace image, ensure that the account is entitled to use marketplace images by Edgeless Systems by accepting the terms through the [web portal](https://console.cloud.google.com/marketplace/vm/config/edgeless-systems-public/constellation). - -Then, enable the use of marketplace images in your Constellation `constellation-conf.yaml` [config file](../workflows/config.md): - -```bash -yq eval -i ".provider.gcp.useMarketplaceImage = true" constellation-conf.yaml -``` - - - - -On STACKIT, the selected Constellation image is always a marketplace image. You can find more information on the STACKIT portal. - - - - -Ensure that the cluster uses an official release image version (i.e., `.image=vX.Y.Z` in the `constellation-conf.yaml` file). - -From there, you can proceed with the [cluster creation](../workflows/create.md) as usual. diff --git a/docs/versioned_docs/version-2.19/intro.md b/docs/versioned_docs/version-2.19/intro.md deleted file mode 100644 index 0bfe86da9..000000000 --- a/docs/versioned_docs/version-2.19/intro.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -slug: / -id: intro ---- -# Introduction - -Welcome to the documentation of Constellation! Constellation is a Kubernetes engine that aims to provide the best possible data security. - -![Constellation concept](/img/concept.svg) - - Constellation shields your entire Kubernetes cluster from the underlying cloud infrastructure. Everything inside is always encrypted, including at runtime in memory. 
For this, Constellation leverages a technology called *confidential computing* and more specifically Confidential VMs. - -:::tip -See the 📄[whitepaper](https://content.edgeless.systems/hubfs/Confidential%20Computing%20Whitepaper.pdf) for more information on confidential computing. -::: - -## Goals - -From a security perspective, Constellation is designed to keep all data always encrypted and to prevent any access from the underlying (cloud) infrastructure. This includes access from datacenter employees, privileged cloud admins, and attackers coming through the infrastructure. Such attackers could be malicious co-tenants escalating their privileges or hackers who managed to compromise a cloud server. - -From a DevOps perspective, Constellation is designed to work just like what you would expect from a modern Kubernetes engine. - -## Use cases - -Constellation provides unique security [features](overview/confidential-kubernetes.md) and [benefits](overview/security-benefits.md). The core use cases are: - -* Increasing the overall security of your clusters -* Increasing the trustworthiness of your SaaS offerings -* Moving sensitive workloads from on-prem to the cloud -* Meeting regulatory requirements - -## Next steps - -You can learn more about the concept of Confidential Kubernetes, features, security benefits, and performance of Constellation in the *Basics* section. To jump right into the action head to *Getting started*. diff --git a/docs/versioned_docs/version-2.19/overview/clouds.md b/docs/versioned_docs/version-2.19/overview/clouds.md deleted file mode 100644 index b2695d28e..000000000 --- a/docs/versioned_docs/version-2.19/overview/clouds.md +++ /dev/null @@ -1,66 +0,0 @@ -# Feature status of clouds - -What works on which cloud? Currently, Confidential VMs (CVMs) are available in varying quality on the different clouds and software stacks. - -For Constellation, the ideal environment provides the following: - -1. Ability to run arbitrary software and images inside CVMs -2. CVMs based on AMD SEV-SNP (available in EPYC CPUs since the Milan generation) or Intel TDX (available in Xeon CPUs since the Sapphire Rapids generation) -3. Ability for CVM guests to obtain raw hardware attestation statements -4. Reviewable, open-source firmware inside CVMs -5. Capability of the firmware to attest the integrity of the code it passes control to, e.g., with an embedded virtual TPM (vTPM) - -(1) is a functional must-have. (2)--(5) are required for remote attestation that fully keeps the infrastructure/cloud out. Constellation can work without them or with approximations, but won't protect against certain privileged attackers anymore. - -The following table summarizes the state of features for different infrastructures. - -| **Feature** | **AWS** | **Azure** | **GCP** | **STACKIT** | **OpenStack (Yoga)** | -|-----------------------------------|---------|-----------|---------|--------------|----------------------| -| **1. Custom images** | Yes | Yes | Yes | Yes | Yes | -| **2. SEV-SNP or TDX** | Yes | Yes | Yes | No | Depends on kernel/HV | -| **3. Raw guest attestation** | Yes | Yes | Yes | No | Depends on kernel/HV | -| **4. Reviewable firmware** | Yes | No* | No | No | Depends on kernel/HV | -| **5. Confidential measured boot** | No | Yes | No | No | Depends on kernel/HV | - -## Amazon Web Services (AWS) - -Amazon EC2 [supports AMD SEV-SNP](https://aws.amazon.com/de/about-aws/whats-new/2023/04/amazon-ec2-amd-sev-snp/). -Regarding (3), AWS provides direct access to attestation statements. 
-However, regarding (5), attestation is partially based on the [NitroTPM](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html) for [measured boot](../architecture/attestation.md#measured-boot), which is a vTPM managed by the Nitro hypervisor. -Hence, the hypervisor is currently part of Constellation's TCB. -Regarding (4), the [firmware is open source](https://github.com/aws/uefi) and can be reproducibly built. - -## Microsoft Azure - -With its [CVM offering](https://docs.microsoft.com/en-us/azure/confidential-computing/confidential-vm-overview), Azure provides the best foundations for Constellation. -Regarding (3), Azure provides direct access to attestation statements. -The firmware runs in an isolated domain inside the CVM and exposes a vTPM (5), but it's closed source (4). -On SEV-SNP, Azure uses VM Privilege Level (VMPL) isolation for the separation of firmware and the rest of the VM; on TDX, they use TD partitioning. -This firmware is signed by Azure. -The signature is reflected in the attestation statements of CVMs. -Thus, the Azure closed-source firmware becomes part of Constellation's trusted computing base (TCB). - -\* Recently, [Azure announced the open source paravisor OpenHCL](https://techcommunity.microsoft.com/blog/windowsosplatform/openhcl-the-new-open-source-paravisor/4273172). It's the foundation for fully open source and verifiable CVM firmware. Once Azure provides their CVM firmware with reproducible builds based on OpenHCL, (4) switches from *No* to *Yes*. Constellation will support OpenHCL based firmware on Azure in the future. - -## Google Cloud Platform (GCP) - -The [CVMs Generally Available in GCP](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview#technologies) are based on AMD SEV-ES or SEV-SNP. -Regarding (3), with their SEV-SNP offering Google provides direct access to attestation statements. -However, regarding (5), attestation is partially based on the [Shielded VM vTPM](https://cloud.google.com/compute/shielded-vm/docs/shielded-vm#vtpm) for [measured boot](../architecture/attestation.md#measured-boot), which is a vTPM managed by Google's hypervisor. -Hence, the hypervisor is currently part of Constellation's TCB. -Regarding (4), the CVMs still include closed-source firmware. - -[TDX on Google](https://cloud.google.com/blog/products/identity-security/confidential-vms-on-intel-cpus-your-datas-new-intelligent-defense) is in public preview. -With it, Constellation would have a similar TCB and attestation flow as with the current SEV-SNP offering. - -## STACKIT - -[STACKIT Compute Engine](https://www.stackit.de/en/product/stackit-compute-engine/) supports AMD SEV-ES. A vTPM is used for measured boot, which is a vTPM managed by STACKIT's hypervisor. Hence, the hypervisor is currently part of Constellation's TCB. - -## OpenStack - -OpenStack is an open-source cloud and infrastructure management software. It's used by many smaller CSPs and datacenters. In the latest *Yoga* version, OpenStack has basic support for CVMs. However, much depends on the employed kernel and hypervisor. Features (2)--(4) are likely to be a *Yes* with Linux kernel version 6.2. Thus, going forward, OpenStack on corresponding AMD or Intel hardware will be a viable underpinning for Constellation. - -## Conclusion - -The different clouds and software like the Linux kernel and OpenStack are in the process of building out their support for state-of-the-art CVMs. Azure has already most features in place. 
For Constellation, the status quo means that the TCB has different shapes on different infrastructures. With broad SEV-SNP support coming to the Linux kernel, we soon expect a normalization of features across infrastructures. diff --git a/docs/versioned_docs/version-2.19/overview/confidential-kubernetes.md b/docs/versioned_docs/version-2.19/overview/confidential-kubernetes.md deleted file mode 100644 index bff8c3322..000000000 --- a/docs/versioned_docs/version-2.19/overview/confidential-kubernetes.md +++ /dev/null @@ -1,42 +0,0 @@ -# Confidential Kubernetes - -We use the term *Confidential Kubernetes* to refer to the concept of using confidential-computing technology to shield entire Kubernetes clusters from the infrastructure. The three defining properties of this concept are: - -1. **Workload shielding**: the confidentiality and integrity of all workload-related data and code are enforced. -2. **Control plane shielding**: the confidentiality and integrity of the cluster's control plane, state, and workload configuration are enforced. -3. **Attestation and verifiability**: the two properties above can be verified remotely based on hardware-rooted cryptographic certificates. - -Each of the above properties is equally important. Only with all three in conjunction, an entire cluster can be shielded without gaps. - -## Constellation security features - -Constellation implements the Confidential Kubernetes concept with the following security features. - -* **Runtime encryption**: Constellation runs all Kubernetes nodes inside Confidential VMs (CVMs). This gives runtime encryption for the entire cluster. -* **Network and storage encryption**: Constellation augments this with transparent encryption of the [network](../architecture/networking.md), [persistent storage](../architecture/encrypted-storage.md), and other managed storage like [AWS S3](../architecture/encrypted-storage.md#encrypted-s3-object-storage). Thus, workloads and control plane are truly end-to-end encrypted: at rest, in transit, and at runtime. -* **Transparent key management**: Constellation manages the corresponding [cryptographic keys](../architecture/keys.md) inside CVMs. -* **Node attestation and verification**: Constellation verifies the integrity of each new CVM-based node using [remote attestation](../architecture/attestation.md). Only "good" nodes receive the cryptographic keys required to access the network and storage of a cluster. -* **Confidential computing-optimized images**: A node is "good" if it's running a signed Constellation [node image](../architecture/images.md) inside a CVM and is in the expected state. (Node images are hardware-measured during boot. The measurements are reflected in the attestation statements that are produced by nodes and verified by Constellation.) -* **"Whole cluster" attestation**: Towards the DevOps engineer, Constellation provides a single hardware-rooted certificate from which all of the above can be verified. - -With the above, Constellation wraps an entire cluster into one coherent and verifiable *confidential context*. The concept is depicted in the following. - -![Confidential Kubernetes](../_media/concept-constellation.svg) - -## Comparison: Managed Kubernetes with CVMs - -In comparison, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. 
Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. Consequently, this approach leaves a large attack surface, as is depicted in the following. - -![Concept: Managed Kubernetes plus CVMs](../_media/concept-managed.svg) - -The following table highlights the key differences in terms of features. - -| | Managed Kubernetes with CVMs | Confidential Kubernetes (Constellation✨) | -|-------------------------------------|------------------------------|--------------------------------------------| -| Runtime encryption | Partial (data plane only)| **Yes** | -| Node image verification | No | **Yes** | -| Full cluster attestation | No | **Yes** | -| Transparent network encryption | No | **Yes** | -| Transparent storage encryption | No | **Yes** | -| Confidential key management | No | **Yes** | -| Cloud agnostic / multi-cloud | No | **Yes** | diff --git a/docs/versioned_docs/version-2.19/overview/license.md b/docs/versioned_docs/version-2.19/overview/license.md deleted file mode 100644 index 34122c025..000000000 --- a/docs/versioned_docs/version-2.19/overview/license.md +++ /dev/null @@ -1,33 +0,0 @@ -# License - -## Source code - -Constellation's source code is available on [GitHub](https://github.com/edgelesssys/constellation) under the [GNU Affero General Public License v3.0](https://github.com/edgelesssys/constellation/blob/main/LICENSE). - -## Binaries - -Edgeless Systems provides ready-to-use and [signed](../architecture/attestation.md#chain-of-trust) binaries of Constellation. This includes the CLI and the [node images](../architecture/images.md). - -These binaries may be used free of charge within the bounds of Constellation's [**Community License**](#community-license). An [**Enterprise License**](#enterprise-license) can be purchased from Edgeless Systems. - -The Constellation CLI displays relevant license information when you initialize your cluster. You are responsible for staying within the bounds of your respective license. Constellation doesn't enforce any limits so as not to endanger your cluster's availability. - -## Terraform provider - -Edgeless Systems provides a [Terraform provider](https://github.com/edgelesssys/terraform-provider-constellation/releases), which may be used free of charge within the bounds of Constellation's [**Community License**](#community-license). An [**Enterprise License**](#enterprise-license) can be purchased from Edgeless Systems. - -You are responsible for staying within the bounds of your respective license. Constellation doesn't enforce any limits so as not to endanger your cluster's availability. - -## Community License - -You are free to use the Constellation binaries provided by Edgeless Systems to create services for internal consumption, evaluation purposes, or non-commercial use. You must not use the Constellation binaries to provide commercial hosted services to third parties. Edgeless Systems gives no warranties and offers no support. - -## Enterprise License - -Enterprise Licenses don't have the above limitations and come with support and additional features. 
Find out more at the [product website](https://www.edgeless.systems/products/constellation/). - -Once you have received your Enterprise License file, place it in your [Constellation workspace](../architecture/orchestration.md#workspaces) in a file named `constellation.license`. - -## CSP Marketplaces - -Constellation is available through the Marketplaces of AWS, Azure, GCP, and STACKIT. This allows you to create self-managed Constellation clusters that are billed on a pay-per-use basis (hourly, per vCPU) with your CSP account. You can still get direct support by Edgeless Systems. For more information, please [contact us](https://www.edgeless.systems/enterprise-support/). diff --git a/docs/versioned_docs/version-2.19/overview/performance/application.md b/docs/versioned_docs/version-2.19/overview/performance/application.md deleted file mode 100644 index c67d59644..000000000 --- a/docs/versioned_docs/version-2.19/overview/performance/application.md +++ /dev/null @@ -1,102 +0,0 @@ -# Application benchmarks - -## HashiCorp Vault - -[HashiCorp Vault](https://www.vaultproject.io/) is a distributed secrets management software that can be deployed to Kubernetes. -HashiCorp maintains a benchmarking tool for vault, [vault-benchmark](https://github.com/hashicorp/vault-benchmark/). -Vault-benchmark generates load on a Vault deployment and measures response times. - -This article describes the results from running vault-benchmark on Constellation, AKS, and GKE. -You can find the setup for producing the data discussed in this article in the [vault-benchmarks](https://github.com/edgelesssys/vault-benchmarks) repository. - -The Vault API used during benchmarking is the [transits secret engine](https://developer.hashicorp.com/vault/docs/secrets/transit). -This allows services to send data to Vault for encryption, decryption, signing, and verification. - -## Results - -On each run, vault-benchmark sends requests and measures the latencies. -The measured latencies are aggregated through various statistical features. -After running the benchmark n times, the arithmetic mean over a subset of the reported statistics is calculated. -The selected features are arithmetic mean, 99th percentile, minimum, and maximum. - -Arithmetic mean gives a general sense of the latency on each target. -The 99th percentile shows performance in (most likely) erroneous states. -Minimum and maximum mark the range within which latency varies each run. - -The benchmark was configured with 1300 workers and 10 seconds per run. -Those numbers were chosen empirically. -The latency was stabilizing at 10 seconds runtime, not changing with further increase. -Increasing the number of workers beyond 1300 leads to request failures, marking the limit Vault was able to handle in this setup. -All results are based on 100 runs. - -The following data was generated while running five replicas, one primary, and four standby nodes. -All numbers are in seconds if not indicated otherwise. 
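As a rough illustration of how such a run is driven: vault-benchmark reads its duration, worker count, and transit test definitions from a configuration file and is pointed at the deployed Vault instance. The commands below are a sketch only; the address, token, and file name are placeholders, and the actual configuration and wrapper scripts live in the [vault-benchmarks](https://github.com/edgelesssys/vault-benchmarks) repository.

```bash
# Sketch: drive one 10-second benchmark run against the deployed Vault instance.
# VAULT_ADDR, VAULT_TOKEN, and transit.hcl are placeholders for this illustration.
export VAULT_ADDR=http://vault.vault.svc.cluster.local:8200
export VAULT_TOKEN=<benchmark-token>
vault-benchmark run -config=transit.hcl   # transit.hcl defines duration, workers, and the transit tests
```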
-``` -========== Results AKS ========== -Mean: mean: 1.632200, variance: 0.002057 -P99: mean: 5.480679, variance: 2.263700 -Max: mean: 6.651001, variance: 2.808401 -Min: mean: 0.011415, variance: 0.000133 -========== Results GKE ========== -Mean: mean: 1.656435, variance: 0.003615 -P99: mean: 6.030807, variance: 3.955051 -Max: mean: 7.164843, variance: 3.300004 -Min: mean: 0.010233, variance: 0.000111 -========== Results C11n ========== -Mean: mean: 1.651549, variance: 0.001610 -P99: mean: 5.780422, variance: 3.016106 -Max: mean: 6.942997, variance: 3.075796 -Min: mean: 0.013774, variance: 0.000228 -========== AKS vs C11n ========== -Mean: +1.171577 % (AKS is faster) -P99: +5.185495 % (AKS is faster) -Max: +4.205618 % (AKS is faster) -Min: +17.128781 % (AKS is faster) -========== GKE vs C11n ========== -Mean: -0.295851 % (GKE is slower) -P99: -4.331603 % (GKE is slower) -Max: -3.195248 % (GKE is slower) -Min: +25.710886 % (GKE is faster) -``` - -**Interpretation**: Latencies are all within ~5% of each other. -AKS performs slightly better than GKE and Constellation (C11n) in all cases except minimum latency. -Minimum latency is the lowest for GKE. -Compared to GKE, Constellation had slightly lower peak latencies (99th percentile and maximum), indicating that Constellation could have handled slightly more concurrent accesses than GKE. -Overall, performance is at comparable levels across all three distributions. -Based on these numbers, you can use a similarly sized Constellation cluster to run your existing Vault deployment. - -### Visualization - -The following plots visualize the data presented above as [box plots](https://en.wikipedia.org/wiki/Box_plot). -The whiskers denote the minimum and maximum. -The box stretches from the 25th to the 75th percentile, with the dividing bar marking the 50th percentile. -The circles outside the whiskers denote outliers. - -
-Mean Latency - -![Mean Latency](../../_media/benchmark_vault/5replicas/mean_latency.png) - -
- -
-99th Percentile Latency - -![99th Percentile Latency](../../_media/benchmark_vault/5replicas/p99_latency.png) - -
- -
-Maximum Latency - -![Maximum Latency](../../_media/benchmark_vault/5replicas/max_latency.png) - -
- -
-Minimum Latency - -![Minimum Latency](../../_media/benchmark_vault/5replicas/min_latency.png) - -
diff --git a/docs/versioned_docs/version-2.19/overview/performance/compute.md b/docs/versioned_docs/version-2.19/overview/performance/compute.md deleted file mode 100644 index 88dd4b1b2..000000000 --- a/docs/versioned_docs/version-2.19/overview/performance/compute.md +++ /dev/null @@ -1,11 +0,0 @@ -# Impact of runtime encryption on compute performance - -All nodes in a Constellation cluster are executed inside Confidential VMs (CVMs). Consequently, the performance of Constellation is inherently linked to the performance of these CVMs. - -## AMD and Azure benchmarking - -AMD and Azure have collectively released a [performance benchmark](https://community.amd.com/t5/business/microsoft-azure-confidential-computing-powered-by-3rd-gen-epyc/ba-p/497796) for CVMs that utilize 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. This benchmark, which included a variety of mostly compute-intensive tests such as SPEC CPU 2017 and CoreMark, demonstrated that CVMs experience only minor performance degradation (ranging from 2% to 8%) when compared to standard VMs. Such results are indicative of the performance that can be expected from compute-intensive workloads running with Constellation on Azure. - -## AMD and Google benchmarking - -Similarly, AMD and Google have jointly released a [performance benchmark](https://www.amd.com/system/files/documents/3rd-gen-epyc-gcp-c2d-conf-compute-perf-brief.pdf) for CVMs employing 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. With high-performance computing workloads such as WRF, NAMD, Ansys CFS, and Ansys LS_DYNA, they observed analogous findings, with only minor performance degradation (between 2% and 4%) compared to standard VMs. These outcomes are reflective of the performance that can be expected for compute-intensive workloads running with Constellation on GCP. diff --git a/docs/versioned_docs/version-2.19/overview/performance/io.md b/docs/versioned_docs/version-2.19/overview/performance/io.md deleted file mode 100644 index 3ae796f8a..000000000 --- a/docs/versioned_docs/version-2.19/overview/performance/io.md +++ /dev/null @@ -1,204 +0,0 @@ -# I/O performance benchmarks - -To assess the overall performance of Constellation, this benchmark evaluates Constellation v2.6.0 in terms of storage I/O using [`fio`](https://fio.readthedocs.io/en/latest/fio_doc.html) and network performance using the [Kubernetes Network Benchmark](https://github.com/InfraBuilder/k8s-bench-suite#knb--kubernetes-network-be). - -This benchmark tested Constellation on Azure and GCP and compared the results against the managed Kubernetes offerings AKS and GKE. - -## Configurations - -### Constellation - -The benchmark was conducted with Constellation v2.6.0, Kubernetes v1.25.7, and Cilium v1.12. -It ran on the following infrastructure configurations. - -Constellation on Azure: - -- Nodes: 3 (1 Control-plane, 2 Worker) -- Machines: `DC4as_v5`: 3rd Generation AMD EPYC 7763v (Milan) processor with 4 Cores, 16 GiB memory -- CVM: `true` -- Region: `West US` -- Zone: `2` - -Constellation on GCP: - -- Nodes: 3 (1 Control-plane, 2 Worker) -- Machines: `n2d-standard-4`: 2nd Generation AMD EPYC (Rome) processor with 4 Cores, 16 GiB of memory -- CVM: `true` -- Zone: `europe-west3-b` - -### AKS - -On AKS, the benchmark used Kubernetes `v1.24.9` and nodes with version `AKSUbuntu-1804gen2containerd-2023.02.15`. 
-AKS ran with the [`kubenet`](https://learn.microsoft.com/en-us/azure/aks/concepts-network#kubenet-basic-networking) CNI and the [default CSI driver](https://learn.microsoft.com/en-us/azure/aks/azure-disk-csi) for Azure Disk. - -The following infrastructure configuration was used: - -- Nodes: 2 (2 Worker) -- Machines: `D4as_v5`: 3rd Generation AMD EPYC 7763v (Milan) processor with 4 Cores, 16 GiB memory -- CVM: `false` -- Region: `West US` -- Zone: `2` - -### GKE - -On GKE, the benchmark used Kubernetes `v1.24.9` and nodes with version `1.24.9-gke.3200`. -GKE ran with the [`kubenet`](https://cloud.google.com/kubernetes-engine/docs/concepts/network-overview) CNI and the [default CSI driver](https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/gce-pd-csi-driver) for Compute Engine persistent disk. - -The following infrastructure configuration was used: - -- Nodes: 2 (2 Worker) -- Machines: `n2d-standard-4` 2nd Generation AMD EPYC (Rome) processor with 4 Cores, 16 GiB of memory -- CVM: `false` -- Zone: `europe-west3-b` - -## Results - -### Network - -This section gives a thorough analysis of the network performance of Constellation, specifically focusing on measuring TCP and UDP bandwidth. -The benchmark measured the bandwidth of pod-to-pod and pod-to-service connections between two different nodes using [`iperf`](https://iperf.fr/). - -GKE and Constellation on GCP had a maximum network bandwidth of [10 Gbps](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines). -AKS with `Standard_D4as_v5` machines has a maximum network bandwidth of [12.5 Gbps](https://learn.microsoft.com/en-us/azure/virtual-machines/dasv5-dadsv5-series#dasv5-series). -The Confidential VM equivalent `Standard_DC4as_v5` currently has a network bandwidth of [1.25 Gbps](https://learn.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series#dcasv5-series-products). -Therefore, to make the test comparable, both AKS and Constellation on Azure were running with `Standard_DC4as_v5` machines and 1.25 Gbps bandwidth. - -Constellation on Azure and AKS used an MTU of 1500. -Constellation on GCP used an MTU of 8896. GKE used an MTU of 1450. - -The difference in network bandwidth can largely be attributed to two factors: - -- Constellation's [network encryption](../../architecture/networking.md) via Cilium and WireGuard, which protects data in transit. -- [AMD SEV using SWIOTLB bounce buffers](https://lore.kernel.org/all/20200204193500.GA15564@ashkalra_ubuntu_server/T/) for all DMA including network I/O. - -#### Pod-to-Pod - -In this scenario, the client Pod connects directly to the server Pod via its IP address. - -```mermaid -flowchart LR - subgraph Node A - Client[Client] - end - subgraph Node B - Server[Server] - end - Client ==>|traffic| Server -``` - -The results for "Pod-to-Pod" on Azure are as follows: - -![Network Pod2Pod Azure benchmark graph](../../_media/benchmark_net_p2p_azure.png) - -The results for "Pod-to-Pod" on GCP are as follows: - -![Network Pod2Pod GCP benchmark graph](../../_media/benchmark_net_p2p_gcp.png) - -#### Pod-to-Service - -In this scenario, the client Pod connects to the server Pod via a ClusterIP service. This is more relevant to real-world use cases.
- -```mermaid -flowchart LR - subgraph Node A - Client[Client] ==>|traffic| Service[Service] - end - subgraph Node B - Server[Server] - end - Service ==>|traffic| Server -``` - -The results for "Pod-to-Service" on Azure are as follows: - -![Network Pod2SVC Azure benchmark graph](../../_media/benchmark_net_p2svc_azure.png) - -The results for "Pod-to-Service" on GCP are as follows: - -![Network Pod2SVC GCP benchmark graph](../../_media/benchmark_net_p2svc_gcp.png) - -In this comparison of Constellation on GCP with GKE, Constellation has 58% less TCP bandwidth. However, UDP bandwidth was slightly better with Constellation, thanks to its higher MTU. - -Similarly, when comparing Constellation on Azure with AKS using CVMs, Constellation achieved approximately 10% less TCP and 40% less UDP bandwidth. - -### Storage I/O - -Azure and GCP offer persistent storage for their Kubernetes services AKS and GKE via the Container Storage Interface (CSI). CSI storage in Kubernetes is available via `PersistentVolumes` (PV) and consumed via `PersistentVolumeClaims` (PVC). -Upon requesting persistent storage through a PVC, GKE and AKS will provision a PV as defined by a default [storage class](https://kubernetes.io/docs/concepts/storage/storage-classes/). -Constellation provides persistent storage on Azure and GCP [that's encrypted on the CSI layer](../../architecture/encrypted-storage.md). -Similarly, upon a PVC request, Constellation will provision a PV via a default storage class. - -For Constellation on Azure and AKS, the benchmark ran with Azure Disk storage [Standard SSD](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#standard-ssds) of 400 GiB size. -The [DC4as machine type](https://learn.microsoft.com/en-us/azure/virtual-machines/dasv5-dadsv5-series#dasv5-series) with four cores provides the following maximum performance: - -- 6400 (20000 burst) IOPS -- 144 MB/s (600 MB/s burst) throughput - -However, the performance is bound by the capabilities of the [512 GiB Standard SSD size](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#standard-ssds) (the size class of 400 GiB volumes): - -- 500 (600 burst) IOPS -- 60 MB/s (150 MB/s burst) throughput - -For Constellation on GCP and GKE, the benchmark ran with Compute Engine Persistent Disk Storage [pd-balanced](https://cloud.google.com/compute/docs/disks) of 400 GiB size. -The N2D machine type with four cores and pd-balanced provides the following [maximum performance](https://cloud.google.com/compute/docs/disks/performance#n2d_vms): - -- 3,000 read IOPS -- 15,000 write IOPS -- 240 MB/s read throughput -- 240 MB/s write throughput - -However, the performance is bound by the capabilities of a [`Zonal balanced PD`](https://cloud.google.com/compute/docs/disks/performance#zonal-persistent-disks) with 400 GiB size: - -- 2400 read IOPS -- 2400 write IOPS -- 112 MB/s read throughput -- 112 MB/s write throughput - -The [`fio`](https://fio.readthedocs.io/en/latest/fio_doc.html) benchmark consists of several tests. -The benchmark used [`Kubestr`](https://github.com/kastenhq/kubestr) to run `fio` in Kubernetes. -The default test performs randomized access patterns that accurately depict worst-case I/O scenarios for most applications.
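For orientation, running one of these tests with Kubestr comes down to a single invocation against the cluster's default storage class. This is a sketch under the assumption that Kubestr's `fio` subcommand accepts a custom job file via these flags; the storage class name is a placeholder, and the actual job file used is linked below.

```bash
# Sketch: run the fio job file against a given storage class with Kubestr.
kubestr fio --storageclass <default-storage-class> --size 400Gi --fiofile fio.ini
```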
- -The following `fio` settings were used: - -- No cloud caching -- No OS caching -- Single CPU -- 60 seconds runtime -- 10 seconds ramp-up time -- 10 GiB file -- IOPS: 4 KB blocks and 128 iodepth -- Bandwidth: 1024 KB blocks and 128 iodepth - -For more details, see the [`fio` test configuration](https://github.com/edgelesssys/constellation/blob/main/.github/actions/e2e_benchmark/fio.ini). - -The results for IOPS on Azure are as follows: - -![I/O IOPS Azure benchmark graph](../../_media/benchmark_fio_azure_iops.png) - -The results for IOPS on GCP are as follows: - -![I/O IOPS GCP benchmark graph](../../_media/benchmark_fio_gcp_iops.png) - -The results for bandwidth on Azure are as follows: - -![I/O bandwidth Azure benchmark graph](../../_media/benchmark_fio_azure_bw.png) - -The results for bandwidth on GCP are as follows: - -![I/O bandwidth GCP benchmark graph](../../_media/benchmark_fio_gcp_bw.png) - -On GCP, the results exceed the maximum performance guarantees of the chosen disk type. There are two possible explanations for this. The first is that there may be cloud caching in place that isn't configurable. Alternatively, the underlying provisioned disk size may be larger than what was requested, resulting in higher performance boundaries. - -When comparing Constellation on GCP with GKE, Constellation has similar bandwidth but about 10% less IOPS performance. On Azure, Constellation has similar IOPS performance compared to AKS, where both likely hit the maximum storage performance. However, Constellation has approximately 15% less read and write bandwidth. - -## Conclusion - -Despite the added [security benefits](../security-benefits.md) that Constellation provides, it only incurs a slight performance overhead when compared to managed Kubernetes offerings such as AKS and GKE. In most compute benchmarks, Constellation is on par with its alternatives. -While it may be slightly slower in certain I/O scenarios due to network and storage encryption, there is ongoing work to reduce this overhead to single digits. - -For instance, storage encryption only adds between 10% and 15% overhead in terms of bandwidth and IOPS. -Meanwhile, the biggest performance impact that Constellation currently faces is network encryption, which can incur up to 58% overhead on a 10 Gbps network. -However, the Cilium team has conducted [benchmarks with Cilium using WireGuard encryption](https://docs.cilium.io/en/latest/operations/performance/benchmark/#encryption-wireguard-ipsec) on a 100 Gbps network that yielded over 15 Gbps. -We're confident that Constellation will provide a similar level of performance with an upcoming release. - -Overall, Constellation strikes a great balance between security and performance, and we're continuously working to improve its performance capabilities while maintaining its high level of security. diff --git a/docs/versioned_docs/version-2.19/overview/performance/performance.md b/docs/versioned_docs/version-2.19/overview/performance/performance.md deleted file mode 100644 index 59bf86602..000000000 --- a/docs/versioned_docs/version-2.19/overview/performance/performance.md +++ /dev/null @@ -1,17 +0,0 @@ -# Performance analysis of Constellation - -This section provides a comprehensive examination of the performance characteristics of Constellation. - -## Runtime encryption - -Runtime encryption affects compute performance.
[Benchmarks by Azure and Google](compute.md) show that the performance degradation of Confidential VMs (CVMs) is small, ranging from 2% to 8% for compute-intensive workloads. - -## I/O performance benchmarks - -We evaluated the [I/O performance](io.md) of Constellation, utilizing a collection of synthetic benchmarks targeting networking and storage. -We further compared this performance to native managed Kubernetes offerings from various cloud providers, to better understand how Constellation stands in relation to standard practices. - -## Application benchmarking - -To gauge Constellation's applicability to well-known applications, we performed a [benchmark of HashiCorp Vault](application.md) running on Constellation. -The results were then compared to deployments on the managed Kubernetes offerings from different cloud providers, providing a tangible perspective on Constellation's performance in actual deployment scenarios. diff --git a/docs/versioned_docs/version-2.19/overview/product.md b/docs/versioned_docs/version-2.19/overview/product.md deleted file mode 100644 index 4b5d90706..000000000 --- a/docs/versioned_docs/version-2.19/overview/product.md +++ /dev/null @@ -1,12 +0,0 @@ -# Product features - -Constellation is a Kubernetes engine that aims to provide the best possible data security in combination with enterprise-grade scalability and reliability features---and a smooth user experience. - -From a security perspective, Constellation implements the [Confidential Kubernetes](confidential-kubernetes.md) concept and corresponding security features, which shield your entire cluster from the underlying infrastructure. - -From an operational perspective, Constellation provides the following key features: - -* **Native support for different clouds**: Constellation works on Amazon Web Services (AWS), Microsoft Azure, Google Cloud Platform (GCP), and STACKIT. Support for OpenStack-based environments is coming with a future release. Constellation securely interfaces with the cloud infrastructure to provide [cluster autoscaling](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), [dynamic persistent volumes](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/), and [service load balancing](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). -* **High availability**: Constellation uses a [multi-master architecture](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/) with a [stacked etcd topology](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/ha-topology/#stacked-etcd-topology) to ensure high availability. -* **Integrated Day-2 operations**: Constellation lets you securely [upgrade](../workflows/upgrade.md) your cluster to a new release. It also lets you securely [recover](../workflows/recovery.md) a failed cluster. Both with a single command. -* **Support for Terraform**: Constellation includes a [Terraform provider](../workflows/terraform-provider.md) that lets you manage the full lifecycle of your cluster via Terraform. 
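For example, the Day-2 operations listed above each map to a single CLI invocation. This is a minimal sketch; see the respective workflow documentation for prerequisites and additional flags.

```bash
# Check for and apply available upgrades from within the Constellation workspace.
constellation upgrade check
constellation upgrade apply

# Recover a stopped cluster by sending the recovery key to a node in the boot stage.
# <node-ip> is a placeholder for the node's endpoint (HOST[:PORT]).
constellation recover -e <node-ip>
```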
diff --git a/docs/versioned_docs/version-2.19/overview/security-benefits.md b/docs/versioned_docs/version-2.19/overview/security-benefits.md deleted file mode 100644 index 51a8b64f5..000000000 --- a/docs/versioned_docs/version-2.19/overview/security-benefits.md +++ /dev/null @@ -1,22 +0,0 @@ -# Security benefits and threat model - -Constellation implements the [Confidential Kubernetes](confidential-kubernetes.md) concept and shields entire Kubernetes deployments from the infrastructure. More concretely, Constellation decreases the size of the trusted computing base (TCB) of a Kubernetes deployment. The TCB is the totality of elements in a computing environment that must be trusted not to be compromised. A smaller TCB results in a smaller attack surface. The following diagram shows how Constellation removes the *cloud & datacenter infrastructure* and the *physical hosts*, including the hypervisor, the host OS, and other components, from the TCB (red). Inside the confidential context (green), Kubernetes remains part of the TCB, but its integrity is attested and can be [verified](../workflows/verify-cluster.md). - -![TCB comparison](../_media/tcb.svg) - -Given this background, the following describes the concrete threat classes that Constellation addresses. - -## Insider access - -Employees and third-party contractors of cloud service providers (CSPs) have access to different layers of the cloud infrastructure. -This opens up a large attack surface where workloads and data can be read, copied, or manipulated. With Constellation, Kubernetes deployments are shielded from the infrastructure and thus such accesses are prevented. - -## Infrastructure-based attacks - -Malicious cloud users ("hackers") may break out of their tenancy and access other tenants' data. Advanced attackers may even be able to establish a permanent foothold within the infrastructure and access data over a longer period. Analogously to the *insider access* scenario, Constellation also prevents access to a deployment's data in this scenario. - -## Supply chain attacks - -Supply chain security is receiving lots of attention recently due to an [increasing number of recorded attacks](https://www.enisa.europa.eu/news/enisa-news/understanding-the-increase-in-supply-chain-security-attacks). For instance, a malicious actor could attempt to tamper Constellation node images (including Kubernetes and other software) before they're loaded in the confidential VMs of a cluster. Constellation uses [remote attestation](../architecture/attestation.md) in conjunction with public [transparency logs](../workflows/verify-cli.md) to prevent this. - -In the future, Constellation will extend this feature to customer workloads. This will enable cluster owners to create auditable policies that precisely define which containers can run in a given deployment. diff --git a/docs/versioned_docs/version-2.19/reference/cli.md b/docs/versioned_docs/version-2.19/reference/cli.md deleted file mode 100644 index 99acef520..000000000 --- a/docs/versioned_docs/version-2.19/reference/cli.md +++ /dev/null @@ -1,844 +0,0 @@ -# CLI reference - - - -Use the Constellation CLI to create and manage your clusters. 
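A typical cluster lifecycle with the CLI looks roughly like this (the cloud provider and required flags vary; the individual commands are documented below):

```bash
constellation config generate gcp        # generate constellation-conf.yaml and the state file
constellation config fetch-measurements  # fetch reference measurements for the configured image
constellation apply                      # create the infrastructure and initialize the cluster
constellation terminate                  # tear the cluster down again
```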
- -Usage: - -``` -constellation [command] -``` -Commands: - -* [config](#constellation-config): Work with the Constellation configuration file - * [generate](#constellation-config-generate): Generate a default configuration and state file - * [fetch-measurements](#constellation-config-fetch-measurements): Fetch measurements for configured cloud provider and image - * [instance-types](#constellation-config-instance-types): Print the supported instance types for all cloud providers - * [kubernetes-versions](#constellation-config-kubernetes-versions): Print the Kubernetes versions supported by this CLI - * [migrate](#constellation-config-migrate): Migrate a configuration file to a new version -* [create](#constellation-create): Create instances on a cloud platform for your Constellation cluster -* [apply](#constellation-apply): Apply a configuration to a Constellation cluster -* [mini](#constellation-mini): Manage MiniConstellation clusters - * [up](#constellation-mini-up): Create and initialize a new MiniConstellation cluster - * [down](#constellation-mini-down): Destroy a MiniConstellation cluster -* [status](#constellation-status): Show status of a Constellation cluster -* [verify](#constellation-verify): Verify the confidential properties of a Constellation cluster -* [upgrade](#constellation-upgrade): Find and apply upgrades to your Constellation cluster - * [check](#constellation-upgrade-check): Check for possible upgrades - * [apply](#constellation-upgrade-apply): Apply an upgrade to a Constellation cluster -* [recover](#constellation-recover): Recover a completely stopped Constellation cluster -* [terminate](#constellation-terminate): Terminate a Constellation cluster -* [iam](#constellation-iam): Work with the IAM configuration on your cloud provider - * [create](#constellation-iam-create): Create IAM configuration on a cloud platform for your Constellation cluster - * [aws](#constellation-iam-create-aws): Create IAM configuration on AWS for your Constellation cluster - * [azure](#constellation-iam-create-azure): Create IAM configuration on Microsoft Azure for your Constellation cluster - * [gcp](#constellation-iam-create-gcp): Create IAM configuration on GCP for your Constellation cluster - * [destroy](#constellation-iam-destroy): Destroy an IAM configuration and delete local Terraform files - * [upgrade](#constellation-iam-upgrade): Find and apply upgrades to your IAM profile - * [apply](#constellation-iam-upgrade-apply): Apply an upgrade to an IAM profile -* [version](#constellation-version): Display version of this CLI -* [init](#constellation-init): Initialize the Constellation cluster - -## constellation config - -Work with the Constellation configuration file - -### Synopsis - -Work with the Constellation configuration file. - -### Options - -``` - -h, --help help for config -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation config generate - -Generate a default configuration and state file - -### Synopsis - -Generate a default configuration and state file for your selected cloud provider. 
- -``` -constellation config generate {aws|azure|gcp|openstack|qemu|stackit} [flags] -``` - -### Options - -``` - -a, --attestation string attestation variant to use {aws-sev-snp|aws-nitro-tpm|azure-sev-snp|azure-tdx|azure-trustedlaunch|gcp-sev-snp|gcp-sev-es|qemu-vtpm}. If not specified, the default for the cloud provider is used - -h, --help help for generate - -k, --kubernetes string Kubernetes version to use in format MAJOR.MINOR (default "v1.29") - -t, --tags strings additional tags for created resources given a list of key=value -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation config fetch-measurements - -Fetch measurements for configured cloud provider and image - -### Synopsis - -Fetch measurements for configured cloud provider and image. - -A config needs to be generated first. - -``` -constellation config fetch-measurements [flags] -``` - -### Options - -``` - -h, --help help for fetch-measurements - -s, --signature-url string alternative URL to fetch measurements' signature from - -u, --url string alternative URL to fetch measurements from -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation config instance-types - -Print the supported instance types for all cloud providers - -### Synopsis - -Print the supported instance types for all cloud providers. - -``` -constellation config instance-types [flags] -``` - -### Options - -``` - -h, --help help for instance-types -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation config kubernetes-versions - -Print the Kubernetes versions supported by this CLI - -### Synopsis - -Print the Kubernetes versions supported by this CLI. - -``` -constellation config kubernetes-versions [flags] -``` - -### Options - -``` - -h, --help help for kubernetes-versions -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation config migrate - -Migrate a configuration file to a new version - -### Synopsis - -Migrate a configuration file to a new version. - -``` -constellation config migrate [flags] -``` - -### Options - -``` - -h, --help help for migrate -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation create - -Create instances on a cloud platform for your Constellation cluster - -### Synopsis - -Create instances on a cloud platform for your Constellation cluster. 
- -``` -constellation create [flags] -``` - -### Options - -``` - -h, --help help for create - -y, --yes create the cluster without further confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation apply - -Apply a configuration to a Constellation cluster - -### Synopsis - -Apply a configuration to a Constellation cluster to initialize or upgrade the cluster. - -``` -constellation apply [flags] -``` - -### Options - -``` - --conformance enable conformance mode - -h, --help help for apply - --merge-kubeconfig merge Constellation kubeconfig file with default kubeconfig file in $HOME/.kube/config - --skip-helm-wait install helm charts without waiting for deployments to be ready - --skip-phases strings comma-separated list of upgrade phases to skip - one or multiple of { infrastructure | init | attestationconfig | certsans | helm | image | k8s } - -y, --yes run command without further confirmation - WARNING: the command might delete or update existing resources without additional checks. Please read the docs. - -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation mini - -Manage MiniConstellation clusters - -### Synopsis - -Manage MiniConstellation clusters. - -### Options - -``` - -h, --help help for mini -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation mini up - -Create and initialize a new MiniConstellation cluster - -### Synopsis - -Create and initialize a new MiniConstellation cluster. - -A mini cluster consists of a single control-plane and worker node, hosted using QEMU/KVM. - -``` -constellation mini up [flags] -``` - -### Options - -``` - -h, --help help for up - --merge-kubeconfig merge Constellation kubeconfig file with default kubeconfig file in $HOME/.kube/config (default true) -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation mini down - -Destroy a MiniConstellation cluster - -### Synopsis - -Destroy a MiniConstellation cluster. - -``` -constellation mini down [flags] -``` - -### Options - -``` - -h, --help help for down - -y, --yes terminate the cluster without further confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation status - -Show status of a Constellation cluster - -### Synopsis - -Show the status of a constellation cluster. 
- -Shows microservice, image, and Kubernetes versions installed in the cluster. Also shows status of current version upgrades. - -``` -constellation status [flags] -``` - -### Options - -``` - -h, --help help for status -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation verify - -Verify the confidential properties of a Constellation cluster - -### Synopsis - -Verify the confidential properties of a Constellation cluster. -If arguments aren't specified, values are read from `constellation-state.yaml`. - -``` -constellation verify [flags] -``` - -### Options - -``` - --cluster-id string expected cluster identifier - -h, --help help for verify - -e, --node-endpoint string endpoint of the node to verify, passed as HOST[:PORT] - -o, --output string print the attestation document in the output format {json|raw} -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation upgrade - -Find and apply upgrades to your Constellation cluster - -### Synopsis - -Find and apply upgrades to your Constellation cluster. - -### Options - -``` - -h, --help help for upgrade -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation upgrade check - -Check for possible upgrades - -### Synopsis - -Check which upgrades can be applied to your Constellation Cluster. - -``` -constellation upgrade check [flags] -``` - -### Options - -``` - -h, --help help for check - --ref string the reference to use for querying new versions (default "-") - --stream string the stream to use for querying new versions (default "stable") - -u, --update-config update the specified config file with the suggested versions -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation upgrade apply - -Apply an upgrade to a Constellation cluster - -### Synopsis - -Apply an upgrade to a Constellation cluster by applying the chosen configuration. - -``` -constellation upgrade apply [flags] -``` - -### Options - -``` - --conformance enable conformance mode - -h, --help help for apply - --skip-helm-wait install helm charts without waiting for deployments to be ready - --skip-phases strings comma-separated list of upgrade phases to skip - one or multiple of { infrastructure | helm | image | k8s } - -y, --yes run upgrades without further confirmation - WARNING: might delete your resources in case you are using cert-manager in your cluster. Please read the docs. - WARNING: might unintentionally overwrite measurements in the running cluster. 
-``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation recover - -Recover a completely stopped Constellation cluster - -### Synopsis - -Recover a Constellation cluster by sending a recovery key to an instance in the boot stage. - -This is only required if instances restart without other instances available for bootstrapping. - -``` -constellation recover [flags] -``` - -### Options - -``` - -e, --endpoint string endpoint of the instance, passed as HOST[:PORT] - -h, --help help for recover -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation terminate - -Terminate a Constellation cluster - -### Synopsis - -Terminate a Constellation cluster. - -The cluster can't be started again, and all persistent storage will be lost. - -``` -constellation terminate [flags] -``` - -### Options - -``` - -h, --help help for terminate - -y, --yes terminate the cluster without further confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation iam - -Work with the IAM configuration on your cloud provider - -### Synopsis - -Work with the IAM configuration on your cloud provider. - -### Options - -``` - -h, --help help for iam -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation iam create - -Create IAM configuration on a cloud platform for your Constellation cluster - -### Synopsis - -Create IAM configuration on a cloud platform for your Constellation cluster. - -### Options - -``` - -h, --help help for create - --update-config update the config file with the specific IAM information - -y, --yes create the IAM configuration without further confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation iam create aws - -Create IAM configuration on AWS for your Constellation cluster - -### Synopsis - -Create IAM configuration on AWS for your Constellation cluster. - -``` -constellation iam create aws [flags] -``` - -### Options - -``` - -h, --help help for aws - --prefix string name prefix for all resources (required) - --zone string AWS availability zone the resources will be created in, e.g., us-east-2a (required) - See the Constellation docs for a list of currently supported regions. 
-``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - --update-config update the config file with the specific IAM information - -C, --workspace string path to the Constellation workspace - -y, --yes create the IAM configuration without further confirmation -``` - -## constellation iam create azure - -Create IAM configuration on Microsoft Azure for your Constellation cluster - -### Synopsis - -Create IAM configuration on Microsoft Azure for your Constellation cluster. - -``` -constellation iam create azure [flags] -``` - -### Options - -``` - -h, --help help for azure - --region string region the resources will be created in, e.g., westus (required) - --resourceGroup string name prefix of the two resource groups your cluster / IAM resources will be created in (required) - --servicePrincipal string name of the service principal that will be created (required) - --subscriptionID string subscription ID of the Azure account. Required if the 'ARM_SUBSCRIPTION_ID' environment variable is not set -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - --update-config update the config file with the specific IAM information - -C, --workspace string path to the Constellation workspace - -y, --yes create the IAM configuration without further confirmation -``` - -## constellation iam create gcp - -Create IAM configuration on GCP for your Constellation cluster - -### Synopsis - -Create IAM configuration on GCP for your Constellation cluster. - -``` -constellation iam create gcp [flags] -``` - -### Options - -``` - -h, --help help for gcp - --projectID string ID of the GCP project the configuration will be created in (required) - Find it on the welcome screen of your project: https://console.cloud.google.com/welcome - --serviceAccountID string ID for the service account that will be created (required) - Must be 6 to 30 lowercase letters, digits, or hyphens. - --zone string GCP zone the cluster will be deployed in (required) - Find a list of available zones here: https://cloud.google.com/compute/docs/regions-zones#available -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - --update-config update the config file with the specific IAM information - -C, --workspace string path to the Constellation workspace - -y, --yes create the IAM configuration without further confirmation -``` - -## constellation iam destroy - -Destroy an IAM configuration and delete local Terraform files - -### Synopsis - -Destroy an IAM configuration and delete local Terraform files. 
- -``` -constellation iam destroy [flags] -``` - -### Options - -``` - -h, --help help for destroy - -y, --yes destroy the IAM configuration without asking for confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation iam upgrade - -Find and apply upgrades to your IAM profile - -### Synopsis - -Find and apply upgrades to your IAM profile. - -### Options - -``` - -h, --help help for upgrade -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation iam upgrade apply - -Apply an upgrade to an IAM profile - -### Synopsis - -Apply an upgrade to an IAM profile. - -``` -constellation iam upgrade apply [flags] -``` - -### Options - -``` - -h, --help help for apply - -y, --yes run upgrades without further confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation version - -Display version of this CLI - -### Synopsis - -Display version of this CLI. - -``` -constellation version [flags] -``` - -### Options - -``` - -h, --help help for version -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation init - -Initialize the Constellation cluster - -### Synopsis - -Initialize the Constellation cluster. - -Start your confidential Kubernetes. - -``` -constellation init [flags] -``` - -### Options - -``` - --conformance enable conformance mode - -h, --help help for init - --merge-kubeconfig merge Constellation kubeconfig file with default kubeconfig file in $HOME/.kube/config - --skip-helm-wait install helm charts without waiting for deployments to be ready -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - diff --git a/docs/versioned_docs/version-2.19/reference/migration.md b/docs/versioned_docs/version-2.19/reference/migration.md deleted file mode 100644 index 0252c409f..000000000 --- a/docs/versioned_docs/version-2.19/reference/migration.md +++ /dev/null @@ -1,128 +0,0 @@ -# Migrations - -This document describes breaking changes and migrations between Constellation releases. -Use [`constellation config migrate`](./cli.md#constellation-config-migrate) to automatically update an old config file to a new format. - -## Migrations to v2.19.1 - -### Azure - -* During the upgrade, security rules are migrated and the old ones need to be cleaned up manually by the user. 
The below script shows how to delete them through the Azure CLI: - -```bash -#!/usr/bin/env bash -name="" # the name provided in the config -uid="" # the cluster id can be retrieved via `yq '.infrastructure.uid' constellation-state.yaml` -resource_group="" # the RG can be retrieved via `yq '.provider.azure.resourceGroup' constellation-conf.yaml` - -rules=( - "kubernetes" - "bootstrapper" - "verify" - "recovery" - "join" - "debugd" - "konnectivity" -) - -for rule in "${rules[@]}"; do - echo "Deleting rule: ${rule}" - az network nsg rule delete \ - --resource-group "${resource_group}" \ - --nsg-name "${name}-${uid}" \ - --name "${rule}" -done - -echo "All specified rules have been deleted." -``` - -## Migrations to v2.19.0 - -### Azure - -* To allow seamless upgrades on Azure when Kubernetes services of type `LoadBalancer` are deployed, the target - load balancer in which the `cloud-controller-manager` creates load balancing rules was changed. Instead of using the load balancer - created and maintained by the CLI's Terraform code, the `cloud-controller-manager` now creates its own load balancer in Azure. - If your Constellation has services of type `LoadBalancer`, please remove them before the upgrade and re-apply them - afterward. - -## Migrating from Azure's service principal authentication to managed identity authentication (during the upgrade to Constellation v2.8.0) - -* The `provider.azure.appClientID` and `provider.azure.appClientSecret` fields are no longer supported and should be removed. -* To keep using an existing UAMI, add the `Owner` permission with the scope of your `resourceGroup`. -* Otherwise, simply [create new Constellation IAM credentials](../workflows/config.md#creating-an-iam-configuration) and use the created UAMI. -* To migrate the authentication for an existing cluster on Azure to an UAMI with the necessary permissions: - 1. Remove the `aadClientId` and `aadClientSecret` from the azureconfig secret. - 2. Set `useManagedIdentityExtension` to `true` and use the `userAssignedIdentity` from the Constellation config for the value of `userAssignedIdentityID`. - 3. Restart the CSI driver, cloud controller manager, cluster autoscaler, and Constellation operator pods. - -## Migrating from CLI versions before 2.10 - -* AWS cluster upgrades require additional IAM permissions for the newly introduced `aws-load-balancer-controller`. Please upgrade your IAM roles using `iam upgrade apply`. This will show necessary changes and apply them, if desired. -* The global `nodeGroups` field was added. -* The fields `instanceType`, `stateDiskSizeGB`, and `stateDiskType` for each cloud provider are now part of the configuration of individual node groups. -* The `constellation create` command no longer uses the flags `--control-plane-count` and `--worker-count`. Instead, the initial node count is configured per node group in the `nodeGroups` field. - -## Migrating from CLI versions before 2.9 - -* The `provider.azure.appClientID` and `provider.azure.clientSecretValue` fields were removed to enforce migration to managed identity authentication - -## Migrating from CLI versions before 2.8 - -* The `measurements` field for each cloud service provider was replaced with a global `attestation` field. -* The `confidentialVM`, `idKeyDigest`, and `enforceIdKeyDigest` fields for the Azure cloud service provider were removed in favor of using the global `attestation` field. -* The optional global field `attestationVariant` was replaced by the now required `attestation` field. 
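As a rough illustration of the pre-2.9 and pre-2.8 steps above, the removed Azure fields could be dropped from an old config before letting the CLI migrate the rest. This is only a sketch; the `yq` paths are taken from the field names listed above, and you should review the resulting file:

```bash
# Drop fields that are no longer supported (paths assumed from the notes above)
yq -i 'del(.provider.azure.appClientID) | del(.provider.azure.clientSecretValue)' constellation-conf.yaml

# Let the CLI migrate the remaining config to the current format
constellation config migrate
```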
- -## Migrating from CLI versions before 2.3 - -* The `sshUsers` field was deprecated in v2.2 and has been removed from the configuration in v2.3. - As an alternative for SSH, check the workflow section [Connect to nodes](../workflows/troubleshooting.md#node-shell-access). -* The `image` field for each cloud service provider has been replaced with a global `image` field. Use the following mapping to migrate your configuration: -
- Show all - - | CSP | old image | new image | - | ----- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- | - | AWS | `ami-06b8cbf4837a0a57c` | `v2.2.2` | - | AWS | `ami-02e96dc04a9e438cd` | `v2.2.2` | - | AWS | `ami-028ead928a9034b2f` | `v2.2.2` | - | AWS | `ami-032ac10dd8d8266e3` | `v2.2.1` | - | AWS | `ami-032e0d57cc4395088` | `v2.2.1` | - | AWS | `ami-053c3e49e19b96bdd` | `v2.2.1` | - | AWS | `ami-0e27ebcefc38f648b` | `v2.2.0` | - | AWS | `ami-098cd37f66523b7c3` | `v2.2.0` | - | AWS | `ami-04a87d302e2509aad` | `v2.2.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.2.2` | `v2.2.2` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.2.2` | `v2.2.2` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.2.1` | `v2.2.1` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.2.1` | `v2.2.1` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.2.0` | `v2.2.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.2.0` | `v2.2.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.1.0` | `v2.1.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.1.0` | `v2.1.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.0.0` | `v2.0.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.0.0` | `v2.0.0` | - | GCP | `projects/constellation-images/global/images/constellation-v2-2-2` | `v2.2.2` | - | GCP | `projects/constellation-images/global/images/constellation-v2-2-1` | `v2.2.1` | - | GCP | `projects/constellation-images/global/images/constellation-v2-2-0` | `v2.2.0` | - | GCP | `projects/constellation-images/global/images/constellation-v2-1-0` | `v2.1.0` | - | GCP | `projects/constellation-images/global/images/constellation-v2-0-0` | `v2.0.0` | - -
-* The `enforcedMeasurements` field has been removed and merged with the `measurements` field. - * To migrate your config containing a new image (`v2.3` or greater), remove the old `measurements` and `enforcedMeasurements` entries from your config and run `constellation fetch-measurements` - * To migrate your config containing an image older than `v2.3`, remove the `enforcedMeasurements` entry and replace the entries in `measurements` as shown in the example below: - - ```diff - measurements: - - 0: DzXCFGCNk8em5ornNZtKi+Wg6Z7qkQfs5CfE3qTkOc8= - + 0: - + expected: DzXCFGCNk8em5ornNZtKi+Wg6Z7qkQfs5CfE3qTkOc8= - + warnOnly: true - - 8: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= - + 8: - + expected: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= - + warnOnly: false - -enforcedMeasurements: - - - 8 - ``` diff --git a/docs/versioned_docs/version-2.19/reference/slsa.md b/docs/versioned_docs/version-2.19/reference/slsa.md deleted file mode 100644 index 21f4e713c..000000000 --- a/docs/versioned_docs/version-2.19/reference/slsa.md +++ /dev/null @@ -1,73 +0,0 @@ -# Supply chain levels for software artifacts (SLSA) adoption - -[Supply chain Levels for Software Artifacts, or SLSA (salsa)](https://slsa.dev/) is a framework for improving and grading a project's build system and engineering processes. SLSA focuses on security improvements for source code storage as well as build system definition, execution, and observation. SLSA is structured in [four levels](https://slsa.dev/spec/v0.1/levels). This page describes the adoption of SLSA for Constellation. - -:::info -SLSA is still in alpha status. The presented levels and their requirements might change in the future. We will adopt any changes into our engineering processes, as they get defined. -::: - -## Level 1 - Adopted - -**[Build - Scripted](https://slsa.dev/spec/v0.1/requirements#scripted-build)** - -All build steps are automated via [Bazel](https://github.com/edgelesssys/constellation/tree/main/bazel/ci) and [GitHub Actions](https://github.com/edgelesssys/constellation/tree/main/.github). - -**[Provenance - Available](https://slsa.dev/spec/v0.1/requirements#available)** - -Provenance for the CLI is generated using the [slsa-github-generator](https://github.com/slsa-framework/slsa-github-generator). - -## Level 2 - Adopted - -**[Source - Version Controlled](https://slsa.dev/spec/v0.1/requirements#version-controlled)** - -Constellation is hosted on GitHub using git. - -**[Build - Build Service](https://slsa.dev/spec/v0.1/requirements#build-service)** - -All builds are carried out by [GitHub Actions](https://github.com/edgelesssys/constellation/tree/main/.github). - -**[Provenance - Authenticated](https://slsa.dev/spec/v0.1/requirements#authenticated)** - -Provenance for the CLI is signed using the [slsa-github-generator](https://github.com/slsa-framework/slsa-github-generator). Learn [how to verify the CLI](../workflows/verify-cli.md) using the signed provenance, before using it for the first time. - -**[Provenance - Service Generated](https://slsa.dev/spec/v0.1/requirements#service-generated)** - -Provenance for the CLI is generated using the [slsa-github-generator](https://github.com/slsa-framework/slsa-github-generator) in GitHub Actions. 
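For illustration, the authenticated provenance can be checked with the SLSA project's `slsa-verifier` before using the CLI for the first time. This is a hedged sketch with assumed artifact and provenance file names; see the linked CLI verification guide for the authoritative steps:

```bash
# Verify a downloaded CLI binary against its signed provenance (file names assumed)
slsa-verifier verify-artifact constellation-linux-amd64 \
  --provenance-path constellation.intoto.jsonl \
  --source-uri github.com/edgelesssys/constellation
```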
-
-## Level 3 - Adopted
-
-**[Source - Verified History](https://slsa.dev/spec/v0.1/requirements#verified-history)**
-
-The [Edgeless Systems](https://github.com/edgelesssys) GitHub organization [requires two-factor authentication](https://docs.github.com/en/organizations/keeping-your-organization-secure/managing-two-factor-authentication-for-your-organization/requiring-two-factor-authentication-in-your-organization) for all members.
-
-**[Source - Retained Indefinitely](https://slsa.dev/spec/v0.1/requirements#retained-indefinitely)**
-
-Since we use GitHub to host the repository, an external person can't modify or delete the history. Before a pull request can be merged, an explicit approval from an [Edgeless Systems](https://github.com/edgelesssys) team member is required.
-
-The same holds true for changes proposed by team members. Each change to `main` needs to be proposed via a pull request and requires at least one approval.
-
-The [Edgeless Systems](https://github.com/edgelesssys) GitHub organization admins control these settings and are able to make changes to the repository's history should legal requirements necessitate it. These changes require two-party approval following the obliterate policy.
-
-**[Build - Build as Code](https://slsa.dev/spec/v0.1/requirements#build-as-code)**
-
-All build files for Constellation are stored in [the same repository](https://github.com/edgelesssys/constellation/tree/main/.github).
-
-**[Build - Ephemeral Environment](https://slsa.dev/spec/v0.1/requirements#ephemeral-environment)**
-
-All GitHub Action workflows are executed on [GitHub-hosted runners](https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners). These runners are only available during workflow execution.
-
-We currently don't use [self-hosted runners](https://docs.github.com/en/actions/hosting-your-own-runners/about-self-hosted-runners).
-
-**[Build - Isolated](https://slsa.dev/spec/v0.1/requirements#isolated)**
-
-As outlined in the previous section, we use GitHub-hosted runners, which provide a new, isolated and ephemeral environment for each build.
-
-Additionally, the [SLSA GitHub generator](https://github.com/slsa-framework/slsa-github-generator#generation-of-provenance) itself is run in an isolated workflow with the artifact hash as defined inputs.
-
-**[Provenance - Non-falsifiable](https://slsa.dev/spec/v0.1/requirements#non-falsifiable)**
-
-As outlined by the [SLSA GitHub generator](https://github.com/slsa-framework/slsa-github-generator) documentation, it already fulfills the non-falsifiable requirements for SLSA Level 3. The generated provenance is signed using [sigstore](https://sigstore.dev/) with an OIDC-based proof of identity.
-
-## Level 4 - In Progress
-
-We strive to adopt certain aspects of SLSA Level 4 that support our engineering processes. At the same time, SLSA is still in alpha status and the biggest changes to SLSA are expected to be around Level 4.
diff --git a/docs/versioned_docs/version-2.19/reference/terraform.md b/docs/versioned_docs/version-2.19/reference/terraform.md
deleted file mode 100644
index 9825a8bb8..000000000
--- a/docs/versioned_docs/version-2.19/reference/terraform.md
+++ /dev/null
@@ -1,37 +0,0 @@
-# Terraform usage
-
-[Terraform](https://www.terraform.io/) is an Infrastructure as Code (IaC) framework to manage cloud resources. This page explains how Constellation uses it internally and how advanced users may manually use it to have more control over resource creation.
- -:::info -Information on this page is intended for users who are familiar with Terraform. -It's not required for common usage of Constellation. -See the [Terraform documentation](https://developer.hashicorp.com/terraform/docs) if you want to learn more about it. -::: - -## Terraform state files - -Constellation keeps Terraform state files in subdirectories of the workspace together with the corresponding Terraform configuration files and metadata. -The subdirectories are created on the first Constellation CLI action that uses Terraform internally. - -Currently, these subdirectories are: - -* `constellation-terraform` - Terraform state files for the resources of the Constellation cluster -* `constellation-iam-terraform` - Terraform state files for IAM configuration - -As with all commands, commands that work with these files (e.g., `apply`, `terminate`, `iam`) have to be executed from the root of the cluster's [workspace directory](../architecture/orchestration.md#workspaces). You usually don't need and shouldn't manipulate or delete the subdirectories manually. - -## Interacting with Terraform manually - -Manual interaction with Terraform state created by Constellation (i.e., via the Terraform CLI) should only be performed by experienced users. It may lead to unrecoverable loss of cloud resources. For the majority of users and use cases, the interaction done by the [Constellation CLI](cli.md) is sufficient. - -## Terraform debugging - -To debug Terraform issues, the Constellation CLI offers the `tf-log` flag. You can set it to any of [Terraform's log levels](https://developer.hashicorp.com/terraform/internals/debugging): -* `JSON` (JSON-formatted logs at `TRACE` level) -* `TRACE` -* `DEBUG` -* `INFO` -* `WARN` -* `ERROR` - -The log output is written to the `terraform.log` file in the workspace directory. The output is appended to the file on each run. diff --git a/docs/versioned_docs/version-2.19/workflows/cert-manager.md b/docs/versioned_docs/version-2.19/workflows/cert-manager.md deleted file mode 100644 index 1d847e8bf..000000000 --- a/docs/versioned_docs/version-2.19/workflows/cert-manager.md +++ /dev/null @@ -1,13 +0,0 @@ -# Install cert-manager - -:::caution -If you want to use cert-manager with Constellation, pay attention to the following to avoid potential pitfalls. -::: - -Constellation ships with cert-manager preinstalled. -The default installation is part of the `kube-system` namespace, as all other Constellation-managed microservices. -You are free to install more instances of cert-manager into other namespaces. -However, be aware that any new installation needs to use the same version as the one installed with Constellation or rely on the same CRD versions. -Also remember to set the `installCRDs` value to `false` when installing new cert-manager instances. -It will create problems if you have two installations of cert-manager depending on different versions of the installed CRDs. -CRDs are cluster-wide resources and cert-manager depends on specific versions of those CRDs for each release. diff --git a/docs/versioned_docs/version-2.19/workflows/config.md b/docs/versioned_docs/version-2.19/workflows/config.md deleted file mode 100644 index a8a52980e..000000000 --- a/docs/versioned_docs/version-2.19/workflows/config.md +++ /dev/null @@ -1,353 +0,0 @@ -# Configure your cluster - -:::info -This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. 
-::: - - - ---- - -Before you can create your cluster, you need to configure the identity and access management (IAM) for your cloud service provider (CSP) and choose machine types for the nodes. - -## Creating the configuration file - -You can generate a configuration file for your CSP by using the following CLI command: - - - - -```bash -constellation config generate aws -``` - - - - -```bash -constellation config generate azure -``` - - - - -```bash -constellation config generate gcp -``` - - - - -```bash -constellation config generate stackit -``` - - - - -This creates the file `constellation-conf.yaml` in the current directory. - -## Choosing a VM type - -Constellation supports the following VM types: - - - -By default, Constellation uses `m6a.xlarge` VMs (4 vCPUs, 16 GB RAM) to create your cluster. -Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file. -If you are using the default attestation variant `awsSEVSNP`, you can use the instance types described in [AWS's AMD SEV-SNP docs](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/snp-requirements.html). -Please mind the region restrictions mentioned in the [Getting started](../getting-started/first-steps.md#create-a-cluster) section. - -If you are using the attestation variant `awsNitroTPM`, you can choose any of the [nitroTPM-enabled instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enable-nitrotpm-prerequisites.html). - -The Constellation CLI can also print the supported instance types with: `constellation config instance-types`. - - - - -By default, Constellation uses `Standard_DC4as_v5` CVMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file. For CVMs, any VM type with a minimum of 4 vCPUs from the [DCasv5 & DCadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series) or [ECasv5 & ECadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/ecasv5-ecadsv5-series) families is supported. - -You can also run `constellation config instance-types` to get the list of all supported options. - - - - -By default, Constellation uses `n2d-standard-4` VMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file. Supported are all machines with a minimum of 4 vCPUs from the [C2D](https://cloud.google.com/compute/docs/compute-optimized-machines#c2d_machine_types) or [N2D](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines) family. You can run `constellation config instance-types` to get the list of all supported options. - - - - -By default, Constellation uses `m1a.4cd` VMs (4 vCPUs, 30 GB RAM) to create your cluster. -Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file. - -The following instance types are known to be supported: - -| name | vCPUs | GB RAM | -|----------|-------|--------| -| m1a.4cd | 4 | 30 | -| m1a.8cd | 8 | 60 | -| m1a.16cd | 16 | 120 | -| m1a.30cd | 30 | 230 | - -You can choose any of the SEV-enabled instance types. You can find a list of all supported instance types in the [STACKIT documentation](https://docs.stackit.cloud/stackit/en/virtual-machine-flavors-75137231.html). - -The Constellation CLI can also print the supported instance types with: `constellation config instance-types`. 
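Whichever CSP tab applies, it can help to let the CLI list the supported options before editing the file; for example:

```bash
# List VM types supported by this CLI version for the configured CSP
constellation config instance-types

# List Kubernetes versions this CLI can install
constellation config kubernetes-versions
```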
- - - - -Fill the desired VM type into the `instanceType` fields in the `constellation-conf.yml` file. - -## Creating additional node groups - -By default, Constellation creates the node groups `control_plane_default` and `worker_default` for control-plane nodes and workers, respectively. -If you require additional control-plane or worker groups with different instance types, zone placements, or disk sizes, you can add additional node groups to the `constellation-conf.yml` file. -Each node group can be scaled individually. - -Consider the following example for AWS: - -```yaml -nodeGroups: - control_plane_default: - role: control-plane - instanceType: c6a.xlarge - stateDiskSizeGB: 30 - stateDiskType: gp3 - zone: eu-west-1c - initialCount: 3 - worker_default: - role: worker - instanceType: c6a.xlarge - stateDiskSizeGB: 30 - stateDiskType: gp3 - zone: eu-west-1c - initialCount: 2 - high_cpu: - role: worker - instanceType: c6a.24xlarge - stateDiskSizeGB: 128 - stateDiskType: gp3 - zone: eu-west-1c - initialCount: 1 -``` - -This configuration creates an additional node group `high_cpu` with a larger instance type and disk. - -You can use the field `zone` to specify what availability zone nodes of the group are placed in. -On Azure, this field is empty by default and nodes are automatically spread across availability zones. -STACKIT currently offers SEV-enabled CPUs in the `eu01-1`, `eu01-2`, and `eu01-3` zones. -Consult the documentation of your cloud provider for more information: - -* [AWS](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/) -* [Azure](https://azure.microsoft.com/en-us/explore/global-infrastructure/availability-zones) -* [GCP](https://cloud.google.com/compute/docs/regions-zones) -* [STACKIT](https://docs.stackit.cloud/stackit/en/regions-and-availability-zones-75137212.html) - -## Choosing a Kubernetes version - -To learn which Kubernetes versions can be installed with your current CLI, you can run `constellation config kubernetes-versions`. -See also Constellation's [Kubernetes support policy](../architecture/versions.md#kubernetes-support-policy). - -## Creating an IAM configuration - -You can create an IAM configuration for your cluster automatically using the `constellation iam create` command. -If you already have a Constellation configuration file, you can add the `--update-config` flag to the command. This writes the needed IAM fields into your configuration. Furthermore, the flag updates the zone/region of the configuration if it hasn't been set yet. - - - - -You must be authenticated with the [AWS CLI](https://aws.amazon.com/en/cli/) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). - -```bash -constellation iam create aws --zone=us-east-2a --prefix=constellTest -``` - -This command creates IAM configuration for the AWS zone `us-east-2a` using the prefix `constellTest` for all named resources being created. - -Constellation OS images are currently replicated to the following regions: - -* `eu-central-1` -* `eu-west-1` -* `eu-west-3` -* `us-east-2` -* `ap-south-1` - -If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+AWS+image+region:+xx-xxxx-x). 
- -You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). - -Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - - - -You must be authenticated with the [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). - -```bash -constellation iam create azure --subscriptionID 00000000-0000-0000-0000-000000000000 --region=westus --resourceGroup=constellTest --servicePrincipal=spTest -``` - -This command creates IAM configuration on the Azure region `westus` creating a new resource group `constellTest` and a new service principal `spTest`. - -CVMs are available in several Azure regions. Constellation OS images are currently replicated to the following: - -* `germanywestcentral` -* `westus` -* `eastus` -* `northeurope` -* `westeurope` -* `southeastasia` - -If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+Azure+image+region:+xx-xxxx-x). - -You can find a list of all [regions in Azure's documentation](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=virtual-machines®ions=all). - -Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - - - -You must be authenticated with the [GCP CLI](https://cloud.google.com/sdk/gcloud) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). - -```bash -constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --serviceAccountID=constell-test -``` - -This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. - -Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `N2D`. - -Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - - - -STACKIT requires manual creation and configuration of service accounts. Look at the [first steps](../getting-started/first-steps.md) for more information. - - - - -
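If a `constellation-conf.yaml` already exists in the workspace, the copy-and-paste step can usually be skipped by letting `iam create` write its output directly into the config via the `--update-config` flag. A sketch using the AWS example values from above:

```bash
# Create the IAM resources and write the resulting fields into constellation-conf.yaml
constellation iam create aws \
  --zone=us-east-2a \
  --prefix=constellTest \
  --update-config
```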
-Alternatively, you can manually create the IAM configuration on your CSP. - -The following describes the configuration fields and how you obtain the required information or create the required resources. - - - - -* **region**: The name of your chosen AWS data center region, e.g., `us-east-2`. - - Constellation OS images are currently replicated to the following regions: - * `eu-central-1` - * `eu-west-1` - * `eu-west-3` - * `us-east-2` - * `ap-south-1` - - If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+AWS+image+region:+xx-xxxx-x). - - You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). - -* **zone**: The name of your chosen AWS data center availability zone, e.g., `us-east-2a`. - - Learn more about [availability zones in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-availability-zones). - -* **iamProfileControlPlane**: The name of an IAM instance profile attached to all control-plane nodes. - - You can create the resource with [Terraform](https://www.terraform.io/). For that, use the [provided Terraform script](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam) to generate the necessary profile. The profile name will be provided as Terraform output value: `control_plane_instance_profile_name`. - - Alternatively, you can create the AWS profile with a tool of your choice. Use the JSON policy in [main.tf](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam/main.tf) in the resource `aws_iam_policy.control_plane_policy`. - -* **iamProfileWorkerNodes**: The name of an IAM instance profile attached to all worker nodes. - - You can create the resource with [Terraform](https://www.terraform.io/). For that, use the [provided Terraform script](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam) to generate the necessary profile. The profile name will be provided as Terraform output value: `worker_nodes_instance_profile_name`. - - Alternatively, you can create the AWS profile with a tool of your choice. Use the JSON policy in [main.tf](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam/main.tf) in the resource `aws_iam_policy.worker_node_policy`. - - - - -* **subscription**: The UUID of your Azure subscription, e.g., `8b8bd01f-efd9-4113-9bd1-c82137c32da7`. - - You can view your subscription UUID via `az account show` and read the `id` field. For more information refer to [Azure's documentation](https://docs.microsoft.com/en-us/azure/azure-portal/get-subscription-tenant-id#find-your-azure-subscription). - -* **tenant**: The UUID of your Azure tenant, e.g., `3400e5a2-8fe2-492a-886c-38cb66170f25`. - - You can view your tenant UUID via `az account show` and read the `tenant` field. For more information refer to [Azure's documentation](https://docs.microsoft.com/en-us/azure/azure-portal/get-subscription-tenant-id#find-your-azure-ad-tenant). - -* **location**: The Azure datacenter location you want to deploy your cluster in, e.g., `westus`. - - CVMs are available in several Azure regions. 
Constellation OS images are currently replicated to the following: - - * `germanywestcentral` - * `westus` - * `eastus` - * `northeurope` - * `westeurope` - * `southeastasia` - - If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+Azure+image+region:+xx-xxxx-x). - - You can find a list of all [regions in Azure's documentation](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=virtual-machines®ions=all). - -* **resourceGroup**: [Create a new resource group in Azure](https://learn.microsoft.com/azure/azure-resource-manager/management/manage-resource-groups-portal) for your Constellation cluster. Set this configuration field to the name of the created resource group. - -* **userAssignedIdentity**: [Create a new managed identity in Azure](https://learn.microsoft.com/azure/active-directory/managed-identities-azure-resources/how-manage-user-assigned-managed-identities). You should create the identity in a different resource group as all resources within the cluster resource group will be deleted on cluster termination. - - Add three role assignments to the identity: `Owner`, `Virtual Machine Contributor`, and `Application Insights Component Contributor`. The `scope` of all three should refer to the previously created cluster resource group. - - Set the configuration value to the full ID of the created identity, e.g., `/subscriptions/8b8bd01f-efd9-4113-9bd1-c82137c32da7/resourcegroups/constellation-identity/providers/Microsoft.ManagedIdentity/userAssignedIdentities/constellation-identity`. You can get it by opening the `JSON View` from the `Overview` section of the identity. - - The user-assigned identity is used by instances of the cluster to access other cloud resources. - For more information about managed identities refer to [Azure's documentation](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/how-manage-user-assigned-managed-identities). - - - - -* **project**: The ID of your GCP project, e.g., `constellation-129857`. - - You can find it on the [welcome screen of your GCP project](https://console.cloud.google.com/welcome). For more information refer to [Google's documentation](https://support.google.com/googleapi/answer/7014113). - -* **region**: The GCP region you want to deploy your cluster in, e.g., `us-central1`. - - You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). - -* **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-central1-a`. - - You can find a [list of all zones in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). - -* **serviceAccountKeyPath**: To configure this, you need to create a GCP [service account](https://cloud.google.com/iam/docs/service-accounts) with the following permissions: - - * `Compute Instance Admin (v1) (roles/compute.instanceAdmin.v1)` - * `Compute Network Admin (roles/compute.networkAdmin)` - * `Compute Security Admin (roles/compute.securityAdmin)` - * `Compute Storage Admin (roles/compute.storageAdmin)` - * `Service Account User (roles/iam.serviceAccountUser)` - - Afterward, create and download a new JSON key for this service account. Place the downloaded file in your Constellation workspace, and set the config parameter to the filename, e.g., `constellation-129857-15343dba46cb.json`. 
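A possible way to create such a service account and key with the gcloud CLI is sketched below; the project ID and account name are the example values from above, and granting these roles requires sufficient permissions on the project:

```bash
PROJECT_ID="constellation-129857"   # example project ID from above
SA_NAME="constell-test"             # example service account name
SA_EMAIL="${SA_NAME}@${PROJECT_ID}.iam.gserviceaccount.com"

# Create the service account
gcloud iam service-accounts create "${SA_NAME}" --project="${PROJECT_ID}"

# Grant the roles listed above
for role in compute.instanceAdmin.v1 compute.networkAdmin compute.securityAdmin compute.storageAdmin iam.serviceAccountUser; do
  gcloud projects add-iam-policy-binding "${PROJECT_ID}" \
    --member="serviceAccount:${SA_EMAIL}" --role="roles/${role}"
done

# Create and download a JSON key; reference the file in serviceAccountKeyPath
gcloud iam service-accounts keys create "${PROJECT_ID}-sa-key.json" --iam-account="${SA_EMAIL}"
```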
- - - - -STACKIT requires manual creation and configuration of service accounts. Look at the [first steps](../getting-started/first-steps.md) for more information. - - - -
- -Now that you've configured your CSP, you can [create your cluster](./create.md). - -## Deleting an IAM configuration - -You can keep a created IAM configuration and reuse it for new clusters. Alternatively, you can also delete it if you don't want to use it anymore. - -Delete the IAM configuration by executing the following command in the same directory where you executed `constellation iam create` (the directory that contains [`constellation-iam-terraform`](../reference/terraform.md) as a subdirectory): - -```bash -constellation iam destroy -``` - -:::caution -For Azure, deleting the IAM configuration by executing `constellation iam destroy` will delete the whole resource group created by `constellation iam create`. -This also includes any additional resources in the resource group that weren't created by Constellation. -::: diff --git a/docs/versioned_docs/version-2.19/workflows/create.md b/docs/versioned_docs/version-2.19/workflows/create.md deleted file mode 100644 index 6074ebb16..000000000 --- a/docs/versioned_docs/version-2.19/workflows/create.md +++ /dev/null @@ -1,93 +0,0 @@ -# Create your cluster - -:::info -This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. -::: - - - ---- - -Creating your cluster happens through multiple phases. -The most significant ones are: - -1. Creating the necessary resources in your cloud environment -2. Bootstrapping the Constellation cluster and setting up a connection -3. Installing the necessary Kubernetes components - -`constellation apply` handles all this in a single command. -You can use the `--skip-phases` flag to skip specific phases of the process. -For example, if you created the infrastructure manually, you can skip the cloud resource creation phase. - -See the [architecture](../architecture/orchestration.md) section for details on the inner workings of this process. - -:::tip -If you don't have a cloud subscription, you can also set up a [local Constellation cluster using virtualization](../getting-started/first-steps-local.md) for testing. -::: - -Before you create the cluster, make sure to have a [valid configuration file](./config.md). - - - - -```bash -constellation apply -``` - -`apply` stores the state of your cluster's cloud resources in a [`constellation-terraform`](../architecture/orchestration.md#cluster-creation-process) directory in your workspace. - - - - -Self-managed infrastructure allows for more flexibility in the setup, by separating the infrastructure setup from the Constellation cluster management. -This provides flexibility in DevOps and can meet potential regulatory requirements. -It's recommended to use Terraform for infrastructure management, but you can use any tool of your choice. - -:::info - - When using Terraform, you can use the [Constellation Terraform provider](./terraform-provider.md) to manage the entire Constellation cluster lifecycle. - -::: - -You can refer to the Terraform files for the selected CSP from the [Constellation GitHub repository](https://github.com/edgelesssys/constellation/tree/main/terraform/infrastructure) for a minimum Constellation cluster configuration. From this base, you can now add, edit, or substitute resources per your own requirements with the infrastructure -management tooling of your choice. You need to keep the essential functionality of the base configuration in order for your cluster to function correctly. - - - -:::info - - On Azure, a manual update to the MAA provider's policy is necessary. 
-  You can apply the update with the following command after creating the infrastructure, with `` being the URL of the MAA provider (i.e., `$(terraform output attestation_url | jq -r)`, when using the minimal Terraform configuration).
-
-  ```bash
-  constellation maa-patch 
-  ```
-
-:::
-
-
-
-Make sure all necessary resources are created, e.g., by checking your CSP's portal, and retrieve the necessary values, aligned with the outputs (specified in `outputs.tf`) of the base configuration.
-
-Fill these outputs into the corresponding fields of the `Infrastructure` block inside the `constellation-state.yaml` file. For example, fill the IP or DNS name your cluster can be reached at into the `.Infrastructure.ClusterEndpoint` field.
-
-With the required cloud resources set up, continue with initializing your cluster.
-
-```bash
-constellation apply --skip-phases=infrastructure
-```
-
-
-
-Finally, configure `kubectl` for your cluster:
-
-```bash
-export KUBECONFIG="$PWD/constellation-admin.conf"
-```
-
-🏁 That's it. You've successfully created a Constellation cluster.
-
-### Troubleshooting
-
-In case `apply` fails, the CLI collects logs from the bootstrapping instance and stores them inside `constellation-cluster.log`.
diff --git a/docs/versioned_docs/version-2.19/workflows/lb.md b/docs/versioned_docs/version-2.19/workflows/lb.md
deleted file mode 100644
index 868e61076..000000000
--- a/docs/versioned_docs/version-2.19/workflows/lb.md
+++ /dev/null
@@ -1,28 +0,0 @@
-# Expose a service
-
-Constellation integrates the native load balancers of each CSP. Therefore, to expose a service, simply [create a service of type `LoadBalancer`](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer).
-
-## Internet-facing LB service on AWS
-
-To expose your application service externally, you might want to use a Kubernetes Service of type `LoadBalancer`. On AWS, load balancing is achieved through the [AWS Load Balancer Controller](https://kubernetes-sigs.github.io/aws-load-balancer-controller), as in managed EKS.
-
-Since recent versions, the controller deploys an internal LB by default; to get an internet-facing LB, you need to set the annotation `service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing`. For more details, see the [official docs](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.7/guide/service/nlb/).
-
-For general information on load balancing with AWS, see [Network load balancing on Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/network-load-balancing.html).
-
-:::caution
-Before terminating the cluster, all LB-backed services should be deleted so that the controller can clean up the related resources.
-:::
-
-## Ingress on AWS
-
-The AWS Load Balancer Controller also provisions `Ingress` resources of class `alb`.
-AWS Application Load Balancers (ALBs) can be configured with a [`target-type`](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.7/guide/ingress/annotations/#target-type).
-The target type `ip` requires using the EKS container network solution, which makes it incompatible with Constellation.
-If a service can be exposed on a `NodePort`, the target type `instance` can be used.
-
-See [Application load balancing on Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html) for more information.
-
-:::caution
-Ingress handlers backed by AWS ALBs reside outside the Constellation cluster, so they shouldn't be handling sensitive traffic!
-::: diff --git a/docs/versioned_docs/version-2.19/workflows/recovery.md b/docs/versioned_docs/version-2.19/workflows/recovery.md deleted file mode 100644 index 592ae247b..000000000 --- a/docs/versioned_docs/version-2.19/workflows/recovery.md +++ /dev/null @@ -1,179 +0,0 @@ -# Recover your cluster - -Recovery of a Constellation cluster means getting it back into a healthy state after too many concurrent node failures in the control plane. -Reasons for an unhealthy cluster can vary from a power outage, or planned reboot, to migration of nodes and regions. -Recovery events are rare, because Constellation is built for high availability and automatically and securely replaces failed nodes. When a node is replaced, Constellation's control plane first verifies the new node before it sends the node the cryptographic keys required to decrypt its [state disk](../architecture/images.md#state-disk). - -Constellation provides a recovery mechanism for cases where the control plane has failed and is unable to replace nodes. -The `constellation recover` command securely connects to all nodes in need of recovery using [attested TLS](../architecture/attestation.md#attested-tls-atls) and provides them with the keys to decrypt their state disks and continue booting. - -## Identify unhealthy clusters - -The first step to recovery is identifying when a cluster becomes unhealthy. -Usually, this can be first observed when the Kubernetes API server becomes unresponsive. - -You can check the health status of the nodes via the cloud service provider (CSP). -Constellation provides logging information on the boot process and status via serial console output. -In the following, you'll find detailed descriptions for identifying clusters stuck in recovery for each CSP. - - - - -First, open the AWS console to view all Auto Scaling Groups (ASGs) in the region of your cluster. Select the ASG of the control plane `--control-plane` and check that enough members are in a *Running* state. - -Second, check the boot logs of these *Instances*. In the ASG's *Instance management* view, select each desired instance. In the upper right corner, select **Action > Monitor and troubleshoot > Get system log**. - -In the serial console output, search for `Waiting for decryption key`. -Similar output to the following means your node was restarted and needs to decrypt the [state disk](../architecture/images.md#state-disk): - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","caller":"cmd/main.go:55","msg":"Starting disk-mapper","version":"2.0.0","cloudProvider":"gcp"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"setupManager","caller":"setup/setup.go:72","msg":"Preparing existing state disk"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:65","msg":"Starting RejoinClient"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"recoveryServer","caller":"recoveryserver/server.go:59","msg":"Starting RecoveryServer"} -``` - -The node will then try to connect to the [*JoinService*](../architecture/microservices.md#joinservice) and obtain the decryption key. 
-If this fails due to an unhealthy control plane, you will see log messages similar to the following: - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:77","msg":"Received list with JoinService endpoints","endpoints":["192.168.178.4:30090","192.168.178.2:30090"]} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.4:30090"} -{"level":"WARN","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.4:30090: connect: connection refused\"","endpoint":"192.168.178.4:30090"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.2:30090"} -{"level":"WARN","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.2:30090: i/o timeout\"","endpoint":"192.168.178.2:30090"} -{"level":"ERROR","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:110","msg":"Failed to rejoin on all endpoints"} -``` - -This means that you have to recover the node manually. - - - - -In the Azure portal, find the cluster's resource group. -Inside the resource group, open the control plane *Virtual machine scale set* `constellation-scale-set-controlplanes-`. -On the left, go to **Settings** > **Instances** and check that enough members are in a *Running* state. - -Second, check the boot logs of these *Instances*. -In the scale set's *Instances* view, open the details page of the desired instance. -On the left, go to **Support + troubleshooting** > **Serial console**. - -In the serial console output, search for `Waiting for decryption key`. -Similar output to the following means your node was restarted and needs to decrypt the [state disk](../architecture/images.md#state-disk): - -```json -{"level":"INFO","ts":"2022-09-08T09:56:41Z","caller":"cmd/main.go:55","msg":"Starting disk-mapper","version":"2.0.0","cloudProvider":"azure"} -{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"setupManager","caller":"setup/setup.go:72","msg":"Preparing existing state disk"} -{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"recoveryServer","caller":"recoveryserver/server.go:59","msg":"Starting RecoveryServer"} -{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"rejoinClient","caller":"rejoinclient/client.go:65","msg":"Starting RejoinClient"} -``` - -The node will then try to connect to the [*JoinService*](../architecture/microservices.md#joinservice) and obtain the decryption key. 
-If this fails due to an unhealthy control plane, you will see log messages similar to the following: - -```json -{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"rejoinClient","caller":"rejoinclient/client.go:77","msg":"Received list with JoinService endpoints","endpoints":["10.9.0.5:30090","10.9.0.6:30090"]} -{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"10.9.0.5:30090"} -{"level":"WARN","ts":"2022-09-08T09:57:03Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 10.9.0.5:30090: i/o timeout\"","endpoint":"10.9.0.5:30090"} -{"level":"INFO","ts":"2022-09-08T09:57:03Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"10.9.0.6:30090"} -{"level":"WARN","ts":"2022-09-08T09:57:23Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 10.9.0.6:30090: i/o timeout\"","endpoint":"10.9.0.6:30090"} -{"level":"ERROR","ts":"2022-09-08T09:57:23Z","logger":"rejoinClient","caller":"rejoinclient/client.go:110","msg":"Failed to rejoin on all endpoints"} -``` - -This means that you have to recover the node manually. - - - - -First, check that the control plane *Instance Group* has enough members in a *Ready* state. -In the GCP Console, go to **Instance Groups** and check the group for the cluster's control plane `-control-plane-`. - -Second, check the status of the *VM Instances*. -Go to **VM Instances** and open the details of the desired instance. -Check the serial console output of that instance by opening the **Logs** > **Serial port 1 (console)** page: - -![GCP portal serial console link](../_media/recovery-gcp-serial-console-link.png) - -In the serial console output, search for `Waiting for decryption key`. -Similar output to the following means your node was restarted and needs to decrypt the [state disk](../architecture/images.md#state-disk): - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","caller":"cmd/main.go:55","msg":"Starting disk-mapper","version":"2.0.0","cloudProvider":"gcp"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"setupManager","caller":"setup/setup.go:72","msg":"Preparing existing state disk"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:65","msg":"Starting RejoinClient"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"recoveryServer","caller":"recoveryserver/server.go:59","msg":"Starting RecoveryServer"} -``` - -The node will then try to connect to the [*JoinService*](../architecture/microservices.md#joinservice) and obtain the decryption key. 
-If this fails due to an unhealthy control plane, you will see log messages similar to the following: - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:77","msg":"Received list with JoinService endpoints","endpoints":["192.168.178.4:30090","192.168.178.2:30090"]} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.4:30090"} -{"level":"WARN","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.4:30090: connect: connection refused\"","endpoint":"192.168.178.4:30090"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.2:30090"} -{"level":"WARN","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.2:30090: i/o timeout\"","endpoint":"192.168.178.2:30090"} -{"level":"ERROR","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:110","msg":"Failed to rejoin on all endpoints"} -``` - -This means that you have to recover the node manually. - - - - -First, open the STACKIT portal to view all servers in your project. Select individual control plane nodes `--control-plane--` and check that enough members are in a *Running* state. - -Second, check the boot logs of these servers. Click on a server name and select **Overview**. Find the **Machine Setup** section and click on **Web console** > **Open console**. - -In the serial console output, search for `Waiting for decryption key`. -Similar output to the following means your node was restarted and needs to decrypt the [state disk](../architecture/images.md#state-disk): - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","caller":"cmd/main.go:55","msg":"Starting disk-mapper","version":"2.0.0","cloudProvider":"gcp"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"setupManager","caller":"setup/setup.go:72","msg":"Preparing existing state disk"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:65","msg":"Starting RejoinClient"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"recoveryServer","caller":"recoveryserver/server.go:59","msg":"Starting RecoveryServer"} -``` - -The node will then try to connect to the [*JoinService*](../architecture/microservices.md#joinservice) and obtain the decryption key. 
-If this fails due to an unhealthy control plane, you will see log messages similar to the following: - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:77","msg":"Received list with JoinService endpoints","endpoints":["192.168.178.4:30090","192.168.178.2:30090"]} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.4:30090"} -{"level":"WARN","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.4:30090: connect: connection refused\"","endpoint":"192.168.178.4:30090"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.2:30090"} -{"level":"WARN","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.2:30090: i/o timeout\"","endpoint":"192.168.178.2:30090"} -{"level":"ERROR","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:110","msg":"Failed to rejoin on all endpoints"} -``` - -This means that you have to recover the node manually. - - - - -## Recover a cluster - -Recovering a cluster requires the following parameters: - -* The `constellation-state.yaml` file in your working directory or the cluster's endpoint -* The master secret of the cluster - -A cluster can be recovered like this: - -```bash -$ constellation recover -Pushed recovery key. -Pushed recovery key. -Pushed recovery key. -Recovered 3 control-plane nodes. -``` - -In the serial console output of the node you'll see a similar output to the following: - -```json -{"level":"INFO","ts":"2022-09-08T10:26:59Z","logger":"recoveryServer","caller":"recoveryserver/server.go:93","msg":"Received recover call"} -{"level":"INFO","ts":"2022-09-08T10:26:59Z","logger":"recoveryServer","caller":"recoveryserver/server.go:125","msg":"Received state disk key and measurement secret, shutting down server"} -{"level":"INFO","ts":"2022-09-08T10:26:59Z","logger":"recoveryServer.gRPC","caller":"zap/server_interceptors.go:61","msg":"finished streaming call with code OK","grpc.start_time":"2022-09-08T10:26:59Z","system":"grpc","span.kind":"server","grpc.service":"recoverproto.API","grpc.method":"Recover","peer.address":"192.0.2.3:41752","grpc.code":"OK","grpc.time_ms":15.701} -{"level":"INFO","ts":"2022-09-08T10:27:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:87","msg":"RejoinClient stopped"} -``` diff --git a/docs/versioned_docs/version-2.19/workflows/s3proxy.md b/docs/versioned_docs/version-2.19/workflows/s3proxy.md deleted file mode 100644 index 121e8a461..000000000 --- a/docs/versioned_docs/version-2.19/workflows/s3proxy.md +++ /dev/null @@ -1,58 +0,0 @@ -# Install s3proxy - -Constellation includes a transparent client-side encryption proxy for [AWS S3](https://aws.amazon.com/de/s3/) and compatible stores. -s3proxy encrypts objects before sending them to S3 and automatically decrypts them on retrieval, without requiring changes to your application. 
-With s3proxy, you can use S3 for storage in a confidential way without having to trust the storage provider. - -## Limitations - -Currently, s3proxy has the following limitations: -- Only `PutObject` and `GetObject` requests are encrypted/decrypted by s3proxy. -By default, s3proxy will block requests that may expose unencrypted data to S3 (e.g. UploadPart). -The `allow-multipart` flag disables request blocking for evaluation purposes. -- Using the [Range](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html#API_GetObject_RequestSyntax) header on `GetObject` is currently not supported and will result in an error. - -These limitations will be removed with future iterations of s3proxy. -If you want to use s3proxy but these limitations stop you from doing so, consider [opening an issue](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&projects=&template=feature_request.yml). - -## Deployment - -You can add the s3proxy to your Constellation cluster as follows: -1. Add the Edgeless Systems chart repository: - ```bash - helm repo add edgeless https://helm.edgeless.systems/stable - helm repo update - ``` -2. Set ACCESS_KEY and ACCESS_SECRET to valid credentials you want s3proxy to use to interact with S3. -3. Deploy s3proxy: - ```bash - helm install s3proxy edgeless/s3proxy --set awsAccessKeyID="$ACCESS_KEY" --set awsSecretAccessKey="$ACCESS_SECRET" - ``` - -If you want to run a demo application, check out the [Filestash with s3proxy](../getting-started/examples/filestash-s3proxy.md) example. - - -## Technical details - -### Encryption - -s3proxy relies on Google's [Tink Cryptographic Library](https://developers.google.com/tink) to implement cryptographic operations securely. -The used cryptographic primitives are [NIST SP 800 38f](https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-38F.pdf) for key wrapping and [AES](https://en.wikipedia.org/wiki/Advanced_Encryption_Standard)-[GCM](https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Galois/counter_(GCM)) with 256 bit keys for data encryption. - -s3proxy uses [envelope encryption](https://cloud.google.com/kms/docs/envelope-encryption) to encrypt objects. -This means s3proxy uses a key encryption key (KEK) issued by the [KeyService](../architecture/microservices.md#keyservice) to encrypt data encryption keys (DEKs). -Each S3 object is encrypted with its own DEK. -The encrypted DEK is then saved as metadata of the encrypted object. -This enables key rotation of the KEK without re-encrypting the data in S3. -The approach also allows access to objects from different locations, as long as each location has access to the KEK. - -### Traffic interception - -To use s3proxy, you have to redirect your outbound S3 traffic to s3proxy. -This can either be done by modifying your client application or by changing the deployment of your application. - -The necessary deployment modifications are to add DNS redirection and a trusted TLS certificate to the client's trust store. -DNS redirection can be defined for each pod, allowing you to use s3proxy for one application without changing other applications in the same cluster. -Adding a trusted TLS certificate is necessary as clients communicate with s3proxy via HTTPS. -To have your client application trust s3proxy's TLS certificate, the certificate has to be added to the client's certificate trust store. -The [Filestash with s3proxy](../getting-started/examples/filestash-s3proxy.md) example shows how to do this. 
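One possible way to implement the per-pod DNS redirection is a `hostAliases` entry that points the S3 endpoint at the s3proxy service. The sketch below assumes the Helm release exposes a service named `s3proxy` in the default namespace, a client deployment named `my-app`, and the `eu-central-1` regional endpoint; consult the linked Filestash example for a tested setup:

```bash
# Look up the cluster IP of the s3proxy service (name and namespace assumed)
S3PROXY_IP=$(kubectl get svc s3proxy -o jsonpath='{.spec.clusterIP}')

# Redirect the S3 endpoint of a client deployment (name assumed) to s3proxy
kubectl patch deployment my-app --type=strategic -p "{
  \"spec\": {\"template\": {\"spec\": {\"hostAliases\": [
    {\"ip\": \"${S3PROXY_IP}\", \"hostnames\": [\"s3.eu-central-1.amazonaws.com\"]}
  ]}}}
}"
```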
diff --git a/docs/versioned_docs/version-2.19/workflows/sbom.md b/docs/versioned_docs/version-2.19/workflows/sbom.md deleted file mode 100644 index 6c1702dee..000000000 --- a/docs/versioned_docs/version-2.19/workflows/sbom.md +++ /dev/null @@ -1,93 +0,0 @@ -# Consume software bill of materials (SBOMs) - - - ---- - -Constellation builds produce a [software bill of materials (SBOM)](https://www.ntia.gov/SBOM) for each generated [artifact](../architecture/microservices.md). -You can use SBOMs to make informed decisions about dependencies and vulnerabilities in a given application. Enterprises rely on SBOMs to maintain an inventory of used applications, which allows them to take data-driven approaches to managing risks related to vulnerabilities. - -SBOMs for Constellation are generated using [Syft](https://github.com/anchore/syft), signed using [Cosign](https://github.com/sigstore/cosign), and stored with the produced artifact. - -:::note -The public key for Edgeless Systems' long-term code-signing key is: - -``` ------BEGIN PUBLIC KEY----- -MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEf8F1hpmwE+YCFXzjGtaQcrL6XZVT -JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== ------END PUBLIC KEY----- -``` - -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). - -Make sure the key is available in a file named `cosign.pub` to execute the following examples. -::: - -## Verify and download SBOMs - -The following sections detail how to work with each type of artifact to verify and extract the SBOM. - -### Constellation CLI - -The SBOM for Constellation CLI is made available on the [GitHub release page](https://github.com/edgelesssys/constellation/releases). The SBOM (`constellation.spdx.sbom`) and corresponding signature (`constellation.spdx.sbom.sig`) are valid for each Constellation CLI for a given version, regardless of architecture and operating system. - -```bash -curl -LO https://github.com/edgelesssys/constellation/releases/download/v2.2.0/constellation.spdx.sbom -curl -LO https://github.com/edgelesssys/constellation/releases/download/v2.2.0/constellation.spdx.sbom.sig -cosign verify-blob --key cosign.pub --signature constellation.spdx.sbom.sig constellation.spdx.sbom -``` - -### Container Images - -SBOMs for container images are [attached to the image using Cosign](https://docs.sigstore.dev/cosign/signing/other_types/#sboms-software-bill-of-materials) and uploaded to the same registry. 
- -As a consumer, use cosign to download and verify the SBOM: - -```bash -# Verify and download the attestation statement -cosign verify-attestation ghcr.io/edgelesssys/constellation/verification-service@v2.2.0 --type 'https://cyclonedx.org/bom' --key cosign.pub --output-file verification-service.att.json -# Extract SBOM from attestation statement -jq -r .payload verification-service.att.json | base64 -d > verification-service.cyclonedx.sbom -``` - -A successful verification should result in similar output: - -```shell-session -$ cosign verify-attestation ghcr.io/edgelesssys/constellation/verification-service@v2.2.0 --type 'https://cyclonedx.org/bom' --key cosign.pub --output-file verification-service.sbom - -Verification for ghcr.io/edgelesssys/constellation/verification-service@v2.2.0 -- -The following checks were performed on each of these signatures: - - The cosign claims were validated - - The signatures were verified against the specified public key -$ jq -r .payload verification-service.sbom | base64 -d > verification-service.cyclonedx.sbom -``` - -:::note - -This example considers only the `verification-service`. The same approach works for all containers in the [Constellation container registry](https://github.com/orgs/edgelesssys/packages?repo_name=constellation). - -::: - - - -## Vulnerability scanning - -You can use a plethora of tools to consume SBOMs. This section provides suggestions for tools that are popular and known to produce reliable results, but any tool that consumes [SPDX](https://spdx.dev/) or [CycloneDX](https://cyclonedx.org/) files should work. - -Syft is able to [convert between the two formats](https://github.com/anchore/syft#format-conversion-experimental) in case you require a specific type. - -### Grype - -[Grype](https://github.com/anchore/grype) is a CLI tool that lends itself well for integration into CI/CD systems or local developer machines. It's also able to consume the signed attestation statement directly and does the verification in one go. - -```bash -grype att:verification-service.sbom --key cosign.pub --add-cpes-if-none -q -``` - -### Dependency Track - -[Dependency Track](https://dependencytrack.org/) is one of the oldest and most mature solutions when it comes to managing software inventory and vulnerabilities. Once imported, it continuously scans SBOMs for new vulnerabilities. It supports the CycloneDX format and provides direct guidance on how to comply with [U.S. Executive Order 14028](https://docs.dependencytrack.org/usage/executive-order-14028/). diff --git a/docs/versioned_docs/version-2.19/workflows/scale.md b/docs/versioned_docs/version-2.19/workflows/scale.md deleted file mode 100644 index 28f19e3f1..000000000 --- a/docs/versioned_docs/version-2.19/workflows/scale.md +++ /dev/null @@ -1,122 +0,0 @@ -# Scale your cluster - -Constellation provides all features of a Kubernetes cluster including scaling and autoscaling. - -## Worker node scaling - -### Autoscaling - -Constellation comes with autoscaling disabled by default. To enable autoscaling, find the scaling group of -worker nodes: - -```bash -kubectl get scalinggroups -o json | yq '.items | .[] | select(.spec.role == "Worker") | [{"name": .metadata.name, "nodeGoupName": .spec.nodeGroupName}]' -``` - -This will output a list of scaling groups with the corresponding cloud provider name (`name`) and the cloud provider agnostic name of the node group (`nodeGroupName`). 
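-
-If you prefer to capture the scaling group's name programmatically instead of copying it from the output, a sketch like the following should work (it simply picks the first worker scaling group):
-
-```bash
-# Store the name of the first worker scaling group in a variable
-worker_group=$(kubectl get scalinggroups -o json | yq '.items | .[] | select(.spec.role == "Worker") | .metadata.name' | head -n1)
-echo "$worker_group"
-```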
- -Then, patch the `autoscaling` field of the scaling group resource with the desired `name` to `true`: - -```bash -# Replace with the name of the scaling group you want to enable autoscaling for -worker_group= -kubectl patch scalinggroups $worker_group --patch '{"spec":{"autoscaling": true}}' --type='merge' -kubectl get scalinggroup $worker_group -o jsonpath='{.spec}' | yq -P -``` - -The cluster autoscaler now automatically provisions additional worker nodes so that all pods have a place to run. -You can configure the minimum and maximum number of worker nodes in the scaling group by patching the `min` or -`max` fields of the scaling group resource: - -```bash -kubectl patch scalinggroups $worker_group --patch '{"spec":{"max": 5}}' --type='merge' -kubectl get scalinggroup $worker_group -o jsonpath='{.spec}' | yq -P -``` - -The cluster autoscaler will now never provision more than 5 worker nodes. - -If you want to see the autoscaling in action, try to add a deployment with a lot of replicas, like the -following Nginx deployment. The number of replicas needed to trigger the autoscaling depends on the size of -and count of your worker nodes. Wait for the rollout of the deployment to finish and compare the number of -worker nodes before and after the deployment: - -```bash -kubectl create deployment nginx --image=nginx --replicas 150 -kubectl -n kube-system get nodes -kubectl rollout status deployment nginx -kubectl -n kube-system get nodes -``` - -### Manual scaling - -Alternatively, you can manually scale your cluster up or down: - - - - -1. Go to Auto Scaling Groups and select the worker ASG to scale up. -2. Click **Edit** -3. Set the new (increased) **Desired capacity** and **Update**. - - - - -1. Find your Constellation resource group. -2. Select the `scale-set-workers`. -3. Go to **settings** and **scaling**. -4. Set the new **instance count** and **save**. - - - - -1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). -2. **Edit** the **worker** instance group. -3. Set the new **number of instances** and **save**. - - - - -Dynamic cluster scaling isn't yet supported for STACKIT. -Support will be introduced in one of the upcoming releases. - - - - -## Control-plane node scaling - -Control-plane nodes can **only be scaled manually and only scaled up**! - -To increase the number of control-plane nodes, follow these steps: - - - - -1. Go to Auto Scaling Groups and select the control-plane ASG to scale up. -2. Click **Edit** -3. Set the new (increased) **Desired capacity** and **Update**. - - - - -1. Find your Constellation resource group. -2. Select the `scale-set-controlplanes`. -3. Go to **settings** and **scaling**. -4. Set the new (increased) **instance count** and **save**. - - - - -1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). -2. **Edit** the **control-plane** instance group. -3. Set the new (increased) **number of instances** and **save**. - - - - -Dynamic cluster scaling isn't yet supported for STACKIT. -Support will be introduced in one of the upcoming releases. - - - - -If you scale down the number of control-planes nodes, the removed nodes won't be able to exit the `etcd` cluster correctly. This will endanger the quorum that's required to run a stable Kubernetes control plane. 
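-
-If you prefer the command line over the cloud consoles, the manual scaling steps above have CLI equivalents. The following is a sketch; the group and resource names are placeholders and depend on how your cluster was created.
-
-```bash
-# AWS: set the desired capacity of the worker Auto Scaling Group
-aws autoscaling set-desired-capacity --auto-scaling-group-name <worker-asg-name> --desired-capacity 3
-
-# Azure: scale the worker scale set in the cluster's resource group
-az vmss scale --resource-group <resource-group> --name <scale-set-workers> --new-capacity 3
-
-# GCP: resize the worker managed instance group
-gcloud compute instance-groups managed resize <worker-instance-group> --size 3 --zone <zone>
-
-# Afterward, watch the new nodes join the cluster
-kubectl get nodes -w
-```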
diff --git a/docs/versioned_docs/version-2.19/workflows/storage.md b/docs/versioned_docs/version-2.19/workflows/storage.md deleted file mode 100644 index a5c52be90..000000000 --- a/docs/versioned_docs/version-2.19/workflows/storage.md +++ /dev/null @@ -1,281 +0,0 @@ -# Use persistent storage - -Persistent storage in Kubernetes requires cloud-specific configuration. -For abstraction of container storage, Kubernetes offers [volumes](https://kubernetes.io/docs/concepts/storage/volumes/), -allowing users to mount storage solutions directly into containers. -The [Container Storage Interface (CSI)](https://kubernetes-csi.github.io/docs/) is the standard interface for exposing arbitrary block and file storage systems into containers in Kubernetes. -Cloud service providers (CSPs) offer their own CSI-based solutions for cloud storage. - -## Confidential storage - -Most cloud storage solutions support encryption, such as [GCE Persistent Disks (PD)](https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek). -Constellation supports the available CSI-based storage options for Kubernetes engines in AWS, Azure, GCP, and STACKIT. -However, their encryption takes place in the storage backend and is managed by the CSP. -Thus, using the default CSI drivers for these storage types means trusting the CSP with your persistent data. - -To address this, Constellation provides CSI drivers for AWS EBS, Azure Disk, GCE PD, and OpenStack Cinder, offering [encryption on the node level](../architecture/keys.md#storage-encryption). They enable transparent encryption for persistent volumes without needing to trust the cloud backend. Plaintext data never leaves the confidential VM context, offering you confidential storage. - -For more details see [encrypted persistent storage](../architecture/encrypted-storage.md). - -## CSI drivers - -Constellation supports the following drivers, which offer node-level encryption and optional integrity protection. - - - - -**Constellation CSI driver for AWS Elastic Block Store** -Mount [Elastic Block Store](https://aws.amazon.com/ebs/) storage volumes into your Constellation cluster. -Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-aws-ebs-csi-driver) for more information. - - - - -**Constellation CSI driver for Azure Disk**: -Mount Azure [Disk Storage](https://azure.microsoft.com/en-us/services/storage/disks/#overview) into your Constellation cluster. -See the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-azuredisk-csi-driver) for more information. -Since Azure Disks are mounted as `ReadWriteOnce`, they're only available to a single pod. - - - - -**Constellation CSI driver for GCP Persistent Disk**: -Mount [Persistent Disk](https://cloud.google.com/persistent-disk) block storage into your Constellation cluster. -Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-gcp-compute-persistent-disk-csi-driver) for more information. - - - - -**Constellation CSI driver for STACKIT / OpenStack Cinder** -Mount [Cinder](https://docs.openstack.org/cinder/latest/) block storage volumes into your Constellation cluster. 
-Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-cloud-provider-openstack) for more information. - - - - -Note that in case the options above aren't a suitable solution for you, Constellation is compatible with all other CSI-based storage options. For example, you can use [AWS EFS](https://docs.aws.amazon.com/en_en/eks/latest/userguide/efs-csi.html), [Azure Files](https://docs.microsoft.com/en-us/azure/storage/files/storage-files-introduction), or [GCP Filestore](https://cloud.google.com/filestore) with Constellation out of the box. Constellation is just not providing transparent encryption on the node level for these storage types yet. - -## Installation - -The Constellation CLI automatically installs Constellation's CSI driver for the selected CSP in your cluster. -If you don't need a CSI driver or wish to deploy your own, you can disable the automatic installation by setting `deployCSIDriver` to `false` in your Constellation config file. - - - - -AWS comes with two storage classes by default. - -* `encrypted-rwo` - * Uses [SSDs of `gp3` type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html) - * ext-4 filesystem - * Encryption of all data written to disk -* `integrity-encrypted-rwo` - * Uses [SSDs of `gp3` type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html) - * ext-4 filesystem - * Encryption of all data written to disk - * Integrity protection of data written to disk - -For more information on encryption algorithms and key sizes, refer to [cryptographic algorithms](../architecture/encrypted-storage.md#cryptographic-algorithms). - -:::info - -The default storage class is set to `encrypted-rwo` for performance reasons. -If you want integrity-protected storage, set the `storageClassName` parameter of your persistent volume claim to `integrity-encrypted-rwo`. - -Alternatively, you can create your own storage class with integrity protection enabled by adding `csi.storage.k8s.io/fstype: ext4-integrity` to the class `parameters`. -Or use another filesystem by specifying another file system type with the suffix `-integrity`, e.g., `csi.storage.k8s.io/fstype: xfs-integrity`. - -Note that volume expansion isn't supported for integrity-protected disks. - -::: - - - - -Azure comes with two storage classes by default. - -* `encrypted-rwo` - * Uses [Standard SSDs](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#standard-ssds) - * ext-4 filesystem - * Encryption of all data written to disk -* `integrity-encrypted-rwo` - * Uses [Premium SSDs](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#premium-ssds) - * ext-4 filesystem - * Encryption of all data written to disk - * Integrity protection of data written to disk - -For more information on encryption algorithms and key sizes, refer to [cryptographic algorithms](../architecture/encrypted-storage.md#cryptographic-algorithms). - -:::info - -The default storage class is set to `encrypted-rwo` for performance reasons. -If you want integrity-protected storage, set the `storageClassName` parameter of your persistent volume claim to `integrity-encrypted-rwo`. - -Alternatively, you can create your own storage class with integrity protection enabled by adding `csi.storage.k8s.io/fstype: ext4-integrity` to the class `parameters`. 
-Or use another filesystem by specifying another file system type with the suffix `-integrity`, e.g., `csi.storage.k8s.io/fstype: xfs-integrity`. - -Note that volume expansion isn't supported for integrity-protected disks. - -::: - - - - -GCP comes with two storage classes by default. - -* `encrypted-rwo` - * Uses [standard persistent disks](https://cloud.google.com/compute/docs/disks#pdspecs) - * ext-4 filesystem - * Encryption of all data written to disk -* `integrity-encrypted-rwo` - * Uses [performance (SSD) persistent disks](https://cloud.google.com/compute/docs/disks#pdspecs) - * ext-4 filesystem - * Encryption of all data written to disk - * Integrity protection of data written to disk - -For more information on encryption algorithms and key sizes, refer to [cryptographic algorithms](../architecture/encrypted-storage.md#cryptographic-algorithms). - -:::info - -The default storage class is set to `encrypted-rwo` for performance reasons. -If you want integrity-protected storage, set the `storageClassName` parameter of your persistent volume claim to `integrity-encrypted-rwo`. - -Alternatively, you can create your own storage class with integrity protection enabled by adding `csi.storage.k8s.io/fstype: ext4-integrity` to the class `parameters`. -Or use another filesystem by specifying another file system type with the suffix `-integrity`, e.g., `csi.storage.k8s.io/fstype: xfs-integrity`. - -Note that volume expansion isn't supported for integrity-protected disks. - -::: - - - - -STACKIT comes with two storage classes by default. - -* `encrypted-rwo` - * Uses [disks of `storage_premium_perf1` type](https://docs.stackit.cloud/stackit/en/service-plans-blockstorage-75137974.html) - * ext-4 filesystem - * Encryption of all data written to disk -* `integrity-encrypted-rwo` - * Uses [disks of `storage_premium_perf1` type](https://docs.stackit.cloud/stackit/en/service-plans-blockstorage-75137974.html) - * ext-4 filesystem - * Encryption of all data written to disk - * Integrity protection of data written to disk - -For more information on encryption algorithms and key sizes, refer to [cryptographic algorithms](../architecture/encrypted-storage.md#cryptographic-algorithms). - -:::info - -The default storage class is set to `encrypted-rwo` for performance reasons. -If you want integrity-protected storage, set the `storageClassName` parameter of your persistent volume claim to `integrity-encrypted-rwo`. - -Alternatively, you can create your own storage class with integrity protection enabled by adding `csi.storage.k8s.io/fstype: ext4-integrity` to the class `parameters`. -Or use another filesystem by specifying another file system type with the suffix `-integrity`, e.g., `csi.storage.k8s.io/fstype: xfs-integrity`. - -Note that volume expansion isn't supported for integrity-protected disks. - -::: - - - - -1. Create a [persistent volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) - - A [persistent volume claim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) is a request for storage with certain properties. - It can refer to a storage class. - The following creates a persistent volume claim, requesting 20 GB of storage via the `encrypted-rwo` storage class: - - ```bash - cat < - ---- - -You can terminate your cluster using the CLI. For this, you need the Terraform state directory named [`constellation-terraform`](../reference/terraform.md) in the current directory. 
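-
-A quick way to confirm that you're in the correct workspace before terminating is to check for the expected files. This is a minimal sketch; the file names match the defaults created by the CLI.
-
-```bash
-# The workspace should contain the Terraform state directory and the cluster's files
-ls -d constellation-terraform
-ls constellation-conf.yaml constellation-mastersecret.json
-```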
- -:::danger - -All ephemeral storage and state of your cluster will be lost. Make sure any data is safely stored in persistent storage. Constellation can recreate your cluster and the associated encryption keys, but won't backup your application data automatically. - -::: - - - -Terminate the cluster by running: - -```bash -constellation terminate -``` - -Or without confirmation (e.g., for automation purposes): - -```bash -constellation terminate --yes -``` - -This deletes all resources created by Constellation in your cloud environment. -All local files created by the `apply` command are deleted as well, except for `constellation-mastersecret.json` and the configuration file. - -:::caution - -Termination can fail if additional resources have been created that depend on the ones managed by Constellation. In this case, you need to delete these additional -resources manually. Just run the `terminate` command again afterward to continue the termination process of the cluster. - -::: - - - -Terminate the cluster by running: - -```bash -terraform destroy -``` - -Delete all files that are no longer needed: - -```bash -rm constellation-state.yaml constellation-admin.conf -``` - -Only the `constellation-mastersecret.json` and the configuration file remain. - - - diff --git a/docs/versioned_docs/version-2.19/workflows/terraform-provider.md b/docs/versioned_docs/version-2.19/workflows/terraform-provider.md deleted file mode 100644 index c7a795d3f..000000000 --- a/docs/versioned_docs/version-2.19/workflows/terraform-provider.md +++ /dev/null @@ -1,140 +0,0 @@ -# Use the Terraform provider - -The Constellation Terraform provider allows to manage the full lifecycle of a Constellation cluster (namely creation, upgrades, and deletion) via Terraform. -The provider is available through the [Terraform registry](https://registry.terraform.io/providers/edgelesssys/constellation/latest) and is released in lock-step with Constellation releases. - -## Prerequisites - -- a Linux / Mac operating system (ARM64/AMD64) -- a Terraform installation of version `v1.4.4` or above - -## Quick setup - -This example shows how to set up a Constellation cluster with the reference IAM and infrastructure setup. This setup is also used when creating a Constellation cluster through the Constellation CLI. You can either consume the IAM / infrastructure modules through a remote source (recommended) or local files. The latter requires downloading the infrastructure and IAM modules for the corresponding CSP from `terraform-modules.zip` on the [Constellation release page](https://github.com/edgelesssys/constellation/releases/latest) and placing them in the Terraform workspace directory. - -1. Create a directory (workspace) for your Constellation cluster. - - ```bash - mkdir constellation-workspace - cd constellation-workspace - ``` - -2. Use one of the [example configurations for using the Constellation Terraform provider](https://github.com/edgelesssys/constellation/tree/main/terraform-provider-constellation/examples/full) or create a `main.tf` file and fill it with the resources you want to create. The [Constellation Terraform provider documentation](https://registry.terraform.io/providers/edgelesssys/constellation/latest) offers thorough documentation on the resources and their attributes. -3. Initialize and apply the Terraform configuration. - - - - Initialize the providers and apply the configuration. 
- - ```bash - terraform init - terraform apply - ``` - - Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. - - - -:::info -On SEV-SNP, you need to manually patch the policy of the MAA provider before creating the Constellation cluster, as this feature isn't available in Azure's Terraform provider yet. The Constellation CLI provides a utility for patching, but you can also do it manually. - - ```bash - terraform init - terraform apply -target module.azure_iam # adjust resource path if not using the example configuration - terraform apply -target module.azure_infrastructure # adjust resource path if not using the example configuration - constellation maa-patch $(terraform output -raw maa_url) # adjust output path / input if not using the example configuration or manually patch the resource - terraform apply -target constellation_cluster.azure_example # adjust resource path if not using the example configuration - ``` - - Use the following policy if manually performing the patch. - - ``` - version= 1.0; - authorizationrules - { - [type=="x-ms-azurevm-default-securebootkeysvalidated", value==false] => deny(); - [type=="x-ms-azurevm-debuggersdisabled", value==false] => deny(); - // The line below was edited to use the MAA provider within Constellation. Do not edit manually. - //[type=="secureboot", value==false] => deny(); - [type=="x-ms-azurevm-signingdisabled", value==false] => deny(); - [type=="x-ms-azurevm-dbvalidated", value==false] => deny(); - [type=="x-ms-azurevm-dbxvalidated", value==false] => deny(); - => permit(); - }; - issuancerules - { - }; - ``` - -::: - - Initialize the providers and apply the configuration. - - ```bash - terraform init - terraform apply - ``` - - Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. - - - - - Initialize the providers and apply the configuration. - - ```bash - terraform init - terraform apply - ``` - - Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. - - - Initialize the providers and apply the configuration. - - ```bash - terraform init - terraform apply - ``` - - Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. - - - -4. Connect to the cluster. - - ```bash - terraform output -raw kubeconfig > constellation-admin.conf - export KUBECONFIG=$(realpath constellation-admin.conf) - ``` - -## Bringing your own infrastructure - -Instead of using the example infrastructure used in the [quick setup](#quick-setup), you can also provide your own infrastructure. -If you need a starting point for a custom infrastructure setup, you can download the infrastructure / IAM Terraform modules for the respective CSP from the Constellation [GitHub releases](https://github.com/edgelesssys/constellation/releases). 
You can modify and extend the modules per your requirements, while keeping the basic functionality intact. -The module contains: - -- `{csp}`: cloud resources the cluster runs on -- `iam/{csp}`: IAM resources used within the cluster - -When upgrading your cluster, make sure to check the Constellation release notes for potential breaking changes in the reference infrastructure / IAM modules that need to be considered. - -## Cluster upgrades - -:::tip -Also see the [general documentation on cluster upgrades](./upgrade.md). -::: - -The steps for applying the upgrade are as follows: - -1. Update the version constraint of the Constellation Terraform provider in the `required_providers` block in your Terraform configuration. -2. If you explicitly set any of the version attributes of the provider's resources and data sources (e.g. `image_version` or `constellation_microservice_version`), make sure to update them too. Refer to Constellation's [version support policy](https://github.com/edgelesssys/constellation/blob/main/dev-docs/workflows/versions-support.md) for more information on how each Constellation version and its dependencies are supported. -3. Update the IAM / infrastructure configuration. - - For [remote addresses as module sources](https://developer.hashicorp.com/terraform/language/modules/sources#fetching-archives-over-http), update the version number inside the address of the `source` field of the infrastructure / IAM module to the target version. - - For [local paths as module sources](https://developer.hashicorp.com/terraform/language/modules/sources#local-paths) or when [providing your own infrastructure](#bringing-your-own-infrastructure), see the changes made in the reference modules since the upgrade's origin version and adjust your infrastructure / IAM configuration accordingly. -4. Upgrade the Terraform module and provider dependencies and apply the targeted configuration. - -```bash - terraform init -upgrade - terraform apply -``` diff --git a/docs/versioned_docs/version-2.19/workflows/troubleshooting.md b/docs/versioned_docs/version-2.19/workflows/troubleshooting.md deleted file mode 100644 index 195bce1cc..000000000 --- a/docs/versioned_docs/version-2.19/workflows/troubleshooting.md +++ /dev/null @@ -1,151 +0,0 @@ -# Troubleshooting - -This section aids you in finding problems when working with Constellation. - -## Common issues - -### Issues with creating new clusters - -When you create a new cluster, you should always use the [latest release](https://github.com/edgelesssys/constellation/releases/latest). -If something doesn't work, check out the [known issues](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22). - -### Azure: Resource Providers can't be registered - -On Azure, you may receive the following error when running `apply` or `terminate` with limited IAM permissions: - -```shell-session -Error: Error ensuring Resource Providers are registered. - -Terraform automatically attempts to register the Resource Providers it supports to -ensure it's able to provision resources. - -If you don't have permission to register Resource Providers you may wish to use the -"skip_provider_registration" flag in the Provider block to disable this functionality. - -[...] -``` - -To continue, please ensure that the [required resource providers](../getting-started/install.md#required-permissions) have been registered in your subscription by your administrator. 
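-
-If you have sufficient permissions yourself, you can check and register resource providers with the Azure CLI. This is a sketch; `Microsoft.Attestation` is just one example namespace; see the linked installation documentation for the full list of providers Constellation requires.
-
-```bash
-# Check the registration state of a resource provider
-az provider show --namespace Microsoft.Attestation --query registrationState -o tsv
-
-# Register it (requires the corresponding permissions on the subscription)
-az provider register --namespace Microsoft.Attestation
-```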
- -Afterward, set `ARM_SKIP_PROVIDER_REGISTRATION=true` as an environment variable and either run `apply` or `terminate` again. -For example: - -```bash -ARM_SKIP_PROVIDER_REGISTRATION=true constellation apply -``` - -Or alternatively, for `terminate`: - -```bash -ARM_SKIP_PROVIDER_REGISTRATION=true constellation terminate -``` - -### Azure: Can't update attestation policy - -On Azure, you may receive the following error when running `apply` from within an Azure environment, e.g., an Azure VM: - -```shell-session -An error occurred: patching policies: updating attestation policy: unexpected status code: 403 Forbidden -``` - -The problem occurs because the Azure SDK we use internally attempts to [authenticate towards the Azure API with the managed identity of your current environment instead of the Azure CLI token](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DefaultAzureCredential). - -We decided not to deviate from this behavior and comply with the ordering of credentials. - -A solution is to add the [required permissions](../getting-started/install.md#required-permissions) to the managed identity of your environment. For example, the managed identity of your Azure VM, instead of the account that you've authenticated with in the Azure CLI. - -If your setup requires a change in the ordering of credentials, please open an issue and explain your desired behavior. - - - -### Nodes fail to join with error `untrusted measurement value` - -This error indicates that a node's [attestation statement](../architecture/attestation.md) contains measurements that don't match the trusted values expected by the [JoinService](../architecture/microservices.md#joinservice). -This may for example happen if the cloud provider updates the VM's firmware such that it influences the [runtime measurements](../architecture/attestation.md#runtime-measurements) in an unforeseen way. -A failed upgrade due to an erroneous attestation config can also cause this error. -You can change the expected measurements to resolve the failure. - -:::caution - -Attestation and trusted measurements are crucial for the security of your cluster. -Be extra careful when manually changing these settings. -When in doubt, check if the encountered [issue is known](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22) or [contact support](https://github.com/edgelesssys/constellation#support). - -::: - -:::tip - -During an upgrade with modified attestation config, a backup of the current configuration is stored in the `join-config` config map in the `kube-system` namespace under the `attestationConfig_backup` key. To restore the old attestation config after a failed upgrade, replace the value of `attestationConfig` with the value from `attestationConfig_backup`: - -```bash -kubectl patch configmaps -n kube-system join-config -p "{\"data\":{\"attestationConfig\":\"$(kubectl get configmaps -n kube-system join-config -o "jsonpath={.data.attestationConfig_backup}")\"}}" -``` - -::: - -You can use the `apply` command to change measurements of a running cluster: - -1. Modify the `measurements` key in your local `constellation-conf.yaml` to the expected values. -2. Run `constellation apply`. - -Keep in mind that running `apply` also applies any version changes from your config to the cluster. 
- -You can run these commands to learn about the versions currently configured in the cluster: - -- Kubernetes API server version: `kubectl get nodeversion constellation-version -o json -n kube-system | jq .spec.kubernetesClusterVersion` -- image version: `kubectl get nodeversion constellation-version -o json -n kube-system | jq .spec.imageVersion` -- microservices versions: `helm list --filter 'constellation-services' -n kube-system` - -### Upgrading Kubernetes resources fails - -Constellation manages its Kubernetes resources using Helm. -When applying an upgrade, the charts that are about to be installed, and a values override file `overrides.yaml`, -are saved to disk in your current workspace under `constellation-upgrade/upgrade-/helm-charts/`. -If upgrading the charts using the Constellation CLI fails, you can review these charts and try to manually apply the upgrade. - -:::caution - -Changing and manually applying the charts may destroy cluster resources and can lead to broken Constellation deployments. -Proceed with caution and when in doubt, -check if the encountered [issue is known](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22) or [contact support](https://github.com/edgelesssys/constellation#support). - -::: - -## Diagnosing issues - -### Logs - -To get started on diagnosing issues with Constellation, it's often helpful to collect logs from nodes, pods, or other resources in the cluster. Most logs are available through Kubernetes' standard -[logging interfaces](https://kubernetes.io/docs/concepts/cluster-administration/logging/). - -To debug issues occurring at boot time of the nodes, you can use the serial console interface of the CSP while the machine boots to get a read-only view of the boot logs. - -Apart from that, Constellation also offers further [observability integrations](../architecture/observability.md). - -### Node shell access - -Debugging via a shell on a node is [directly supported by Kubernetes](https://kubernetes.io/docs/tasks/debug/debug-application/debug-running-pod/#node-shell-session). - -1. Figure out which node to connect to: - - ```bash - kubectl get nodes - # or to see more information, such as IPs: - kubectl get nodes -o wide - ``` - -2. Connect to the node: - - ```bash - kubectl debug node/constell-worker-xksa0-000000 -it --image=busybox - ``` - - You will be presented with a prompt. - - The nodes file system is mounted at `/host`. - -3. Once finished, clean up the debug pod: - - ```bash - kubectl delete pod node-debugger-constell-worker-xksa0-000000-bjthj - ``` diff --git a/docs/versioned_docs/version-2.19/workflows/trusted-launch.md b/docs/versioned_docs/version-2.19/workflows/trusted-launch.md deleted file mode 100644 index d6d01d8eb..000000000 --- a/docs/versioned_docs/version-2.19/workflows/trusted-launch.md +++ /dev/null @@ -1,54 +0,0 @@ -# Use Azure trusted launch VMs - -Constellation also supports [trusted launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch) on Microsoft Azure. Trusted launch VMs don't offer the same level of security as Confidential VMs, but are available in more regions and in larger quantities. The main difference between trusted launch VMs and normal VMs is that the former offer vTPM-based remote attestation. When used with trusted launch VMs, Constellation relies on vTPM-based remote attestation to verify nodes. 
- -:::caution - -Trusted launch VMs don't provide runtime encryption and don't keep the cloud service provider (CSP) out of your trusted computing base. - -::: - -Constellation supports trusted launch VMs with instance types `Standard_D*_v4` and `Standard_E*_v4`. Run `constellation config instance-types` for a list of all supported instance types. - -## VM images - -Azure currently doesn't support [community galleries for trusted launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/share-gallery-community). Thus, you need to manually import the Constellation node image into your cloud subscription. - -The latest image is available at `https://cdn.confidential.cloud/constellation/images/azure/trusted-launch/v2.2.0/constellation.img`. Simply adjust the version number to download a newer version. - -After you've downloaded the image, create a resource group `constellation-images` in your Azure subscription and import the image. -You can use a script to do this: - -```bash -wget https://raw.githubusercontent.com/edgelesssys/constellation/main/hack/importAzure.sh -chmod +x importAzure.sh -AZURE_IMAGE_VERSION=2.2.0 AZURE_RESOURCE_GROUP_NAME=constellation-images AZURE_IMAGE_FILE=./constellation.img ./importAzure.sh -``` - -The script creates the following resources: - -1. A new image gallery with the default name `constellation-import` -2. A new image definition with the default name `constellation` -3. The actual image with the provided version. In this case `2.2.0` - -Once the import is completed, use the `ID` of the image version in your `constellation-conf.yaml` for the `image` field. Set `confidentialVM` to `false`. - -Fetch the image measurements: - -```bash -IMAGE_VERSION=2.2.0 -URL=https://public-edgeless-constellation.s3.us-east-2.amazonaws.com//communitygalleries/constellationcvm-b3782fa0-0df7-4f2f-963e-fc7fc42663df/images/constellation/versions/$IMAGE_VERSION/measurements.yaml -constellation config fetch-measurements -u$URL -s$URL.sig -``` - -:::info - -The [`constellation apply`](create.md) command will issue a warning because manually imported images aren't recognized as production grade images: - -```shell-session -Configured image doesn't look like a released production image. Double check image before deploying to production. -``` - -Please ignore this warning. - -::: diff --git a/docs/versioned_docs/version-2.19/workflows/upgrade.md b/docs/versioned_docs/version-2.19/workflows/upgrade.md deleted file mode 100644 index 3db2ecad6..000000000 --- a/docs/versioned_docs/version-2.19/workflows/upgrade.md +++ /dev/null @@ -1,110 +0,0 @@ -# Upgrade your cluster - -Constellation provides an easy way to upgrade all components of your cluster, without disrupting its availability. -Specifically, you can upgrade the Kubernetes version, the nodes' image, and the Constellation microservices. -You configure the desired versions in your local Constellation configuration and trigger upgrades with the `apply` command. -To learn about available versions you use the `upgrade check` command. -Which versions are available depends on the CLI version you are using. - -## Update the CLI - -Each CLI comes with a set of supported microservice and Kubernetes versions. -Most importantly, a given CLI version can only upgrade a cluster of the previous minor version, but not older ones. -This means that you have to upgrade your CLI and cluster one minor version at a time. 
- -For example, if you are currently on CLI version v2.6 and the latest version is v2.8, you should - -* upgrade the CLI to v2.7, -* upgrade the cluster to v2.7, -* and only then continue upgrading the CLI (and the cluster) to v2.8 after. - -Also note that if your current Kubernetes version isn't supported by the next CLI version, use your current CLI to upgrade to a newer Kubernetes version first. - -To learn which Kubernetes versions are supported by a particular CLI, run [constellation config kubernetes-versions](../reference/cli.md#constellation-config-kubernetes-versions). - -## Migrate the configuration - -The Constellation configuration file is located in the file `constellation-conf.yaml` in your workspace. -Refer to the [migration reference](../reference/migration.md) to check if you need to update fields in your configuration file. -Use [`constellation config migrate`](../reference/cli.md#constellation-config-migrate) to automatically update an old config file to a new format. - -## Check for upgrades - -To learn which versions the current CLI can upgrade to and what's installed in your cluster, run: - -```bash -# Show possible upgrades -constellation upgrade check - -# Show possible upgrades and write them to config file -constellation upgrade check --update-config -``` - -You can either enter the reported target versions into your config manually or run the above command with the `--update-config` flag. -When using this flag, the `kubernetesVersion`, `image`, `microserviceVersion`, and `attestation` fields are overwritten with the smallest available upgrade. - -## Apply the upgrade - -Once you updated your config with the desired versions, you can trigger the upgrade with this command: - -```bash -constellation apply -``` - -Microservice upgrades will be finished within a few minutes, depending on the cluster size. -If you are interested, you can monitor pods restarting in the `kube-system` namespace with your tool of choice. - -Image and Kubernetes upgrades take longer. -For each node in your cluster, a new node has to be created and joined. -The process usually takes up to ten minutes per node. - -When applying an upgrade, the Helm charts for the upgrade as well as backup files of Constellation-managed Custom Resource Definitions, Custom Resources, and Terraform state are created. -You can use the Terraform state backup to restore previous resources in case an upgrade misconfigured or erroneously deleted a resource. -You can use the Custom Resource (Definition) backup files to restore Custom Resources and Definitions manually (e.g., via `kubectl apply`) if the automatic migration of those resources fails. -You can use the Helm charts to manually apply upgrades to the Kubernetes resources, should an upgrade fail. - -:::note - -For advanced users: the upgrade consists of several phases that can be individually skipped through the `--skip-phases` flag. -The phases are `infrastracture` for the cloud resource management through Terraform, `helm` for the chart management of the microservices, `image` for OS image upgrades, and `k8s` for Kubernetes version upgrades. - -::: - -## Check the status - -Upgrades are asynchronous operations. -After you run `apply`, it will take a while until the upgrade has completed. 
-To understand if an upgrade is finished, you can run: - -```bash -constellation status -``` - -This command displays the following information: - -* The installed services and their versions -* The image and Kubernetes version the cluster is expecting on each node -* How many nodes are up to date - -Here's an example output: - -```shell-session -Target versions: - Image: v2.6.0 - Kubernetes: v1.25.8 -Service versions: - Cilium: v1.12.1 - cert-manager: v1.10.0 - constellation-operators: v2.6.0 - constellation-services: v2.6.0 -Cluster status: Some node versions are out of date - Image: 23/25 - Kubernetes: 25/25 -``` - -This output indicates that the cluster is running Kubernetes version `1.25.8`, and all nodes have the appropriate binaries installed. -23 out of 25 nodes have already upgraded to the targeted image version of `2.6.0`, while two are still in progress. - -## Apply further upgrades - -After the upgrade is finished, you can run `constellation upgrade check` again to see if there are more upgrades available. If so, repeat the process. diff --git a/docs/versioned_docs/version-2.19/workflows/verify-cli.md b/docs/versioned_docs/version-2.19/workflows/verify-cli.md deleted file mode 100644 index e33569d37..000000000 --- a/docs/versioned_docs/version-2.19/workflows/verify-cli.md +++ /dev/null @@ -1,129 +0,0 @@ -# Verify the CLI - -:::info -This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. -::: - - - ---- - -Edgeless Systems uses [sigstore](https://www.sigstore.dev/) and [SLSA](https://slsa.dev) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/cosign/signing/overview/), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at `https://rekor.sigstore.dev`. - -:::note -The public key for Edgeless Systems' long-term code-signing key is: - -``` ------BEGIN PUBLIC KEY----- -MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEf8F1hpmwE+YCFXzjGtaQcrL6XZVT -JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== ------END PUBLIC KEY----- -``` - -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). -::: - -The Rekor transparency log is a public append-only ledger that verifies and records signatures and associated metadata. The Rekor transparency log enables everyone to observe the sequence of (software) signatures issued by Edgeless Systems and many other parties. The transparency log allows for the public identification of dubious or malicious signatures. - -You should always ensure that (1) your CLI executable was signed with the private key corresponding to the above public key and that (2) there is a corresponding entry in the Rekor transparency log. Both can be done as described in the following. - -:::info -You don't need to verify the Constellation node images. This is done automatically by your CLI and the rest of Constellation. -::: - -## Verify the signature - -:::info -This guide assumes Linux on an amd64 processor. The exact steps for other platforms differ slightly. -::: - -First, [install the Cosign CLI](https://docs.sigstore.dev/cosign/system_config/installation/). 
Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example: - -```shell-session -$ cosign verify-blob --key https://edgeless.systems/es.pub --signature constellation-linux-amd64.sig constellation-linux-amd64 - -Verified OK -``` - -The above performs an offline verification of the provided public key, signature, and executable. To also verify that a corresponding entry exists in the public Rekor transparency log, add the variable `COSIGN_EXPERIMENTAL=1`: - -```shell-session -$ COSIGN_EXPERIMENTAL=1 cosign verify-blob --key https://edgeless.systems/es.pub --signature constellation-linux-amd64.sig constellation-linux-amd64 - -tlog entry verified with uuid: afaba7f6635b3e058888692841848e5514357315be9528474b23f5dcccb82b13 index: 3477047 -Verified OK -``` - -🏁 You now know that your CLI executable was officially released and signed by Edgeless Systems. - -### Optional: Manually inspect the transparency log - -To further inspect the public Rekor transparency log, [install the Rekor CLI](https://docs.sigstore.dev/logging/installation). A search for the CLI executable should give a single UUID. (Note that this UUID contains the UUID from the previous `cosign` command.) - -```shell-session -$ rekor-cli search --artifact constellation-linux-amd64 - -Found matching entries (listed by UUID): -362f8ecba72f4326afaba7f6635b3e058888692841848e5514357315be9528474b23f5dcccb82b13 -``` - -With this UUID you can get the full entry from the transparency log: - -```shell-session -$ rekor-cli get --uuid=362f8ecba72f4326afaba7f6635b3e058888692841848e5514357315be9528474b23f5dcccb82b13 - -LogID: c0d23d6ad406973f9559f3ba2d1ca01f84147d8ffc5b8445c224f98b9591801d -Index: 3477047 -IntegratedTime: 2022-09-12T22:28:16Z -UUID: afaba7f6635b3e058888692841848e5514357315be9528474b23f5dcccb82b13 -Body: { - "HashedRekordObj": { - "data": { - "hash": { - "algorithm": "sha256", - "value": "40e137b9b9b8204d672642fd1e181c6d5ccb50cfc5cc7fcbb06a8c2c78f44aff" - } - }, - "signature": { - "content": "MEUCIQCSER3mGj+j5Pr2kOXTlCIHQC3gT30I7qkLr9Awt6eUUQIgcLUKRIlY50UN8JGwVeNgkBZyYD8HMxwC/LFRWoMn180=", - "publicKey": { - "content": "LS0tLS1CRUdJTiBQVUJMSUMgS0VZLS0tLS0KTUZrd0V3WUhLb1pJemowQ0FRWUlLb1pJemowREFRY0RRZ0FFZjhGMWhwbXdFK1lDRlh6akd0YVFjckw2WFpWVApKbUVlNWlTTHZHMVN5UVNBZXc3V2RNS0Y2bzl0OGUyVEZ1Q2t6bE9oaGx3czJPSFdiaUZabkZXQ0Z3PT0KLS0tLS1FTkQgUFVCTElDIEtFWS0tLS0tCg==" - } - } - } -} -``` - -The field `publicKey` should contain Edgeless Systems' public key in Base64 encoding. - -You can get an exhaustive list of artifact signatures issued by Edgeless Systems via the following command: - -```bash -rekor-cli search --public-key https://edgeless.systems/es.pub --pki-format x509 -``` - -Edgeless Systems monitors this list to detect potential unauthorized use of its private key. - -## Verify the provenance - -Provenance attests that a software artifact was produced by a specific repository and build system invocation. For more information on provenance visit [slsa.dev](https://slsa.dev/provenance/v0.2) and learn about the [adoption of SLSA for Constellation](../reference/slsa.md). - -Just as checking its signature proves that the CLI hasn't been manipulated, checking the provenance proves that the artifact was produced by the expected build process and hasn't been tampered with. - -To verify the provenance, first install the [slsa-verifier](https://github.com/slsa-framework/slsa-verifier). 
Then make sure you have the provenance file (`constellation.intoto.jsonl`) and Constellation CLI downloaded. Both are available on the [GitHub release page](https://github.com/edgelesssys/constellation/releases). - -:::info -The same provenance file is valid for all Constellation CLI executables of a given version independent of the target platform. -::: - -Use the verifier to perform the check: - -```shell-session -$ slsa-verifier verify-artifact constellation-linux-amd64 \ - --provenance-path constellation.intoto.jsonl \ - --source-uri github.com/edgelesssys/constellation - -Verified signature against tlog entry index 7771317 at URL: https://rekor.sigstore.dev/api/v1/log/entries/24296fb24b8ad77af2c04c8b4ae0d5bc5... -Verified build using builder https://github.com/slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@refs/tags/v1.2.2 at commit 18e9924b416323c37b9cdfd6cc728de8a947424a -PASSED: Verified SLSA provenance -``` diff --git a/docs/versioned_docs/version-2.19/workflows/verify-cluster.md b/docs/versioned_docs/version-2.19/workflows/verify-cluster.md deleted file mode 100644 index b6595ebf2..000000000 --- a/docs/versioned_docs/version-2.19/workflows/verify-cluster.md +++ /dev/null @@ -1,97 +0,0 @@ -# Verify your cluster - -Constellation's [attestation feature](../architecture/attestation.md) allows you, or a third party, to verify the integrity and confidentiality of your Constellation cluster. - -## Fetch measurements - -To verify the integrity of Constellation you need trusted measurements to verify against. For each node image released by Edgeless Systems, there are signed measurements, which you can download using the CLI: - -```bash -constellation config fetch-measurements -``` - -This command performs the following steps: - -1. Download the signed measurements for the configured image. By default, this will use Edgeless Systems' public measurement registry. -2. Verify the signature of the measurements. This will use Edgeless Systems' [public key](https://edgeless.systems/es.pub). -3. Write measurements into configuration file. - -The configuration file then contains a list of `measurements` similar to the following: - -```yaml -# ... -measurements: - 0: - expected: "0f35c214608d93c7a6e68ae7359b4a8be5a0e99eea9107ece427c4dea4e439cf" - warnOnly: false - 4: - expected: "02c7a67c01ec70ffaf23d73a12f749ab150a8ac6dc529bda2fe1096a98bf42ea" - warnOnly: false - 5: - expected: "e6949026b72e5045706cd1318889b3874480f7a3f7c5c590912391a2d15e6975" - warnOnly: true - 8: - expected: "0000000000000000000000000000000000000000000000000000000000000000" - warnOnly: false - 9: - expected: "f0a6e8601b00e2fdc57195686cd4ef45eb43a556ac1209b8e25d993213d68384" - warnOnly: false - 11: - expected: "0000000000000000000000000000000000000000000000000000000000000000" - warnOnly: false - 12: - expected: "da99eb6cf7c7fbb692067c87fd5ca0b7117dc293578e4fea41f95d3d3d6af5e2" - warnOnly: false - 13: - expected: "0000000000000000000000000000000000000000000000000000000000000000" - warnOnly: false - 14: - expected: "d7c4cc7ff7933022f013e03bdee875b91720b5b86cf1753cad830f95e791926f" - warnOnly: true - 15: - expected: "0000000000000000000000000000000000000000000000000000000000000000" - warnOnly: false -# ... -``` - -Each entry specifies the expected value of the Constellation node, and whether the measurement should be enforced (`warnOnly: false`), or only a warning should be logged (`warnOnly: true`). 
-By default, the subset of the [available measurements](../architecture/attestation.md#runtime-measurements) that can be locally reproduced and verified is enforced. - -During attestation, the validating side (CLI or [join service](../architecture/microservices.md#joinservice)) compares each measurement reported by the issuing side (first node or joining node) individually. -For mismatching measurements that have set `warnOnly` to `true` only a warning is emitted. -For mismatching measurements that have set `warnOnly` to `false` an error is emitted and attestation fails. -If attestation fails for a new node, it isn't permitted to join the cluster. - -## The *verify* command - -:::note -The steps below are purely optional. They're automatically executed by `constellation apply` when you initialize your cluster. The `constellation verify` command mostly has an illustrative purpose. -::: - -The `verify` command obtains and verifies an attestation statement from a running Constellation cluster. - -```bash -constellation verify [--cluster-id ...] -``` - -From the attestation statement, the command verifies the following properties: - -* The cluster is using the correct Confidential VM (CVM) type. -* Inside the CVMs, the correct node images are running. The node images are identified through the measurements obtained in the previous step. -* The unique ID of the cluster matches the one from your `constellation-state.yaml` file or passed in via `--cluster-id`. - -Once the above properties are verified, you know that you are talking to the right Constellation cluster and it's in a good and trustworthy shape. - -### Custom arguments - -The `verify` command also allows you to verify any Constellation deployment that you have network access to. For this you need the following: - -* The IP address of a running Constellation cluster's [VerificationService](../architecture/microservices.md#verificationservice). The `VerificationService` is exposed via a `NodePort` service using the external IP address of your cluster. Run `kubectl get nodes -o wide` and look for `EXTERNAL-IP`. -* The cluster's *clusterID*. See [cluster identity](../architecture/keys.md#cluster-identity) for more details. -* A `constellation-conf.yaml` file with the expected measurements of the cluster in your working directory. - -For example: - -```shell-session -constellation verify -e 192.0.2.1 --cluster-id Q29uc3RlbGxhdGlvbkRvY3VtZW50YXRpb25TZWNyZXQ= -``` diff --git a/docs/versioned_docs/version-2.2/architecture/attestation.md b/docs/versioned_docs/version-2.2/architecture/attestation.md index c09d0f546..c70a61264 100644 --- a/docs/versioned_docs/version-2.2/architecture/attestation.md +++ b/docs/versioned_docs/version-2.2/architecture/attestation.md @@ -121,8 +121,8 @@ Constellation allows to specify in the config which measurements should be enfor Enforcing non-reproducible measurements controlled by the cloud provider means that changes in these values require manual updates to the cluster's config. By default, Constellation only enforces measurements that are stable values produced by the infrastructure or by Constellation directly. - - + + Constellation uses the [vTPM](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch#vtpm) feature of Azure CVMs for runtime measurements. This vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. 
@@ -152,8 +152,8 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + Constellation uses the [vTPM](https://cloud.google.com/compute/confidential-vm/docs/about-cvm) feature of CVMs on GCP for runtime measurements. Note that this vTPM doesn't run inside the hardware-protected CVM context, but is emulated by the hypervisor. @@ -185,8 +185,8 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + Constellation uses the [vTPM](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html) (NitroTPM) feature of the [AWS Nitro System](http://aws.amazon.com/ec2/nitro/) on AWS for runtime measurements. @@ -217,8 +217,8 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + ## Cluster attestation diff --git a/docs/versioned_docs/version-2.2/architecture/keys.md b/docs/versioned_docs/version-2.2/architecture/keys.md index b7d7ef6f5..aa4e35496 100644 --- a/docs/versioned_docs/version-2.2/architecture/keys.md +++ b/docs/versioned_docs/version-2.2/architecture/keys.md @@ -105,7 +105,7 @@ Initially, it will support the following KMSs: * [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/#product-overview) * [KMIP-compatible KMS](https://www.oasis-open.org/committees/tc_home.php?wg_abbrev=kmip) -Storing the keys in Cloud KMS of AWS, Azure, or GCP binds the key usage to the particular cloud identity access management (IAM). +Storing the keys in Cloud KMS of AWS, GCP, or Azure binds the key usage to the particular cloud identity access management (IAM). In the future, Constellation will support remote attestation-based access policies for Cloud KMS once available. Note that using a Cloud KMS limits the isolation and protection to the guarantees of the particular offering. diff --git a/docs/versioned_docs/version-2.2/getting-started/first-steps.md b/docs/versioned_docs/version-2.2/getting-started/first-steps.md index ef4861cbe..2850176a0 100644 --- a/docs/versioned_docs/version-2.2/getting-started/first-steps.md +++ b/docs/versioned_docs/version-2.2/getting-started/first-steps.md @@ -11,36 +11,36 @@ If you don't have a cloud subscription, check out [MiniConstellation](first-step 1. Create the configuration file for your selected cloud provider. - - + + ```bash constellation config generate azure ``` - - + + ```bash constellation config generate gcp ``` - - + + ```bash constellation config generate aws ``` - - + + This creates the file `constellation-conf.yaml` in your current working directory. 2. Fill in your cloud provider specific information. - - + + You need several resources for the cluster. You can use the following `az` script to create them: @@ -71,8 +71,8 @@ If you don't have a cloud subscription, check out [MiniConstellation](first-step Run `constellation config instance-types` to get the list of all supported options. - - + + * **subscription**: The UUID of your Azure subscription, e.g., `8b8bd01f-efd9-4113-9bd1-c82137c32da7`. @@ -118,8 +118,8 @@ If you don't have a cloud subscription, check out [MiniConstellation](first-step Run `constellation config instance-types` to get the list of all supported options. - - + + You need a service account for the cluster. 
You can use the following `gcloud` script to create it: @@ -142,18 +142,18 @@ If you don't have a cloud subscription, check out [MiniConstellation](first-step By default, Constellation uses `n2d-standard-4` VMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. Supported are all machines from the N2D family with a minimum of 4 vCPUs. Refer to [N2D machine series](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines) or run `constellation config instance-types` to get the list of all supported options. - - + + * **project**: The ID of your GCP project, e.g., `constellation-129857`. You can find it on the [welcome screen of your GCP project](https://console.cloud.google.com/welcome). For more information refer to [Google's documentation](https://support.google.com/googleapi/answer/7014113). - * **region**: The GCP region you want to deploy your cluster in, e.g., `us-central1`. + * **region**: The GCP region you want to deploy your cluster in, e.g., `us-west1`. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). - * **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-central1-a`. + * **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-west1-a`. You can find a [list of all zones in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). @@ -171,8 +171,8 @@ If you don't have a cloud subscription, check out [MiniConstellation](first-step Supported are all machines from the N2D family with a minimum of 4 vCPUs. It defaults to `n2d-standard-4` (4 vCPUs, 16 GB RAM), but you can use any other VMs from the same family. Refer to [N2D machine series](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines) or run `constellation config instance-types` to get the list of all supported options. - - + + * **region**: The name of your chosen AWS data center region, e.g., `us-east-2`. @@ -211,8 +211,8 @@ If you don't have a cloud subscription, check out [MiniConstellation](first-step Alternatively, you can create the AWS profile with a tool of your choice. Use the JSON policy in [main.tf](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam/main.tf) in the resource `aws_iam_policy.worker_node_policy`. - - + + :::info diff --git a/docs/versioned_docs/version-2.2/getting-started/install.md b/docs/versioned_docs/version-2.2/getting-started/install.md index 439b734dd..d717dcb34 100644 --- a/docs/versioned_docs/version-2.2/getting-started/install.md +++ b/docs/versioned_docs/version-2.2/getting-started/install.md @@ -11,15 +11,15 @@ Make sure the following requirements are met: - Your machine is running Linux or macOS - You have admin rights on your machine - [kubectl](https://kubernetes.io/docs/tasks/tools/) is installed -- Your CSP is Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) +- Your CSP is Microsoft Azure, Google Cloud Platform (GCP), or Amazon Web Services (AWS) ## Install the Constellation CLI The CLI executable is available at [GitHub](https://github.com/edgelesssys/constellation/releases). Install it with the following commands: - - + + 1. Download the CLI: @@ -35,8 +35,8 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-linux-amd64 /usr/local/bin/constellation ``` - - + + 1. 
Download the CLI: @@ -52,9 +52,10 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-linux-arm64 /usr/local/bin/constellation ``` - - + + + 1. Download the CLI: @@ -70,9 +71,11 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-darwin-arm64 /usr/local/bin/constellation ``` - - + + + + 1. Download the CLI: @@ -88,8 +91,8 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-darwin-amd64 /usr/local/bin/constellation ``` - - + + :::tip The CLI supports autocompletion for various shells. To set it up, run `constellation completion` and follow the given steps. @@ -105,15 +108,14 @@ If you don't have a cloud subscription, you can try [MiniConstellation](first-st ### Required permissions - - + + The following [resource providers need to be need to be registered](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/resource-providers-and-types#register-resource-provider) in your subscription: - -- `Microsoft.Compute` -- `Microsoft.ManagedIdentity` -- `Microsoft.Network` -- `microsoft.insights` +* `Microsoft.Compute` +* `Microsoft.ManagedIdentity` +* `Microsoft.Network` +* `microsoft.insights` By default, Constellation tries to register these automatically if they haven't been registered before. @@ -125,8 +127,8 @@ You need the following permissions for your user account: If you don't have these permissions with scope *subscription*, ask your administrator to [create the service account and a resource group for your Constellation cluster](first-steps.md). Your user account needs the `Contributor` permission scoped to this resource group. - - + + Create a new project for Constellation or use an existing one. Enable the [Compute Engine API](https://console.cloud.google.com/apis/library/compute.googleapis.com) on it. @@ -138,8 +140,8 @@ You need the following permissions on this project: Follow Google's guide on [understanding](https://cloud.google.com/iam/docs/understanding-roles) and [assigning roles](https://cloud.google.com/iam/docs/granting-changing-revoking-access). - - + + To set up a Constellation cluster, you need to perform two tasks that require permissions: create the infrastructure and create roles for cluster nodes. Both of these actions can be performed by different users, e.g., an administrator to create roles and a DevOps engineer to create the infrastructure. @@ -270,8 +272,8 @@ such as `PowerUserAccess`, or use the following minimal set of permissions: Follow Amazon's guide on [understanding](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) and [managing policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html). - - + + ### Authentication @@ -281,8 +283,8 @@ You need to authenticate with your CSP. The following lists the required steps f The steps for a *testing* environment are simpler. However, they may expose secrets to the CSP. If in doubt, follow the *production* steps. ::: - - + + **Testing** @@ -298,8 +300,8 @@ az login Other options are described in Azure's [authentication guide](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli). - - + + **Testing** @@ -322,8 +324,8 @@ Use one of the following options on a trusted machine: Follow [Google's guide](https://cloud.google.com/docs/authentication/production#manually) for setting up your credentials. 
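For reference, one common way to make GCP credentials available to tooling is via Application Default Credentials. A sketch, assuming the `gcloud` CLI is installed (the key-file path is illustrative; for production, prefer the service-account flow from Google's guide linked above):

```bash
# Quick interactive login, suitable for testing
gcloud auth application-default login

# Or point tools at a service-account key file created per Google's guide
export GOOGLE_APPLICATION_CREDENTIALS="$HOME/gcp-service-account-key.json"
```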
- - + + **Testing** @@ -339,9 +341,10 @@ aws configure Options and first steps are described in the [AWS CLI documentation](https://docs.aws.amazon.com/cli/index.html). - + - + + ## Next steps diff --git a/docs/versioned_docs/version-2.2/overview/clouds.md b/docs/versioned_docs/version-2.2/overview/clouds.md index c48f23cf0..01e7a00c5 100644 --- a/docs/versioned_docs/version-2.2/overview/clouds.md +++ b/docs/versioned_docs/version-2.2/overview/clouds.md @@ -24,11 +24,11 @@ The following table summarizes the state of features for different infrastructur With its [CVM offering](https://docs.microsoft.com/en-us/azure/confidential-computing/confidential-vm-overview), Azure provides the best foundations for Constellation. Regarding (3), Azure provides direct access to remote-attestation statements. However, regarding (4), the standard CVMs still include closed-source firmware running in VM Privilege Level (VMPL) 0. This firmware is signed by Azure. The signature is reflected in the remote-attestation statements of CVMs. Thus, the Azure closed-source firmware becomes part of Constellation's trusted computing base (TCB). -\* Recently, [Azure announced the open source paravisor OpenHCL](https://techcommunity.microsoft.com/blog/windowsosplatform/openhcl-the-new-open-source-paravisor/4273172). It's the foundation for fully open source and verifiable CVM firmware. Once Azure provides their CVM firmware with reproducible builds based on OpenHCL, (4) switches from *No* to *Yes*. Constellation will support OpenHCL based firmware on Azure in the future. +Recently, Azure [announced](https://techcommunity.microsoft.com/t5/azure-confidential-computing/azure-confidential-vms-using-sev-snp-dcasv5-ecasv5-are-now/ba-p/3573747) the *limited preview* of CVMs with customizable firmware. With this CVM type, (4) switches from *No* to *Yes*. Constellation will support customizable firmware on Azure in the future. ## Google Cloud Platform (GCP) -The [CVMs available in GCP](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview#amd_sev) are based on AMD SEV but don't have SNP features enabled. This impacts attestation capabilities. Currently, GCP doesn't offer CVM-based attestation at all. Instead, GCP provides attestation statements based on its regular [vTPM](https://cloud.google.com/blog/products/identity-security/virtual-trusted-platform-module-for-shielded-vms-security-in-plaintext), which is managed by the hypervisor. On GCP, the hypervisor is thus currently part of Constellation's TCB. +The [CVMs available in GCP](https://cloud.google.com/compute/confidential-vm/docs/create-confidential-vm-instance) are based on AMD SEV but don't have SNP features enabled. This impacts attestation capabilities. Currently, GCP doesn't offer CVM-based attestation at all. Instead, GCP provides attestation statements based on its regular [vTPM](https://cloud.google.com/blog/products/identity-security/virtual-trusted-platform-module-for-shielded-vms-security-in-plaintext), which is managed by the hypervisor. On GCP, the hypervisor is thus currently part of Constellation's TCB. 
## Amazon Web Services (AWS) diff --git a/docs/versioned_docs/version-2.2/overview/confidential-kubernetes.md b/docs/versioned_docs/version-2.2/overview/confidential-kubernetes.md index 1441c833a..2b6c6ed17 100644 --- a/docs/versioned_docs/version-2.2/overview/confidential-kubernetes.md +++ b/docs/versioned_docs/version-2.2/overview/confidential-kubernetes.md @@ -23,9 +23,9 @@ With the above, Constellation wraps an entire cluster into one coherent and veri ![Confidential Kubernetes](../_media/concept-constellation.svg) -## Comparison: Managed Kubernetes with CVMs +## Contrast: Managed Kubernetes with CVMs -In comparison, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. Consequently, this approach leaves a large attack surface, as is depicted in the following. +In contrast, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. Consequently, this approach leaves a large attack surface, as is depicted in the following. ![Concept: Managed Kubernetes plus CVMs](../_media/concept-managed.svg) diff --git a/docs/versioned_docs/version-2.2/overview/product.md b/docs/versioned_docs/version-2.2/overview/product.md index e42596fcc..ba7181aa9 100644 --- a/docs/versioned_docs/version-2.2/overview/product.md +++ b/docs/versioned_docs/version-2.2/overview/product.md @@ -6,6 +6,6 @@ From a security perspective, Constellation implements the [Confidential Kubernet From an operational perspective, Constellation provides the following key features: -* **Native support for different clouds**: Constellation works on Amazon Web Services (AWS), Microsoft Azure, and Google Cloud Platform (GCP). Support for OpenStack-based environments is coming with a future release. Constellation securely interfaces with the cloud infrastructure to provide [cluster autoscaling](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), [dynamic persistent volumes](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/), and [service load balancing](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). 
+* **Native support for different clouds**: Constellation works on Microsoft Azure, Google Cloud Platform (GCP), and Amazon Web Services (AWS). Support for OpenStack-based environments is coming with a future release. Constellation securely interfaces with the cloud infrastructure to provide [cluster autoscaling](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), [dynamic persistent volumes](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/), and [service load balancing](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). * **High availability**: Constellation uses a [multi-master architecture](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/) with a [stacked etcd topology](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/ha-topology/#stacked-etcd-topology) to ensure high availability. * **Integrated Day-2 operations**: Constellation lets you securely [upgrade](../workflows/upgrade.md) your cluster to a new release. It also lets you securely [recover](../workflows/recovery.md) a failed cluster. Both with a single command. diff --git a/docs/versioned_docs/version-2.2/workflows/create.md b/docs/versioned_docs/version-2.2/workflows/create.md index dcb3ff285..d886fb347 100644 --- a/docs/versioned_docs/version-2.2/workflows/create.md +++ b/docs/versioned_docs/version-2.2/workflows/create.md @@ -19,29 +19,29 @@ This step creates the necessary resources for your cluster in your cloud environ Generate a configuration file for your cloud service provider (CSP): - - + + ```bash constellation config generate azure ``` - - + + ```bash constellation config generate gcp ``` - - + + ```bash constellation config generate aws ``` - - + + This creates the file `constellation-conf.yaml` in the current directory. [Fill in your CSP-specific information](../getting-started/first-steps.md#create-a-cluster) before you continue. diff --git a/docs/versioned_docs/version-2.2/workflows/recovery.md b/docs/versioned_docs/version-2.2/workflows/recovery.md index 0fd171036..fd610fc67 100644 --- a/docs/versioned_docs/version-2.2/workflows/recovery.md +++ b/docs/versioned_docs/version-2.2/workflows/recovery.md @@ -16,8 +16,8 @@ You can check the health status of the nodes via the cloud service provider (CSP Constellation provides logging information on the boot process and status via [cloud logging](troubleshooting.md#cloud-logging). In the following, you'll find detailed descriptions for identifying clusters stuck in recovery for each CSP. - - + + In the Azure portal, find the cluster's resource group. Inside the resource group, open the control plane *Virtual machine scale set* `constellation-scale-set-controlplanes-`. @@ -51,8 +51,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. - - + + First, check that the control plane *Instance Group* has enough members in a *Ready* state. In the GCP Console, go to **Instance Groups** and check the group for the cluster's control plane `-control-plane-`. @@ -87,8 +87,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. - - + + First, open the AWS console to view all Auto Scaling Groups (ASGs) in the region of your cluster. Select the ASG of the control plane `--control-plane` and check that enough members are in a *Running* state. 
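If you prefer the command line over the console, roughly the same check can be done with the AWS CLI. A sketch, assuming the CLI is configured for the cluster's region (the name filter is illustrative):

```bash
# Show control-plane ASGs with their desired capacity and in-service instance count
aws autoscaling describe-auto-scaling-groups \
  --query "AutoScalingGroups[?contains(AutoScalingGroupName, 'control-plane')].[AutoScalingGroupName, DesiredCapacity, length(Instances[?LifecycleState=='InService'])]" \
  --output table
```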
@@ -118,8 +118,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. - - + + ## Recover a cluster diff --git a/docs/versioned_docs/version-2.2/workflows/sbom.md b/docs/versioned_docs/version-2.2/workflows/sbom.md index e8ba25a64..ec9834b4f 100644 --- a/docs/versioned_docs/version-2.2/workflows/sbom.md +++ b/docs/versioned_docs/version-2.2/workflows/sbom.md @@ -15,7 +15,7 @@ JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== -----END PUBLIC KEY----- ``` -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). +The public key is also available for download at and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). Make sure the key is available in a file named `cosign.pub` to execute the following examples. ::: @@ -36,7 +36,7 @@ cosign verify-blob --key cosign.pub --signature constellation.spdx.sbom.sig cons ### Container Images -SBOMs for container images are [attached to the image using Cosign](https://docs.sigstore.dev/cosign/signing/other_types/#sboms-software-bill-of-materials) and uploaded to the same registry. +SBOMs for container images are [attached to the image using Cosign](https://docs.sigstore.dev/signing/other_types#sboms-software-bill-of-materials) and uploaded to the same registry. As a consumer, use cosign to download and verify the SBOM: diff --git a/docs/versioned_docs/version-2.2/workflows/scale.md b/docs/versioned_docs/version-2.2/workflows/scale.md index bce045c66..3b7c0d479 100644 --- a/docs/versioned_docs/version-2.2/workflows/scale.md +++ b/docs/versioned_docs/version-2.2/workflows/scale.md @@ -48,23 +48,23 @@ kubectl -n kube-system get nodes Alternatively, you can manually scale your cluster up or down: - - + + 1. Find your Constellation resource group. 2. Select the `scale-set-workers`. 3. Go to **settings** and **scaling**. 4. Set the new **instance count** and **save**. - - + + 1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). 2. **Edit** the **worker** instance group. 3. Set the new **number of instances** and **save**. - - + + :::caution @@ -72,8 +72,8 @@ Scaling isn't yet implemented for AWS. If you require this feature, [let us know ::: - - + + ## Control-plane node scaling @@ -81,24 +81,24 @@ Control-plane nodes can **only be scaled manually and only scaled up**! To increase the number of control-plane nodes, follow these steps: - + - + 1. Find your Constellation resource group. 2. Select the `scale-set-controlplanes`. 3. Go to **settings** and **scaling**. 4. Set the new (increased) **instance count** and **save**. - - + + 1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). 2. **Edit** the **control-plane** instance group. 3. Set the new (increased) **number of instances** and **save**. - - + + :::caution @@ -106,7 +106,7 @@ Scaling isn't yet implemented for AWS. If you require this feature, [let us know ::: - - + + If you scale down the number of control-planes nodes, the removed nodes won't be able to exit the `etcd` cluster correctly. This will endanger the quorum that's required to run a stable Kubernetes control plane. 
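After scaling up, you can confirm that the additional control-plane nodes have registered and are ready. A quick sketch with `kubectl` (the label follows the upstream Kubernetes convention; older clusters may still use `node-role.kubernetes.io/master`):

```bash
# List control-plane nodes and count them
kubectl get nodes -l node-role.kubernetes.io/control-plane
kubectl get nodes -l node-role.kubernetes.io/control-plane --no-headers | wc -l
```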
diff --git a/docs/versioned_docs/version-2.2/workflows/storage.md b/docs/versioned_docs/version-2.2/workflows/storage.md index c322d97d1..878449fa5 100644 --- a/docs/versioned_docs/version-2.2/workflows/storage.md +++ b/docs/versioned_docs/version-2.2/workflows/storage.md @@ -21,14 +21,14 @@ For more details see [encrypted persistent storage](../architecture/encrypted-st Constellation supports the following drivers, which offer node-level encryption and optional integrity protection. - - + + **Constellation CSI driver for Azure Disk**: Mount Azure [Disk Storage](https://azure.microsoft.com/en-us/services/storage/disks/#overview) into your Constellation cluster. See the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-azuredisk-csi-driver) for more information. Since Azure Disks are mounted as ReadWriteOnce, they're only available to a single pod. - - + + **Constellation CSI driver for GCP Persistent Disk**: Mount [Persistent Disk](https://cloud.google.com/persistent-disk) block storage into your Constellation cluster. @@ -36,8 +36,8 @@ This includes support for [volume snapshots](https://cloud.google.com/kubernetes You can use them to bring a volume back to a prior state or provision new volumes. Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-gcp-compute-persistent-disk-csi-driver) for information about the configuration. - - + + :::caution @@ -47,8 +47,8 @@ You may use other (non-confidential) CSI drivers that are compatible with Kubern ::: - - + + Note that in case the options above aren't a suitable solution for you, Constellation is compatible with all other CSI-based storage options. For example, you can use [Azure Files](https://docs.microsoft.com/en-us/azure/storage/files/storage-files-introduction) or [GCP Filestore](https://cloud.google.com/filestore) with Constellation out of the box. Constellation is just not providing transparent encryption on the node level for these storage types yet. @@ -56,8 +56,8 @@ Note that in case the options above aren't a suitable solution for you, Constell The following installation guide gives an overview of how to securely use CSI-based cloud storage for persistent volumes in Constellation. - - + + 1. Install the driver: @@ -67,8 +67,8 @@ The following installation guide gives an overview of how to securely use CSI-ba helm install azuredisk-csi-driver charts/edgeless --namespace kube-system ``` - - + + 1. Install the driver: @@ -77,8 +77,8 @@ The following installation guide gives an overview of how to securely use CSI-ba helm install gcp-compute-persistent-disk-csi-driver charts/ --namespace kube-system ``` - - + + :::caution @@ -88,8 +88,8 @@ You may use other (non-confidential) CSI drivers that are compatible with Kubern ::: - - + + :::info @@ -160,8 +160,8 @@ The default storage class is responsible for all persistent volume claims that d The previous instructions create a storage class with encryption enabled and sets this as the default class. In case you wish to change it, follow the steps below: - - + + 1. List the storage classes in your cluster: @@ -207,8 +207,8 @@ In case you wish to change it, follow the steps below: integrity-encrypted-rwo (default) azuredisk.csi.confidential.cloud Delete Immediate false 1d ``` - - + + 1. 
List the storage classes in your cluster: @@ -254,8 +254,8 @@ In case you wish to change it, follow the steps below: integrity-encrypted-rwo (default) gcp.csi.confidential.cloud Delete Immediate false 1d ``` - - + + :::caution @@ -265,5 +265,5 @@ You may use other (non-confidential) CSI drivers that are compatible with Kubern ::: - - + + diff --git a/docs/versioned_docs/version-2.2/workflows/troubleshooting.md b/docs/versioned_docs/version-2.2/workflows/troubleshooting.md index 59015efcb..ad5e1c51b 100644 --- a/docs/versioned_docs/version-2.2/workflows/troubleshooting.md +++ b/docs/versioned_docs/version-2.2/workflows/troubleshooting.md @@ -5,7 +5,6 @@ This section aids you in finding problems when working with Constellation. ## Azure: Resource Providers can't be registered On Azure, you may receive the following error when running `create` or `terminate` with limited IAM permissions: - ```shell-session Error: Error ensuring Resource Providers are registered. @@ -22,13 +21,11 @@ To continue, please ensure that the [required resource providers](../getting-sta Afterward, set `ARM_SKIP_PROVIDER_REGISTRATION=true` as an environment variable and either run `create` or `terminate` again. For example: - ```bash ARM_SKIP_PROVIDER_REGISTRATION=true constellation create --control-plane-nodes 1 --worker-nodes 2 -y ``` Or alternatively, for `terminate`: - ```bash ARM_SKIP_PROVIDER_REGISTRATION=true constellation terminate ``` @@ -39,8 +36,8 @@ To provide information during early stages of the node's boot process, Constella You can view these information in the follow places: - - + + 1. In your Azure subscription find the Constellation resource group. 2. Inside the resource group find the Application Insights resource called `constellation-insights-*`. @@ -50,8 +47,8 @@ You can view these information in the follow places: To **find the disk UUIDs** use the following query: `traces | where message contains "Disk UUID"` - - + + 1. Select the project that hosts Constellation. 2. Go to the `Compute Engine` service. @@ -66,16 +63,16 @@ Constellation uses the default bucket to store logs. Its [default retention peri ::: - - + + 1. Open [AWS CloudWatch](https://console.aws.amazon.com/cloudwatch/home) 2. Select [Log Groups](https://console.aws.amazon.com/cloudwatch/home#logsV2:log-groups) 3. Select the log group that matches the name of your cluster. 4. Select the log stream for control or worker type nodes. - - + + ## Connect to nodes via SSH diff --git a/docs/versioned_docs/version-2.2/workflows/trusted-launch.md b/docs/versioned_docs/version-2.2/workflows/trusted-launch.md index 11d0a096c..13bd63ba6 100644 --- a/docs/versioned_docs/version-2.2/workflows/trusted-launch.md +++ b/docs/versioned_docs/version-2.2/workflows/trusted-launch.md @@ -14,7 +14,7 @@ Constellation supports trusted launch VMs with instance types `Standard_D*_v4` a Azure currently doesn't support [community galleries for trusted launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/share-gallery-community). Thus, you need to manually import the Constellation node image into your cloud subscription. -The latest image is available at `https://cdn.confidential.cloud/constellation/images/azure/trusted-launch/v2.2.0/constellation.img`. Simply adjust the version number to download a newer version. +The latest image is available at . Simply adjust the version number to download a newer version. After you've downloaded the image, create a resource group `constellation-images` in your Azure subscription and import the image. 
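For instance, the download and resource-group creation could look like this. A sketch, assuming the Azure CLI is installed and you're logged in (the location is illustrative; the import itself is handled by the script shown next):

```bash
# Download the trusted-launch node image (adjust the version as needed)
curl -LO https://cdn.confidential.cloud/constellation/images/azure/trusted-launch/v2.2.0/constellation.img

# Create the resource group that will hold the imported image
az group create --name constellation-images --location westeurope
```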
You can use a script to do this: @@ -26,7 +26,6 @@ AZURE_IMAGE_VERSION=2.2.0 AZURE_RESOURCE_GROUP_NAME=constellation-images AZURE_I ``` The script creates the following resources: - 1. A new image gallery with the default name `constellation-import` 2. A new image definition with the default name `constellation` 3. The actual image with the provided version. In this case `2.2.0` diff --git a/docs/versioned_docs/version-2.2/workflows/verify-cli.md b/docs/versioned_docs/version-2.2/workflows/verify-cli.md index 52ed24d95..0a52fedd4 100644 --- a/docs/versioned_docs/version-2.2/workflows/verify-cli.md +++ b/docs/versioned_docs/version-2.2/workflows/verify-cli.md @@ -1,6 +1,6 @@ # Verify the CLI -Edgeless Systems uses [sigstore](https://www.sigstore.dev/) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/cosign/signing/overview/), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at `https://rekor.sigstore.dev`. +Edgeless Systems uses [sigstore](https://www.sigstore.dev/) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/signing/quickstart), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at . :::note The public key for Edgeless Systems' long-term code-signing key is: @@ -12,7 +12,7 @@ JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== -----END PUBLIC KEY----- ``` -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). +The public key is also available for download at and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). ::: The Rekor transparency log is a public append-only ledger that verifies and records signatures and associated metadata. The Rekor transparency log enables everyone to observe the sequence of (software) signatures issued by Edgeless Systems and many other parties. The transparency log allows for the public identification of dubious or malicious signatures. @@ -25,7 +25,7 @@ You don't need to verify the Constellation node images. This is done automatical ## Verify the signature -First, [install the Cosign CLI](https://docs.sigstore.dev/cosign/system_config/installation/). Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example: +First, [install the Cosign CLI](https://docs.sigstore.dev/system_config/installation). 
Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example: ```shell-session $ cosign verify-blob --key https://edgeless.systems/es.pub --signature constellation-linux-amd64.sig constellation-linux-amd64 diff --git a/docs/versioned_docs/version-2.20/_media/SLSA-Badge-full-level3.svg b/docs/versioned_docs/version-2.20/_media/SLSA-Badge-full-level3.svg deleted file mode 100644 index 7154d4a13..000000000 --- a/docs/versioned_docs/version-2.20/_media/SLSA-Badge-full-level3.svg +++ /dev/null @@ -1,47 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/docs/versioned_docs/version-2.20/_media/benchmark_fio_azure_bw.png b/docs/versioned_docs/version-2.20/_media/benchmark_fio_azure_bw.png deleted file mode 100644 index a82ebe2d0..000000000 Binary files a/docs/versioned_docs/version-2.20/_media/benchmark_fio_azure_bw.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.20/_media/benchmark_fio_azure_iops.png b/docs/versioned_docs/version-2.20/_media/benchmark_fio_azure_iops.png deleted file mode 100644 index 1723257a8..000000000 Binary files a/docs/versioned_docs/version-2.20/_media/benchmark_fio_azure_iops.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.20/_media/benchmark_fio_gcp_bw.png b/docs/versioned_docs/version-2.20/_media/benchmark_fio_gcp_bw.png deleted file mode 100644 index 4f0ecc94b..000000000 Binary files a/docs/versioned_docs/version-2.20/_media/benchmark_fio_gcp_bw.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.20/_media/benchmark_fio_gcp_iops.png b/docs/versioned_docs/version-2.20/_media/benchmark_fio_gcp_iops.png deleted file mode 100644 index 571086da2..000000000 Binary files a/docs/versioned_docs/version-2.20/_media/benchmark_fio_gcp_iops.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.20/_media/benchmark_net_p2p_azure.png b/docs/versioned_docs/version-2.20/_media/benchmark_net_p2p_azure.png deleted file mode 100644 index 9130349c7..000000000 Binary files a/docs/versioned_docs/version-2.20/_media/benchmark_net_p2p_azure.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.20/_media/benchmark_net_p2p_gcp.png b/docs/versioned_docs/version-2.20/_media/benchmark_net_p2p_gcp.png deleted file mode 100644 index a41557e96..000000000 Binary files a/docs/versioned_docs/version-2.20/_media/benchmark_net_p2p_gcp.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.20/_media/benchmark_net_p2svc_azure.png b/docs/versioned_docs/version-2.20/_media/benchmark_net_p2svc_azure.png deleted file mode 100644 index d83e17f5a..000000000 Binary files a/docs/versioned_docs/version-2.20/_media/benchmark_net_p2svc_azure.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.20/_media/benchmark_net_p2svc_gcp.png b/docs/versioned_docs/version-2.20/_media/benchmark_net_p2svc_gcp.png deleted file mode 100644 index 55916a1de..000000000 Binary files a/docs/versioned_docs/version-2.20/_media/benchmark_net_p2svc_gcp.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.20/_media/benchmark_vault/5replicas/max_latency.png b/docs/versioned_docs/version-2.20/_media/benchmark_vault/5replicas/max_latency.png deleted file mode 100644 index 696250181..000000000 Binary files a/docs/versioned_docs/version-2.20/_media/benchmark_vault/5replicas/max_latency.png and /dev/null differ diff --git 
a/docs/versioned_docs/version-2.20/_media/benchmark_vault/5replicas/mean_latency.png b/docs/versioned_docs/version-2.20/_media/benchmark_vault/5replicas/mean_latency.png deleted file mode 100644 index 3b43298ac..000000000 Binary files a/docs/versioned_docs/version-2.20/_media/benchmark_vault/5replicas/mean_latency.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.20/_media/benchmark_vault/5replicas/min_latency.png b/docs/versioned_docs/version-2.20/_media/benchmark_vault/5replicas/min_latency.png deleted file mode 100644 index 1046df67e..000000000 Binary files a/docs/versioned_docs/version-2.20/_media/benchmark_vault/5replicas/min_latency.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.20/_media/benchmark_vault/5replicas/p99_latency.png b/docs/versioned_docs/version-2.20/_media/benchmark_vault/5replicas/p99_latency.png deleted file mode 100644 index 0190118b2..000000000 Binary files a/docs/versioned_docs/version-2.20/_media/benchmark_vault/5replicas/p99_latency.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.20/_media/concept-constellation.svg b/docs/versioned_docs/version-2.20/_media/concept-constellation.svg deleted file mode 100644 index 30d32bf6d..000000000 --- a/docs/versioned_docs/version-2.20/_media/concept-constellation.svg +++ /dev/null @@ -1,460 +0,0 @@ - - diff --git a/docs/versioned_docs/version-2.20/_media/concept-managed.svg b/docs/versioned_docs/version-2.20/_media/concept-managed.svg deleted file mode 100644 index 5645a608f..000000000 --- a/docs/versioned_docs/version-2.20/_media/concept-managed.svg +++ /dev/null @@ -1,591 +0,0 @@ - - diff --git a/docs/versioned_docs/version-2.20/_media/constellation_oneline.svg b/docs/versioned_docs/version-2.20/_media/constellation_oneline.svg deleted file mode 100644 index 4e354958a..000000000 --- a/docs/versioned_docs/version-2.20/_media/constellation_oneline.svg +++ /dev/null @@ -1,52 +0,0 @@ - - - - - - - - diff --git a/docs/versioned_docs/version-2.20/_media/example-emojivoto.jpg b/docs/versioned_docs/version-2.20/_media/example-emojivoto.jpg deleted file mode 100644 index 4be0d5b26..000000000 Binary files a/docs/versioned_docs/version-2.20/_media/example-emojivoto.jpg and /dev/null differ diff --git a/docs/versioned_docs/version-2.20/_media/example-online-boutique.jpg b/docs/versioned_docs/version-2.20/_media/example-online-boutique.jpg deleted file mode 100644 index 026f0d865..000000000 Binary files a/docs/versioned_docs/version-2.20/_media/example-online-boutique.jpg and /dev/null differ diff --git a/docs/versioned_docs/version-2.20/_media/recovery-gcp-serial-console-link.png b/docs/versioned_docs/version-2.20/_media/recovery-gcp-serial-console-link.png deleted file mode 100644 index eb67f0e99..000000000 Binary files a/docs/versioned_docs/version-2.20/_media/recovery-gcp-serial-console-link.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.20/_media/tcb.svg b/docs/versioned_docs/version-2.20/_media/tcb.svg deleted file mode 100644 index e5bcb5b95..000000000 --- a/docs/versioned_docs/version-2.20/_media/tcb.svg +++ /dev/null @@ -1,535 +0,0 @@ - - diff --git a/docs/versioned_docs/version-2.20/architecture/attestation.md b/docs/versioned_docs/version-2.20/architecture/attestation.md deleted file mode 100644 index 9bd157460..000000000 --- a/docs/versioned_docs/version-2.20/architecture/attestation.md +++ /dev/null @@ -1,409 +0,0 @@ -# Attestation - -This page explains Constellation's attestation process and highlights the cornerstones of its trust model. 
- -## Terms - -The following lists terms and concepts that help to understand the attestation concept of Constellation. - -### Trusted Platform Module (TPM) - -A TPM chip is a dedicated tamper-resistant crypto-processor. -It can securely store artifacts such as passwords, certificates, encryption keys, or *runtime measurements* (more on this below). -When a TPM is implemented in software, it's typically called a *virtual* TPM (vTPM). - -### Runtime measurement - -A runtime measurement is a cryptographic hash of the memory pages of a so called *runtime component*. Runtime components of interest typically include a system's bootloader or OS kernel. - -### Platform Configuration Register (PCR) - -A Platform Configuration Register (PCR) is a memory location in the TPM that has some unique properties. -To store a new value in a PCR, the existing value is extended with a new value as follows: - -``` -PCR[N] = HASHalg( PCR[N] || ArgumentOfExtend ) -``` - -The PCRs are typically used to store runtime measurements. -The new value of a PCR is always an extension of the existing value. -Thus, storing the measurements of multiple components into the same PCR irreversibly links them together. - -### Measured boot - -Measured boot builds on the concept of chained runtime measurements. -Each component in the boot chain loads and measures the next component into the PCR before executing it. -By comparing the resulting PCR values against trusted reference values, the integrity of the entire boot chain and thereby the running system can be ensured. - -### Remote attestation (RA) - -Remote attestation is the process of verifying certain properties of an application or platform, such as integrity and confidentiality, from a remote location. -In the case of a measured boot, the goal is to obtain a signed attestation statement on the PCR values of the boot measurements. -The statement can then be verified and compared to a set of trusted reference values. -This way, the integrity of the platform can be ensured before sharing secrets with it. - -### Confidential virtual machine (CVM) - -Confidential computing (CC) is the protection of data in-use with hardware-based trusted execution environments (TEEs). -With CVMs, TEEs encapsulate entire virtual machines and isolate them against the hypervisor, other VMs, and direct memory access. -After loading the initial VM image into encrypted memory, the hypervisor calls for a secure processor to measure these initial memory pages. -The secure processor locks these pages and generates an attestation report on the initial page measurements. -CVM memory pages are encrypted with a key that resides inside the secure processor, which makes sure only the guest VM can access them. -The attestation report is signed by the secure processor and can be verified using remote attestation via the certificate authority of the hardware vendor. -Such an attestation statement guarantees the confidentiality and integrity of a CVM. - -### Attested TLS (aTLS) - -In a CC environment, attested TLS (aTLS) can be used to establish secure connections between two parties using the remote attestation features of the CC components. - -aTLS modifies the TLS handshake by embedding an attestation statement into the TLS certificate. -Instead of relying on a certificate authority, aTLS uses this attestation statement to establish trust in the certificate. 
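To make the PCR-extension operation above concrete, here is a minimal sketch of the same calculation with standard shell tools, using SHA-256 as the hash algorithm (`bootloader.efi` is a hypothetical stand-in for whichever component is being measured):

```bash
# new_pcr = SHA256( old_pcr || measurement )
old_pcr=$(printf '0%.0s' {1..64})                         # fresh PCR: 32 zero bytes, hex-encoded
measurement=$(sha256sum bootloader.efi | cut -d' ' -f1)   # digest of the component being measured
new_pcr=$(printf '%s%s' "$old_pcr" "$measurement" | xxd -r -p | sha256sum | cut -d' ' -f1)
echo "$new_pcr"
```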
- -The protocol can be used by clients to verify a server certificate, by a server to verify a client certificate, or for mutual verification (mutual aTLS). - -## Overview - -The challenge for Constellation is to lift a CVM's attestation statement to the Kubernetes software layer and make it end-to-end verifiable. -From there, Constellation needs to expand the attestation from a single CVM to the entire cluster. - -The [*JoinService*](microservices.md#joinservice) and [*VerificationService*](microservices.md#verificationservice) are where all runs together. -Internally, the *JoinService* uses remote attestation to securely join CVM nodes to the cluster. -Externally, the *VerificationService* provides an attestation statement for the cluster's CVMs and configuration. - -The following explains the details of both steps. - -## Node attestation - -The idea is that Constellation nodes should have verifiable integrity from the CVM hardware measurement up to the Kubernetes software layer. -The solution is a verifiable boot chain and an integrity-protected runtime environment. - -Constellation uses measured boot within CVMs, measuring each component in the boot process before executing it. -Outside of CC, this is usually implemented via TPMs. -CVM technologies differ in how they implement runtime measurements, but the general concepts are similar to those of a TPM. -For simplicity, TPM terminology like *PCR* is used in the following. - -When a Constellation node image boots inside a CVM, measured boot is used for all stages and components of the boot chain. -This process goes up to the root filesystem. -The root filesystem is mounted read-only with integrity protection. -For the details on the image and boot stages see the [image architecture](../architecture/images.md) documentation. -Any changes to the image will inevitably also change the corresponding PCR values. -To create a node attestation statement, the Constellation image obtains a CVM attestation statement from the hardware. -This includes the runtime measurements and thereby binds the measured boot results to the CVM hardware measurement. - -In addition to the image measurements, Constellation extends a PCR during the [initialization phase](../workflows/create.md) that irrevocably marks the node as initialized. -The measurement is created using the [*clusterID*](../architecture/keys.md#cluster-identity), tying all future attestation statements to this ID. -Thereby, an attestation statement is unique for every cluster and a node can be identified unambiguously as being initialized. - -To verify an attestation, the hardware's signature and a statement are verified first to establish trust in the contained runtime measurements. -If successful, the measurements are verified against the trusted values of the particular Constellation release version. -Finally, the measurement of the *clusterID* can be compared by calculating it with the [master secret](keys.md#master-secret). - -### Runtime measurements - -Constellation uses runtime measurements to implement the measured boot approach. -As stated above, the underlying hardware technology and guest firmware differ in their implementations of runtime measurements. -The following gives a detailed description of the available measurements in the different cloud environments. 
- -The runtime measurements consist of two types of values: - -* **Measurements produced by the cloud infrastructure and firmware of the CVM**: -These are measurements of closed-source firmware and other values controlled by the cloud provider. -While not being reproducible for the user, some of them can be compared against previously observed values. -Others may change frequently and aren't suitable for verification. -The [signed image measurements](#chain-of-trust) include measurements that are known, previously observed values. - -* **Measurements produced by the Constellation bootloader and boot chain**: -The Constellation Bootloader takes over from the CVM firmware and [measures the rest of the boot chain](images.md). -The Constellation [Bootstrapper](microservices.md#bootstrapper) is the first user mode component that runs in a Constellation image. -It extends PCR registers with the [IDs](keys.md#cluster-identity) of the cluster marking a node as initialized. - -Constellation allows to specify in the config which measurements should be enforced during the attestation process. -Enforcing non-reproducible measurements controlled by the cloud provider means that changes in these values require manual updates to the cluster's config. -By default, Constellation only enforces measurements that are stable values produced by the infrastructure or by Constellation directly. - - - - -Constellation uses the [vTPM](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html) (NitroTPM) feature of the [AWS Nitro System](http://aws.amazon.com/ec2/nitro/) on AWS for runtime measurements. - -The vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. -The VMs are attested by obtaining signed PCR values over the VM's boot configuration from the TPM and comparing them to a known, good state (measured boot). - -The following table lists all PCR values of the vTPM and the measured components. -It also lists what components of the boot chain did the measurements and if the value is reproducible and verifiable. -The latter means that the value can be generated offline and compared to the one in the vTPM. - -| PCR | Components | Measured by | Reproducible and verifiable | -| ----------- | ---------------------------------------------------------------- | -------------------------------------- | --------------------------- | -| 0 | Firmware | AWS | No | -| 1 | Firmware | AWS | No | -| 2 | Firmware | AWS | No | -| 3 | Firmware | AWS | No | -| 4 | Constellation Bootloader, Kernel, initramfs, Kernel command line | AWS, Constellation Bootloader | Yes | -| 5 | Firmware | AWS | No | -| 6 | Firmware | AWS | No | -| 7 | Secure Boot Policy | AWS, Constellation Bootloader | No | -| 8 | - | - | - | -| 9 | initramfs, Kernel command line | Linux Kernel | Yes | -| 10 | User space | Linux IMA | No[^1] | -| 11 | Unified Kernel Image components | Constellation Bootloader | Yes | -| 12 | Reserved | (User space, Constellation Bootloader) | Yes | -| 13 | Reserved | (Constellation Bootloader) | Yes | -| 14 | Secure Boot State | Constellation Bootloader | No | -| 15 | ClusterID | Constellation Bootstrapper | Yes | -| 16–23 | Unused | - | - | - - - - -Constellation uses the [vTPM](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch#vtpm) feature of Azure CVMs for runtime measurements. -This vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. 
-It provides a [measured boot](https://docs.microsoft.com/en-us/azure/security/fundamentals/measured-boot-host-attestation#measured-boot) verification that's based on the trusted launch feature of [Trusted Launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch). - -The following table lists all PCR values of the vTPM and the measured components. -It also lists what components of the boot chain did the measurements and if the value is reproducible and verifiable. -The latter means that the value can be generated offline and compared to the one in the vTPM. - -| PCR | Components | Measured by | Reproducible and verifiable | -| ----------- | ---------------------------------------------------------------- | -------------------------------------- | --------------------------- | -| 0 | Firmware | Azure | No | -| 1 | Firmware | Azure | No | -| 2 | Firmware | Azure | No | -| 3 | Firmware | Azure | No | -| 4 | Constellation Bootloader, Kernel, initramfs, Kernel command line | Azure, Constellation Bootloader | Yes | -| 5 | Reserved | Azure | No | -| 6 | VM Unique ID | Azure | No | -| 7 | Secure Boot State | Azure, Constellation Bootloader | No | -| 8 | - | - | - | -| 9 | initramfs, Kernel command line | Linux Kernel | Yes | -| 10 | User space | Linux IMA | No[^1] | -| 11 | Unified Kernel Image components | Constellation Bootloader | Yes | -| 12 | Reserved | (User space, Constellation Bootloader) | Yes | -| 13 | Reserved | (Constellation Bootloader) | Yes | -| 14 | Secure Boot State | Constellation Bootloader | No | -| 15 | ClusterID | Constellation Bootstrapper | Yes | -| 16–23 | Unused | - | - | - - - - -Constellation uses the [vTPM](https://cloud.google.com/compute/confidential-vm/docs/about-cvm) feature of CVMs on GCP for runtime measurements. -Note that this vTPM doesn't run inside the hardware-protected CVM context, but is emulated by the hypervisor. - -The vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. -It provides a [launch attestation report](https://cloud.google.com/compute/confidential-vm/docs/monitoring#about_launch_attestation_report_events) that's based on the measured boot feature of [Shielded VMs](https://cloud.google.com/compute/shielded-vm/docs/shielded-vm#measured-boot). - -The following table lists all PCR values of the vTPM and the measured components. -It also lists what components of the boot chain did the measurements and if the value is reproducible and verifiable. -The latter means that the value can be generated offline and compared to the one in the vTPM. 
- -| PCR | Components | Measured by | Reproducible and verifiable | -| ----------- | ---------------------------------------------------------------- | -------------------------------------- | --------------------------- | -| 0 | CVM version and technology | GCP | No | -| 1 | Firmware | GCP | No | -| 2 | Firmware | GCP | No | -| 3 | Firmware | GCP | No | -| 4 | Constellation Bootloader, Kernel, initramfs, Kernel command line | GCP, Constellation Bootloader | Yes | -| 5 | Disk GUID partition table | GCP | No | -| 6 | Disk GUID partition table | GCP | No | -| 7 | GCP Secure Boot Policy | GCP, Constellation Bootloader | No | -| 8 | - | - | - | -| 9 | initramfs, Kernel command line | Linux Kernel | Yes | -| 10 | User space | Linux IMA | No[^1] | -| 11 | Unified Kernel Image components | Constellation Bootloader | Yes | -| 12 | Reserved | (User space, Constellation Bootloader) | Yes | -| 13 | Reserved | (Constellation Bootloader) | Yes | -| 14 | Secure Boot State | Constellation Bootloader | No | -| 15 | ClusterID | Constellation Bootstrapper | Yes | -| 16–23 | Unused | - | - | - - - - -Constellation uses a hypervisor-based vTPM for runtime measurements. - -The vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. -The VMs are attested by obtaining signed PCR values over the VM's boot configuration from the TPM and comparing them to a known, good state (measured boot). - -The following table lists all PCR values of the vTPM and the measured components. -It also lists what components of the boot chain did the measurements and if the value is reproducible and verifiable. -The latter means that the value can be generated offline and compared to the one in the vTPM. - -| PCR | Components | Measured by | Reproducible and verifiable | -| ----------- | ---------------------------------------------------------------- | -------------------------------------- | --------------------------- | -| 0 | Firmware | STACKIT | No | -| 1 | Firmware | STACKIT | No | -| 2 | Firmware | STACKIT | No | -| 3 | Firmware | STACKIT | No | -| 4 | Constellation Bootloader, Kernel, initramfs, Kernel command line | STACKIT, Constellation Bootloader | Yes | -| 5 | Firmware | STACKIT | No | -| 6 | Firmware | STACKIT | No | -| 7 | Secure Boot Policy | STACKIT, Constellation Bootloader | No | -| 8 | - | - | - | -| 9 | initramfs, Kernel command line | Linux Kernel | Yes | -| 10 | User space | Linux IMA | No[^1] | -| 11 | Unified Kernel Image components | Constellation Bootloader | Yes | -| 12 | Reserved | (User space, Constellation Bootloader) | Yes | -| 13 | Reserved | (Constellation Bootloader) | Yes | -| 14 | Secure Boot State | Constellation Bootloader | No | -| 15 | ClusterID | Constellation Bootstrapper | Yes | -| 16–23 | Unused | - | - | - - - - -### CVM verification - -To verify the integrity of the received attestation statement, a chain of trust from the CVM technology to the interface providing the statement has to be established. -For verification of the CVM technology, Constellation may expose additional options in its config file. - - - - -On AWS, AMD SEV-SNP is used to provide runtime encryption to the VMs. -An SEV-SNP attestation report is used to establish trust in the VM. -You may customize certain parameters for verification of the attestation statement using the Constellation config file. - -* TCB versions - - You can set the minimum version numbers of components in the SEV-SNP TCB. 
- Use the latest versions to enforce that only machines with the most recent firmware updates are allowed to join the cluster. - Alternatively, you can set a lower minimum version to allow slightly out-of-date machines to still be able to join the cluster. - -* AMD Root Key Certificate - - This certificate is the root of trust for verifying the SEV-SNP certificate chain. - -* AMD Signing Key Certificate - - This is the intermediate certificate for verifying the SEV-SNP report's signature. - If it's not specified, the CLI fetches it from the AMD key distribution server. - - - - -On Azure, AMD SEV-SNP is used to provide runtime encryption to the VMs. -An SEV-SNP attestation report is used to establish trust in the vTPM running inside the VM. -You may customize certain parameters for verification of the attestation statement using the Constellation config file. - -* TCB versions - - You can set the minimum version numbers of components in the SEV-SNP TCB. - Use the latest versions to enforce that only machines with the most recent firmware updates are allowed to join the cluster. - Alternatively, you can set a lower minimum version to allow slightly out-of-date machines to still be able to join the cluster. - -* AMD Root Key Certificate - - This certificate is the root of trust for verifying the SEV-SNP certificate chain. - -* Firmware Signer - - This config option allows you to specify how the firmware signer should be verified. - More explicitly, it controls the verification of the `IDKeyDigest` value in the SEV-SNP attestation report. - You can provide a list of accepted key digests and specify a policy on how this list is compared against the reported `IDKeyDigest`. - - - - -On GCP, AMD SEV-SNP is used to provide runtime encryption to the VMs. -An SEV-SNP attestation report is used to establish trust in the VM. -You may customize certain parameters for verification of the attestation statement using the Constellation config file. - -* TCB versions - - You can set the minimum version numbers of components in the SEV-SNP TCB. - Use the latest versions to enforce that only machines with the most recent firmware updates are allowed to join the cluster. - Alternatively, you can set a lower minimum version to allow slightly out-of-date machines to still be able to join the cluster. - -* AMD Root Key Certificate - - This certificate is the root of trust for verifying the SEV-SNP certificate chain. - -* AMD Signing Key Certificate - - This is the intermediate certificate for verifying the SEV-SNP report's signature. - If it's not specified, the CLI fetches it from the AMD key distribution server. - - - - -On STACKIT, AMD SEV-ES is used to provide runtime encryption to the VMs. -The hypervisor-based vTPM is used to establish trust in the VM via [runtime measurements](#runtime-measurements). -There is no additional configuration available for STACKIT. - - - - -## Cluster attestation - -Cluster-facing, Constellation's [*JoinService*](microservices.md#joinservice) verifies each node joining the cluster given the configured ground truth runtime measurements. -User-facing, the [*VerificationService*](microservices.md#verificationservice) provides an interface to verify a node using remote attestation. -By verifying the first node during the [initialization](microservices.md#bootstrapper) and configuring the ground truth measurements that are subsequently enforced by the *JoinService*, the whole cluster is verified in a transitive way. 
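-
-For illustration, the cluster-facing check happens automatically whenever a node joins, while the user-facing check can be triggered from the CLI workspace. A minimal sketch, assuming a cluster ID taken from the CLI output and an example node endpoint (the exact flags and port are described in the verification workflow and CLI reference and may differ between versions):
-
-```bash
-# Hypothetical example values: replace the cluster ID and endpoint with those of your cluster.
-constellation verify \
-  --cluster-id "g6iMP5wRU1b7mpOz2WEISlIYSfdAhB0oNaOg6XEwKFY=" \
-  --node-endpoint 203.0.113.10:30081
-```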
-
-### Cluster-facing attestation
-
-The *JoinService* is provided with the runtime measurements of the whitelisted Constellation image version as the ground truth.
-During the initialization and the cluster bootstrapping, each node connects to the *JoinService* using [aTLS](#attested-tls-atls).
-During the handshake, the node transmits an attestation statement including its runtime measurements.
-The *JoinService* verifies that statement and compares the measurements against the ground truth.
-For details of the initialization process, check the [microservice descriptions](microservices.md).
-
-After the initialization, every node updates its runtime measurements with the *clusterID* value, marking it irreversibly as initialized.
-When an initialized node tries to join another cluster, its measurements inevitably mismatch the measurements of an uninitialized node and it will be declined.
-
-### User-facing attestation
-
-The [*VerificationService*](microservices.md#verificationservice) provides an endpoint for obtaining its hardware-based remote attestation statement, which includes the runtime measurements.
-A user can [verify](../workflows/verify-cluster.md) this statement and compare the measurements against the configured ground truth and, thus, verify the identity and integrity of all Constellation components and the cluster configuration. Subsequently, the user knows that the entire cluster is in the expected state and is trustworthy.
-
-## Putting it all together
-
-This section puts the aforementioned concepts together and illustrates how trust in a Constellation cluster is established and maintained.
-
-### CLI and node images
-
-It all starts with the CLI executable. The CLI is signed by Edgeless Systems. To ensure non-repudiability for CLI releases, Edgeless Systems publishes corresponding signatures to the public ledger of the [sigstore project](https://www.sigstore.dev/). There's a [step-by-step guide](../workflows/verify-cli.md) on how to verify CLI signatures based on sigstore.
-
-The CLI contains the latest runtime measurements of the Constellation node image for all supported cloud platforms. In case a different version of the node image is to be used, the corresponding runtime measurements can be fetched using the CLI's [fetch-measurements command](../reference/cli.md#constellation-config-fetch-measurements). This command downloads the runtime measurements and the corresponding signature from cdn.confidential.cloud. See for example the following files corresponding to node image v2.16.3:
-
-* [Measurements](https://cdn.confidential.cloud/constellation/v2/ref/-/stream/stable/v2.16.3/image/measurements.json)
-* [Signature](https://cdn.confidential.cloud/constellation/v2/ref/-/stream/stable/v2.16.3/image/measurements.json.sig)
-
-The CLI contains the long-term public key of Edgeless Systems to verify the signature of downloaded runtime measurements.
-
-### Cluster creation
-
-When a cluster is [created](../workflows/create.md), the CLI automatically verifies the runtime measurements of the *first node* using remote attestation. Based on this, the CLI and the first node set up a temporary TLS connection. This [aTLS](#attested-tls-atls) connection is used for two things:
-
-1. The CLI sends the [master secret](../architecture/keys.md#master-secret) of the to-be-created cluster to the first node. The master secret is generated by the CLI.
-2. The first node sends a [kubeconfig file](https://www.redhat.com/sysadmin/kubeconfig) with Kubernetes credentials to the CLI.
-
-After this, the aTLS connection is closed and the first node bootstraps the Kubernetes cluster. All subsequent interactions between the CLI and the cluster go via the [Kubernetes API](https://kubernetes.io/docs/concepts/overview/kubernetes-api/) server running inside the cluster. The CLI (and other tools like kubectl) use the credentials referenced by the kubeconfig file to authenticate themselves to the Kubernetes API server and to establish an mTLS connection.
-
-The CLI connects to the Kubernetes API to write the runtime measurements for the applicable node image to etcd. The JoinService uses these runtime measurements to verify all nodes that join the cluster subsequently.
-
-### Chain of trust
-
-In summary, there's a chain of trust based on cryptographic signatures that goes from the user to the cluster via the CLI. This is illustrated in the following diagram.
-
-```mermaid
-flowchart LR
-    A[User]-- "verifies" -->B[CLI]
-    B[CLI]-- "verifies" -->C([Runtime measurements])
-    D[Edgeless Systems]-- "signs" -->B[CLI]
-    D[Edgeless Systems]-- "signs" -->C([Runtime measurements])
-    B[CLI]-- "verifies (remote attestation)" -->E[First node]
-    E[First node]-- "verifies (remote attestation)" -->F[Other nodes]
-    C([Runtime measurements]) -.-> E[First node]
-    C([Runtime measurements]) -.-> F[Other nodes]
-```
-
-### Upgrades
-
-Whenever a cluster is [upgraded](../workflows/upgrade.md) to a new version of the node image, the CLI sends the corresponding runtime measurements via the Kubernetes API server. The new runtime measurements are stored in etcd within the cluster and replace any previous runtime measurements. They're then used automatically by the JoinService for the verification of new nodes.
-
-## References
-
-[^1]: Linux IMA produces runtime measurements of user-space binaries.
-However, these measurements aren't deterministic and thus, PCR\[10] can't be compared to a constant value.
-Instead, a policy engine must be used to verify the TPM event log against a policy.
diff --git a/docs/versioned_docs/version-2.20/architecture/encrypted-storage.md b/docs/versioned_docs/version-2.20/architecture/encrypted-storage.md
deleted file mode 100644
index f047fa4a9..000000000
--- a/docs/versioned_docs/version-2.20/architecture/encrypted-storage.md
+++ /dev/null
@@ -1,62 +0,0 @@
-# Encrypted persistent storage
-
-Confidential VMs provide runtime memory encryption to protect data in use.
-In the context of Kubernetes, this is sufficient for the confidentiality and integrity of stateless services.
-Consider a front-end web server, for example, that keeps all connection information cached in main memory.
-No sensitive data is ever written to an insecure medium.
-However, many real-world applications need some form of state or data-lake service that's connected to a persistent storage device and requires encryption at rest.
-As described in [Use persistent storage](../workflows/storage.md), cloud service providers (CSPs) use the container storage interface (CSI) to make their storage solutions available to Kubernetes workloads.
-These CSI storage solutions often support some sort of encryption.
-For example, Google Cloud [encrypts data at rest by default](https://cloud.google.com/security/encryption/default-encryption), without any action required by the customer.
-
-## Cloud provider-managed encryption
-
-CSP-managed storage solutions encrypt the data in the cloud backend before writing it physically to disk.
-In the context of confidential computing and Constellation, the CSP and its managed services aren't trusted. -Hence, cloud provider-managed encryption protects your data from offline hardware access to physical storage devices. -It doesn't protect it from anyone with infrastructure-level access to the storage backend or a malicious insider in the cloud platform. -Even with "bring your own key" or similar concepts, the CSP performs the encryption process with access to the keys and plaintext data. - -In the security model of Constellation, securing persistent storage and thereby data at rest requires that all cryptographic operations are performed inside a trusted execution environment. -Consequently, using CSP-managed encryption of persistent storage usually isn't an option. - -## Constellation-managed encryption - -Constellation provides CSI drivers for storage solutions in all major clouds with built-in encryption support. -Block storage provisioned by the CSP is [mapped](https://guix.gnu.org/manual/en/html_node/Mapped-Devices.html) using the [dm-crypt](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/dm-crypt.html), and optionally the [dm-integrity](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/dm-integrity.html), kernel modules, before it's formatted and accessed by the Kubernetes workloads. -All cryptographic operations happen inside the trusted environment of the confidential Constellation node. - -Note that for integrity-protected disks, [volume expansion](https://kubernetes.io/blog/2018/07/12/resizing-persistent-volumes-using-kubernetes/) isn't supported. - -By default the driver uses data encryption keys (DEKs) issued by the Constellation [*KeyService*](microservices.md#keyservice). -The DEKs are in turn derived from the Constellation's key encryption key (KEK), which is directly derived from the [master secret](keys.md#master-secret). -This is the recommended mode of operation, and also requires the least amount of setup by the cluster administrator. - -Alternatively, the driver can be configured to use a key management system to store and access KEKs and DEKs. - -Refer to [keys and cryptography](keys.md) for more details on key management in Constellation. - -Once deployed and configured, the CSI driver ensures transparent encryption and integrity of all persistent volumes provisioned via its storage class. -Data at rest is secured without any additional actions required by the developer. - -## Cryptographic algorithms - -This section gives an overview of the libraries, cryptographic algorithms, and their configurations, used in Constellation's CSI drivers. - -### dm-crypt - -To interact with the dm-crypt kernel module, Constellation uses [libcryptsetup](https://gitlab.com/cryptsetup/cryptsetup/). -New devices are formatted as [LUKS2](https://gitlab.com/cryptsetup/LUKS2-docs/-/tree/master) partitions with a sector size of 4096 bytes. -The used key derivation function is [Argon2id](https://datatracker.ietf.org/doc/html/rfc9106) with the [recommended parameters for memory-constrained environments](https://datatracker.ietf.org/doc/html/rfc9106#section-7.4) of 3 iterations and 64 MiB of memory, utilizing 4 parallel threads. -For encryption Constellation uses AES in XTS-Plain64. The key size is 512 bit. - -### dm-integrity - -To interact with the dm-integrity kernel module, Constellation uses [libcryptsetup](https://gitlab.com/cryptsetup/cryptsetup/). 
-When enabled, the used data integrity algorithm is [HMAC](https://datatracker.ietf.org/doc/html/rfc2104) with SHA256 as the hash function. -The tag size is 32 Bytes. - -## Encrypted S3 object storage - -Constellation comes with a service that you can use to transparently retrofit client-side encryption to existing applications that use S3 (AWS or compatible) for storage. -To learn more, check out the [s3proxy documentation](../workflows/s3proxy.md). diff --git a/docs/versioned_docs/version-2.20/architecture/images.md b/docs/versioned_docs/version-2.20/architecture/images.md deleted file mode 100644 index 8a9c51d36..000000000 --- a/docs/versioned_docs/version-2.20/architecture/images.md +++ /dev/null @@ -1,49 +0,0 @@ -# Constellation images - -Constellation uses a minimal version of Fedora as the operating system running inside confidential VMs. This Linux distribution is optimized for containers and designed to be stateless. -The Constellation images provide measured boot and an immutable filesystem. - -## Measured boot - -```mermaid -flowchart LR - Firmware --> Bootloader - Bootloader --> uki - subgraph uki[Unified Kernel Image] - Kernel[Kernel] - initramfs[Initramfs] - cmdline[Kernel Command Line] - end - uki --> rootfs[Root Filesystem] -``` - -Measured boot uses a Trusted Platform Module (TPM) to measure every part of the boot process. This allows for verification of the integrity of a running system at any point in time. To ensure correct measurements of every stage, each stage is responsible to measure the next stage before transitioning. - -### Firmware - -With confidential VMs, the firmware is the root of trust and is measured automatically at boot. After initialization, the firmware will load and measure the bootloader before executing it. - -### Bootloader - -The bootloader is the first modifiable part of the boot chain. The bootloader is tasked with loading the kernel, initramfs and setting the kernel command line. The Constellation bootloader measures these components before starting the kernel. - -### initramfs - -The initramfs is a small filesystem loaded to prepare the actual root filesystem. The Constellation initramfs maps the block device containing the root filesystem with [dm-verity](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/verity.html). The initramfs then mounts the root filesystem from the mapped block device. - -dm-verity provides integrity checking using a cryptographic hash tree. When a block is read, its integrity is checked by verifying the tree against a trusted root hash. The initramfs reads this root hash from the previously measured kernel command line. Thus, if any block of the root filesystem's device is modified on disk, trying to read the modified block will result in a kernel panic at runtime. - -After mounting the root filesystem, the initramfs will switch over and start the `init` process of the integrity-protected root filesystem. - -## State disk - -In addition to the read-only root filesystem, each Constellation node has a disk for storing state data. -This disk is mounted readable and writable by the initramfs and contains data that should persist across reboots. -Such data can contain sensitive information and, therefore, must be stored securely. -To that end, the state disk is protected by authenticated encryption. -See the section on [keys and encryption](keys.md#storage-encryption) for more information on the cryptographic primitives in use. 
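-
-As a rough, manual illustration of the disk-encryption primitives mentioned above (LUKS2 with Argon2id and AES-XTS, as described in the dm-crypt section), a comparable volume could be set up with `cryptsetup` as sketched below. This only demonstrates the primitives; it's not the exact invocation Constellation uses for the state disk or CSI volumes:
-
-```bash
-# Illustrative parameters only; Constellation's actual setup may differ.
-sudo cryptsetup luksFormat --type luks2 \
-  --cipher aes-xts-plain64 --key-size 512 \
-  --sector-size 4096 \
-  --pbkdf argon2id --pbkdf-memory 65536 --pbkdf-parallel 4 \
-  /dev/sdX
-# Map the encrypted device so it can be formatted and mounted.
-sudo cryptsetup open /dev/sdX encrypted_disk
-```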
- -## Kubernetes components - -During initialization, the [*Bootstrapper*](microservices.md#bootstrapper) downloads and verifies the [Kubernetes components](https://kubernetes.io/docs/concepts/overview/components/) as configured by the user. -They're stored on the state partition and can be updated once new releases need to be installed. diff --git a/docs/versioned_docs/version-2.20/architecture/keys.md b/docs/versioned_docs/version-2.20/architecture/keys.md deleted file mode 100644 index 49821cd0b..000000000 --- a/docs/versioned_docs/version-2.20/architecture/keys.md +++ /dev/null @@ -1,130 +0,0 @@ -# Key management and cryptographic primitives - -Constellation protects and isolates your cluster and workloads. -To that end, cryptography is the foundation that ensures the confidentiality and integrity of all components. -Evaluating the security and compliance of Constellation requires a precise understanding of the cryptographic primitives and keys used. -The following gives an overview of the architecture and explains the technical details. - -## Confidential VMs - -Confidential VM (CVM) technology comes with hardware and software components for memory encryption, isolation, and remote attestation. -For details on the implementations and cryptographic soundness, refer to the hardware vendors' documentation and advisories. - -## Master secret - -The master secret is the cryptographic material used for deriving the [*clusterID*](#cluster-identity) and the *key encryption key (KEK)* for [storage encryption](#storage-encryption). -It's generated during the bootstrapping of a Constellation cluster. -It can either be managed by [Constellation](#constellation-managed-key-management) or an [external key management system](#user-managed-key-management). -In case of [recovery](#recovery-and-migration), the master secret allows to decrypt the state and recover a Constellation cluster. - -## Cluster identity - -The identity of a Constellation cluster is represented by cryptographic [measurements](attestation.md#runtime-measurements): - -The **base measurements** represent the identity of a valid, uninitialized Constellation node. -They depend on the node image, but are otherwise the same for every Constellation cluster. -On node boot, they're determined using the CVM's attestation mechanism and [measured boot up to the read-only root filesystem](images.md). - -The **clusterID** represents the identity of a single initialized Constellation cluster. -It's derived from the master secret and a cryptographically random salt and unique for every Constellation cluster. -The [Bootstrapper](microservices.md#bootstrapper) measures the *clusterID* into its own PCR before executing any code not measured as part of the *base measurements*. -See [Node attestation](attestation.md#node-attestation) for details. - -The remote attestation statement of a Constellation cluster combines the *base measurements* and the *clusterID* for a verifiable, unspoofable, unique identity. - -## Network encryption - -Constellation encrypts all cluster network communication using the [container network interface (CNI)](https://github.com/containernetworking/cni). -See [network encryption](networking.md) for more details. - -The Cilium agent running on each node establishes a secure [WireGuard](https://www.wireguard.com/) tunnel between it and all other known nodes in the cluster. -Each node creates its own [Curve25519](http://cr.yp.to/ecdh.html) encryption key pair and distributes its public key via Kubernetes. 
-A node uses another node's public key to decrypt and encrypt traffic from and to Cilium-managed endpoints running on that node. -Connections are always encrypted peer-to-peer using [ChaCha20](http://cr.yp.to/chacha.html) with [Poly1305](http://cr.yp.to/mac.html). -WireGuard implements [forward secrecy with key rotation every 2 minutes](https://lists.zx2c4.com/pipermail/wireguard/2017-December/002141.html). - -## Storage encryption - -Constellation supports transparent encryption of persistent storage. -The Linux kernel's device mapper-based encryption features are used to encrypt the data on the block storage level. -Currently, the following primitives are used for block storage encryption: - -* [dm-crypt](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/dm-crypt.html) -* [dm-integrity](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/dm-integrity.html) - -Adding primitives for integrity protection in the CVM attacker model are under active development and will be available in a future version of Constellation. -See [encrypted storage](encrypted-storage.md) for more details. - -As a cluster administrator, when creating a cluster, you can use the Constellation [installation program](orchestration.md) to select one of the following methods for key management: - -* Constellation-managed key management -* User-managed key management - -### Constellation-managed key management - -#### Key material and key derivation - -During the creation of a Constellation cluster, the cluster's master secret is used to derive a KEK. -This means creating two clusters with the same master secret will yield the same KEK. -Any data encryption key (DEK) is derived from the KEK via HKDF. -Note that the master secret is recommended to be unique for every cluster and shouldn't be reused (except in case of [recovering](../workflows/recovery.md) a cluster). - -#### State and storage - -The KEK is derived from the master secret during the initialization. -Subsequently, all other key material is derived from the KEK. -Given the same KEK, any DEK can be derived deterministically from a given identifier. -Hence, there is no need to store DEKs. They can be derived on demand. -After the KEK was derived, it's stored in memory only and never leaves the CVM context. - -#### Availability - -Constellation-managed key management has the same availability as the underlying Kubernetes cluster. -Therefore, the KEK is stored in the [distributed Kubernetes etcd storage](https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/) to allow for unexpected but non-fatal (control-plane) node failure. -The etcd storage is backed by the encrypted and integrity protected [state disk](images.md#state-disk) of the nodes. - -#### Recovery - -Constellation clusters can be recovered in the event of a disaster, even when all node machines have been stopped and need to be rebooted. -For details on the process see the [recovery workflow](../workflows/recovery.md). - -### User-managed key management - -User-managed key management is under active development and will be available soon. -In scenarios where constellation-managed key management isn't an option, this mode allows you to keep full control of your keys. -For example, compliance requirements may force you to keep your KEKs in an on-prem key management system (KMS). - -During the creation of a Constellation cluster, you specify a KEK present in a remote KMS. -This follows the common scheme of "bring your own key" (BYOK). 
-Constellation will support several KMSs for managing the storage and access of your KEK. -Initially, it will support the following KMSs: - -* [AWS KMS](https://aws.amazon.com/kms/) -* [GCP KMS](https://cloud.google.com/security-key-management) -* [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/#product-overview) -* [KMIP-compatible KMS](https://www.oasis-open.org/committees/tc_home.php?wg_abbrev=kmip) - -Storing the keys in Cloud KMS of AWS, Azure, or GCP binds the key usage to the particular cloud identity access management (IAM). -In the future, Constellation will support remote attestation-based access policies for Cloud KMS once available. -Note that using a Cloud KMS limits the isolation and protection to the guarantees of the particular offering. - -KMIP support allows you to use your KMIP-compatible on-prem KMS and keep full control over your keys. -This follows the common scheme of "hold your own key" (HYOK). - -The KEK is used to encrypt per-data "data encryption keys" (DEKs). -DEKs are generated to encrypt your data before storing it on persistent storage. -After being encrypted by the KEK, the DEKs are stored on dedicated cloud storage for persistence. -Currently, Constellation supports the following cloud storage options: - -* [AWS S3](https://aws.amazon.com/s3/) -* [GCP Cloud Storage](https://cloud.google.com/storage) -* [Azure Blob Storage](https://azure.microsoft.com/en-us/services/storage/blobs/#overview) - -The DEKs are only present in plaintext form in the encrypted main memory of the CVMs. -Similarly, the cryptographic operations for encrypting data before writing it to persistent storage are performed in the context of the CVMs. - -#### Recovery and migration - -In the case of a disaster, the KEK can be used to decrypt the DEKs locally and subsequently use them to decrypt and retrieve the data. -In case of migration, configuring the same KEK will provide seamless migration of data. -Thus, only the DEK storage needs to be transferred to the new cluster alongside the encrypted data for seamless migration. diff --git a/docs/versioned_docs/version-2.20/architecture/microservices.md b/docs/versioned_docs/version-2.20/architecture/microservices.md deleted file mode 100644 index 90bae783b..000000000 --- a/docs/versioned_docs/version-2.20/architecture/microservices.md +++ /dev/null @@ -1,73 +0,0 @@ -# Microservices - -Constellation takes care of bootstrapping and initializing a Confidential Kubernetes cluster. -During the lifetime of the cluster, it handles day 2 operations such as key management, remote attestation, and updates. 
-These features are provided by several microservices: - -* The [Bootstrapper](microservices.md#bootstrapper) initializes a Constellation node and bootstraps the cluster -* The [JoinService](microservices.md#joinservice) joins new nodes to an existing cluster -* The [VerificationService](microservices.md#verificationservice) provides remote attestation functionality -* The [KeyService](microservices.md#keyservice) manages Constellation-internal keys - -The relations between microservices are shown in the following diagram: - -```mermaid -flowchart LR - subgraph admin [Admin's machine] - A[Constellation CLI] - end - subgraph img [Constellation OS image] - B[Constellation OS] - C[Bootstrapper] - end - subgraph Kubernetes - D[JoinService] - E[KeyService] - F[VerificationService] - end - A -- deploys --> - B -- starts --> C - C -- deploys --> D - C -- deploys --> E - C -- deploys --> F -``` - -## Bootstrapper - -The *Bootstrapper* is the first microservice launched after booting a Constellation node image. -It sets up that machine as a Kubernetes node and integrates that node into the Kubernetes cluster. -To this end, the *Bootstrapper* first downloads and verifies the [Kubernetes components](https://kubernetes.io/docs/concepts/overview/components/) at the configured versions. -The *Bootstrapper* tries to find an existing cluster and if successful, communicates with the [JoinService](microservices.md#joinservice) to join the node. -Otherwise, it waits for an initialization request to create a new Kubernetes cluster. - -## JoinService - -The *JoinService* runs as [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) on each control-plane node. -New nodes (at cluster start, or later through autoscaling) send a request to the service over [attested TLS (aTLS)](attestation.md#attested-tls-atls). -The *JoinService* verifies the new node's certificate and attestation statement. -If attestation is successful, the new node is supplied with an encryption key from the [*KeyService*](microservices.md#keyservice) for its state disk, and a Kubernetes bootstrap token. - - -```mermaid -sequenceDiagram - participant New node - participant JoinService - New node->>JoinService: aTLS handshake (server side verification) - JoinService-->>New node: # - New node->>+JoinService: IssueJoinTicket(DiskUUID, NodeName, IsControlPlane) - JoinService->>+KeyService: GetDataKey(DiskUUID) - KeyService-->>-JoinService: DiskEncryptionKey - JoinService-->>-New node: DiskEncryptionKey, KubernetesJoinToken, ... -``` - -## VerificationService - -The *VerificationService* runs as DaemonSet on each node. -It provides user-facing functionality for remote attestation during the cluster's lifetime via an endpoint for [verifying the cluster](attestation.md#cluster-attestation). -Read more about the hardware-based [attestation feature](attestation.md) of Constellation and how to [verify](../workflows/verify-cluster.md) a cluster on the client side. - -## KeyService - -The *KeyService* runs as DaemonSet on each control-plane node. -It implements the key management for the [storage encryption keys](keys.md#storage-encryption) in Constellation. These keys are used for the [state disk](images.md#state-disk) of each node and the [transparently encrypted storage](encrypted-storage.md) for Kubernetes. 
-Depending on whether the [constellation-managed](keys.md#constellation-managed-key-management) or [user-managed](keys.md#user-managed-key-management) mode is used, the *KeyService* holds the key encryption key (KEK) directly or calls an external key management service (KMS) for key derivation, respectively.
diff --git a/docs/versioned_docs/version-2.20/architecture/networking.md b/docs/versioned_docs/version-2.20/architecture/networking.md
deleted file mode 100644
index e9cbdf029..000000000
--- a/docs/versioned_docs/version-2.20/architecture/networking.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# Network encryption
-
-Constellation encrypts all pod communication using the [container network interface (CNI)](https://github.com/containernetworking/cni).
-To that end, Constellation deploys, configures, and operates the [Cilium](https://cilium.io/) CNI plugin.
-Cilium provides [transparent encryption](https://docs.cilium.io/en/stable/security/network/encryption) for all cluster traffic using either IPsec or [WireGuard](https://www.wireguard.com/).
-Currently, Constellation only supports WireGuard as the encryption engine.
-You can read more about the cryptographic soundness of WireGuard [in their white paper](https://www.wireguard.com/papers/wireguard.pdf).
-
-Cilium is actively working on implementing a feature called [`host-to-host`](https://github.com/cilium/cilium/pull/19401) encryption mode for WireGuard.
-With `host-to-host`, all traffic between nodes will be tunneled via WireGuard (host-to-host, host-to-pod, pod-to-host, pod-to-pod).
-Until the `host-to-host` feature is released, Constellation enables `pod-to-pod` encryption.
-This mode encrypts all traffic between Kubernetes pods using WireGuard tunnels.
-
-When using Cilium in the default setup but with encryption enabled, there is a [known issue](https://docs.cilium.io/en/v1.12/gettingstarted/encryption/#egress-traffic-to-not-yet-discovered-remote-endpoints-may-be-unencrypted)
-that can cause pod-to-pod traffic to be unencrypted.
-To mitigate this issue, Constellation adds a *strict* mode to Cilium's `pod-to-pod` encryption.
-This mode changes the default behavior so that traffic destined for an unknown endpoint isn't sent out in plaintext but dropped instead.
-The strict mode distinguishes traffic that's sent to a pod from traffic that's destined for a cluster-external endpoint by considering the pod CIDR range.
-
-Traffic originating from hosts isn't encrypted yet.
-This mainly includes health checks from the Kubernetes API server.
-Also, traffic proxied over the API server via, for example, `kubectl port-forward` isn't encrypted.
diff --git a/docs/versioned_docs/version-2.20/architecture/observability.md b/docs/versioned_docs/version-2.20/architecture/observability.md
deleted file mode 100644
index 0f4daffd4..000000000
--- a/docs/versioned_docs/version-2.20/architecture/observability.md
+++ /dev/null
@@ -1,74 +0,0 @@
-# Observability
-
-In Kubernetes, observability is the ability to gain insight into the behavior and performance of applications.
-It helps identify and resolve issues more effectively, ensuring stability and performance of Kubernetes workloads, reducing downtime and outages, and improving efficiency.
-The "three pillars of observability" are logs, metrics, and traces.
-
-In the context of Confidential Computing, observability is a delicate subject and needs to be applied such that it doesn't leak any sensitive information.
-The following gives an overview of where and how you can apply standard observability tools in Constellation. - -## Cloud resource monitoring - -While inaccessible, Constellation's nodes are still visible as black box VMs to the hypervisor. -Resource consumption, such as memory and CPU utilization, can be monitored from the outside and observed via the cloud platforms directly. -Similarly, other resources, such as storage and network and their respective metrics, are visible via the cloud platform. - -## Metrics - -Metrics are numeric representations of data measured over intervals of time. They're essential for understanding system health and gaining insights using telemetry signals. - -By default, Constellation exposes the [metrics for Kubernetes system components](https://kubernetes.io/docs/concepts/cluster-administration/system-metrics/) inside the cluster. -Similarly, the [etcd metrics](https://etcd.io/docs/v3.5/metrics/) endpoints are exposed inside the cluster. -These [metrics endpoints can be disabled](https://kubernetes.io/docs/concepts/cluster-administration/system-metrics/#disabling-metrics). - -You can collect these cluster-internal metrics via tools such as [Prometheus](https://prometheus.io/) or the [Elastic Stack](https://www.elastic.co/de/elastic-stack/). - -Constellation's CNI Cilium also supports [metrics via Prometheus endpoints](https://docs.cilium.io/en/latest/observability/metrics/). -However, in Constellation, they're disabled by default and must be enabled first. - -## Logs - -Logs represent discrete events that usually describe what's happening with your service. -The payload is an actual message emitted from your system along with a metadata section containing a timestamp, labels, and tracking identifiers. - -### System logs - -Detailed system-level logs are accessible via `/var/log` and [journald](https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html) on the nodes directly. -They can be collected from there, for example, via [Filebeat and Logstash](https://www.elastic.co/guide/en/beats/filebeat/current/logstash-output.html), which are tools of the [Elastic Stack](https://www.elastic.co/de/elastic-stack/). - -In case of an error during the initialization, the CLI automatically collects the [Bootstrapper](./microservices.md#bootstrapper) logs and returns these as a file for [troubleshooting](../workflows/troubleshooting.md). Here is an example of such an event: - -```shell-session -Cluster initialization failed. This error is not recoverable. -Terminate your cluster and try again. -Fetched bootstrapper logs are stored in "constellation-cluster.log" -``` - -### Kubernetes logs - -Constellation supports the [Kubernetes logging architecture](https://kubernetes.io/docs/concepts/cluster-administration/logging/). -By default, logs are written to the nodes' encrypted state disks. -These include the Pod and container logs and the [system component logs](https://kubernetes.io/docs/concepts/cluster-administration/logging/#system-component-logs). - -[Constellation services](microservices.md) run as Pods inside the `kube-system` namespace and use the standard container logging mechanism. -The same applies for the [Cilium Pods](https://docs.cilium.io/en/latest/operations/troubleshooting/#logs). - -You can collect logs from within the cluster via tools such as [Fluentd](https://github.com/fluent/fluentd), [Loki](https://github.com/grafana/loki), or the [Elastic Stack](https://www.elastic.co/de/elastic-stack/). 
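-
-For a quick look without any log collection stack, `kubectl` is usually sufficient. A small sketch (the pod and workload names are assumptions and may differ between Constellation versions):
-
-```bash
-# List the Constellation microservice pods running in kube-system.
-kubectl get pods -n kube-system
-
-# Stream the logs of one pod found in the listing above.
-kubectl logs -n kube-system <pod-name> --follow
-
-# Or address a workload directly, e.g. a DaemonSet (hypothetical name).
-kubectl logs -n kube-system daemonset/join-service
-```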
- -## Traces - -Modern systems are implemented as interconnected complex and distributed microservices. Understanding request flows and system communications is challenging, mainly because all systems in a chain need to be modified to propagate tracing information. Distributed tracing is a new approach to increasing observability and understanding performance bottlenecks. A trace represents consecutive events that reflect an end-to-end request path in a distributed system. - -Constellation supports [traces for Kubernetes system components](https://kubernetes.io/docs/concepts/cluster-administration/system-traces/). -By default, they're disabled and need to be enabled first. - -Similarly, Cilium can be enabled to [export traces](https://cilium.io/use-cases/metrics-export/). - -You can collect these traces via tools such as [Jaeger](https://www.jaegertracing.io/) or [Zipkin](https://zipkin.io/). - -## Integrations - -Platforms and SaaS solutions such as Datadog, logz.io, Dynatrace, or New Relic facilitate the observability challenge for Kubernetes and provide all-in-one SaaS solutions. -They install agents into the cluster that collect metrics, logs, and tracing information and upload them into the data lake of the platform. -Technically, the agent-based approach is compatible with Constellation, and attaching these platforms is straightforward. -However, you need to evaluate if the exported data might violate Constellation's compliance and privacy guarantees by uploading them to a third-party platform. diff --git a/docs/versioned_docs/version-2.20/architecture/orchestration.md b/docs/versioned_docs/version-2.20/architecture/orchestration.md deleted file mode 100644 index 3c8d529e7..000000000 --- a/docs/versioned_docs/version-2.20/architecture/orchestration.md +++ /dev/null @@ -1,83 +0,0 @@ -# Orchestrating Constellation clusters - -You can use the CLI to create a cluster on the supported cloud platforms. -The CLI provisions the resources in your cloud environment and initiates the initialization of your cluster. -It uses a set of parameters and an optional configuration file to manage your cluster installation. -The CLI is also used for updating your cluster. - -## Workspaces - -Each Constellation cluster has an associated *workspace*. -The workspace is where data such as the Constellation state and config files are stored. -Each workspace is associated with a single cluster and configuration. -The CLI stores state in the local filesystem making the current directory the active workspace. -Multiple clusters require multiple workspaces, hence, multiple directories. -Note that every operation on a cluster always has to be performed from the directory associated with its workspace. - -You may copy files from the workspace to other locations, -but you shouldn't move or delete them while the cluster is still being used. -The Constellation CLI takes care of managing the workspace. -Only when a cluster was terminated, and you are sure the files aren't needed anymore, should you remove a workspace. - -## Cluster creation process - -To allow for fine-grained configuration of your cluster and cloud environment, Constellation supports an extensive configuration file with strong defaults. [Generating the configuration file](../workflows/config.md) is typically the first thing you do in the workspace. 
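-
-For example, a fresh workspace for an Azure cluster could be set up like this (substitute `aws`, `gcp`, or another supported provider as needed; see the CLI reference for all options):
-
-```bash
-# Create a dedicated directory that will become the cluster's workspace.
-mkdir constellation-workspace
-cd constellation-workspace
-# Generate the configuration file for the chosen cloud provider.
-constellation config generate azure
-```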
- -Altogether, the following files are generated during the creation of a Constellation cluster and stored in the current workspace: - -* a configuration file -* a state file -* a Base64-encoded master secret -* [Terraform artifacts](../reference/terraform.md), stored in subdirectories -* a Kubernetes `kubeconfig` file. - -After the initialization of your cluster, the CLI will provide you with a Kubernetes `kubeconfig` file. -This file grants you access to your Kubernetes cluster and configures the [kubectl](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) tool. -In addition, the cluster's [identifier](orchestration.md#post-installation-configuration) is returned and stored in the state file. - -### Creation process details - -1. The CLI `apply` command first creates the confidential VM (CVM) resources in your cloud environment and configures the network -2. Each CVM boots the Constellation node image and measures every component in the boot chain -3. The first microservice launched in each node is the [*Bootstrapper*](microservices.md#bootstrapper) -4. The *Bootstrapper* waits until it either receives an initialization request or discovers an initialized cluster -5. The CLI then connects to the *Bootstrapper* of a selected node, sends the configuration, and initiates the initialization of the cluster -6. The *Bootstrapper* of **that** node [initializes the Kubernetes cluster](microservices.md#bootstrapper) and deploys the other Constellation [microservices](microservices.md) including the [*JoinService*](microservices.md#joinservice) -7. Subsequently, the *Bootstrappers* of the other nodes discover the initialized cluster and send join requests to the *JoinService* -8. As part of the join request each node includes an attestation statement of its boot measurements as authentication -9. The *JoinService* verifies the attestation statements and joins the nodes to the Kubernetes cluster -10. This process is repeated for every node joining the cluster later (e.g., through autoscaling) - -## Post-installation configuration - -Post-installation the CLI provides a configuration for [accessing the cluster using the Kubernetes API](https://kubernetes.io/docs/tasks/administer-cluster/access-cluster-api/). -The `kubeconfig` file provides the credentials and configuration for connecting and authenticating to the API server. -Once configured, orchestrate the Kubernetes cluster via `kubectl`. - -After the initialization, the CLI will present you with a couple of tokens: - -* The [*master secret*](keys.md#master-secret) (stored in the `constellation-mastersecret.json` file by default) -* The [*clusterID*](keys.md#cluster-identity) of your cluster in Base64 encoding - -You can read more about these values and their meaning in the guide on [cluster identity](keys.md#cluster-identity). - -The *master secret* must be kept secret and can be used to [recover your cluster](../workflows/recovery.md). -Instead of managing this secret manually, you can [use your key management solution of choice](keys.md#user-managed-key-management) with Constellation. - -The *clusterID* uniquely identifies a cluster and can be used to [verify your cluster](../workflows/verify-cluster.md). - -## Upgrades - -Constellation images and microservices may need to be upgraded to new versions during the lifetime of a cluster. -Constellation implements a rolling update mechanism ensuring no downtime of the control or data plane. 
-You can upgrade a Constellation cluster with a single operation by using the CLI. -For step-by-step instructions on how to do this, refer to [Upgrade your cluster](../workflows/upgrade.md). - -### Attestation of upgrades - -With every new image, corresponding measurements are released. -During an update procedure, the CLI provides new measurements to the [JoinService](microservices.md#joinservice) securely. -New measurements for an updated image are automatically pulled and verified by the CLI following the [supply chain security concept](attestation.md#chain-of-trust) of Constellation. -The [attestation section](attestation.md#cluster-facing-attestation) describes in detail how these measurements are then used by the JoinService for the attestation of nodes. - - diff --git a/docs/versioned_docs/version-2.20/architecture/overview.md b/docs/versioned_docs/version-2.20/architecture/overview.md deleted file mode 100644 index 386f93b2f..000000000 --- a/docs/versioned_docs/version-2.20/architecture/overview.md +++ /dev/null @@ -1,30 +0,0 @@ -# Overview - -Constellation is a cloud-based confidential orchestration platform. -The foundation of Constellation is Kubernetes and therefore shares the same technology stack and architecture principles. -To learn more about Constellation and Kubernetes, see [product overview](../overview/product.md). - -## About orchestration and updates - -As a cluster administrator, you can use the [Constellation CLI](orchestration.md) to install and deploy a cluster. -Updates are provided in accordance with the [support policy](versions.md). - -## About microservices and attestation - -Constellation manages the nodes and network in your cluster. All nodes are bootstrapped by the [*Bootstrapper*](microservices.md#bootstrapper). They're verified and authenticated by the [*JoinService*](microservices.md#joinservice) before being added to the cluster and the network. Finally, the entire cluster can be verified via the [*VerificationService*](microservices.md#verificationservice) using [remote attestation](attestation.md). - -## About node images and verified boot - -Constellation comes with operating system images for Kubernetes control-plane and worker nodes. -They're highly optimized for running containerized workloads and specifically prepared for running inside confidential VMs. -You can learn more about [the images](images.md) and how verified boot ensures their integrity during boot and beyond. - -## About key management and cryptographic primitives - -Encryption of data at-rest, in-transit, and in-use is the fundamental building block for confidential computing and Constellation. Learn more about the [keys and cryptographic primitives](keys.md) used in Constellation, [encrypted persistent storage](encrypted-storage.md), and [network encryption](networking.md). - -## About observability - -Observability in Kubernetes refers to the capability to troubleshoot issues using telemetry signals such as logs, metrics, and traces. -In the realm of Confidential Computing, it's crucial that observability aligns with confidentiality, necessitating careful implementation. -Learn more about the [observability capabilities in Constellation](./observability.md). 
diff --git a/docs/versioned_docs/version-2.20/architecture/versions.md b/docs/versioned_docs/version-2.20/architecture/versions.md
deleted file mode 100644
index 9d5a064e0..000000000
--- a/docs/versioned_docs/version-2.20/architecture/versions.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# Versions and support policy
-
-All components of Constellation use a three-digit version number of the form `v<MAJOR>.<MINOR>.<PATCH>`.
-The components are released in lock step, usually on the first Tuesday of every month. This release primarily introduces new features, but may also include security or performance improvements. The `MINOR` version will be incremented as part of this release.
-
-Additional `PATCH` releases may be created on demand to fix security issues or bugs before the next `MINOR` release window.
-
-New releases are published on [GitHub](https://github.com/edgelesssys/constellation/releases).
-
-## Kubernetes support policy
-
-Constellation is aligned with the [version support policy of Kubernetes](https://kubernetes.io/releases/version-skew-policy/#supported-versions), and therefore usually supports the most recent three minor versions.
-When a new minor version of Kubernetes is released, support is added to the next Constellation release, and that version then supports four Kubernetes versions.
-Subsequent Constellation releases drop support for the oldest (and deprecated) Kubernetes version.
-
-The following Kubernetes versions are currently supported:
-
-* v1.28.15
-* v1.29.11
-* v1.30.7
diff --git a/docs/versioned_docs/version-2.20/getting-started/examples.md b/docs/versioned_docs/version-2.20/getting-started/examples.md
deleted file mode 100644
index fded84980..000000000
--- a/docs/versioned_docs/version-2.20/getting-started/examples.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# Examples
-
-After you [installed the CLI](install.md) and [created your first cluster](first-steps.md), you're ready to deploy applications. Why not start with one of the following examples?
-* [Emojivoto](examples/emojivoto.md): a simple but fun web application
-* [Online Boutique](examples/online-boutique.md): an e-commerce demo application by Google consisting of 11 separate microservices
-* [Horizontal Pod Autoscaling](examples/horizontal-scaling.md): an example demonstrating Constellation's autoscaling capabilities
diff --git a/docs/versioned_docs/version-2.20/getting-started/examples/emojivoto.md b/docs/versioned_docs/version-2.20/getting-started/examples/emojivoto.md
deleted file mode 100644
index 2bbe27917..000000000
--- a/docs/versioned_docs/version-2.20/getting-started/examples/emojivoto.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# Emojivoto
-[Emojivoto](https://github.com/BuoyantIO/emojivoto) is a simple and fun application that's well suited to test the basic functionality of your cluster.
-
-emojivoto - Web UI
-
-1. Deploy the application:
-   ```bash
-   kubectl apply -k github.com/BuoyantIO/emojivoto/kustomize/deployment
-   ```
-2. Wait until it becomes available:
-   ```bash
-   kubectl wait --for=condition=available --timeout=60s -n emojivoto --all deployments
-   ```
-3. Forward the web service to your machine:
-   ```bash
-   kubectl -n emojivoto port-forward svc/web-svc 8080:80
-   ```
-4. 
Visit [http://localhost:8080](http://localhost:8080) diff --git a/docs/versioned_docs/version-2.20/getting-started/examples/filestash-s3proxy.md b/docs/versioned_docs/version-2.20/getting-started/examples/filestash-s3proxy.md deleted file mode 100644 index b9a394256..000000000 --- a/docs/versioned_docs/version-2.20/getting-started/examples/filestash-s3proxy.md +++ /dev/null @@ -1,107 +0,0 @@ - -# Deploying Filestash - -Filestash is a web frontend for different storage backends, including S3. -It's a useful application to showcase s3proxy in action. - -1. Deploy s3proxy as described in [Deployment](../../workflows/s3proxy.md#deployment). -2. Create a deployment file for Filestash with one pod: - -```sh -cat << EOF > "deployment-filestash.yaml" -apiVersion: apps/v1 -kind: Deployment -metadata: - name: filestash -spec: - replicas: 1 - selector: - matchLabels: - app: filestash - template: - metadata: - labels: - app: filestash - spec: - hostAliases: - - ip: $(kubectl get svc s3proxy-service -o=jsonpath='{.spec.clusterIP}') - hostnames: - - "s3.us-east-1.amazonaws.com" - - "s3.us-east-2.amazonaws.com" - - "s3.us-west-1.amazonaws.com" - - "s3.us-west-2.amazonaws.com" - - "s3.eu-north-1.amazonaws.com" - - "s3.eu-south-1.amazonaws.com" - - "s3.eu-south-2.amazonaws.com" - - "s3.eu-west-1.amazonaws.com" - - "s3.eu-west-2.amazonaws.com" - - "s3.eu-west-3.amazonaws.com" - - "s3.eu-central-1.amazonaws.com" - - "s3.eu-central-2.amazonaws.com" - - "s3.ap-northeast-1.amazonaws.com" - - "s3.ap-northeast-2.amazonaws.com" - - "s3.ap-northeast-3.amazonaws.com" - - "s3.ap-east-1.amazonaws.com" - - "s3.ap-southeast-1.amazonaws.com" - - "s3.ap-southeast-2.amazonaws.com" - - "s3.ap-southeast-3.amazonaws.com" - - "s3.ap-southeast-4.amazonaws.com" - - "s3.ap-south-1.amazonaws.com" - - "s3.ap-south-2.amazonaws.com" - - "s3.me-south-1.amazonaws.com" - - "s3.me-central-1.amazonaws.com" - - "s3.il-central-1.amazonaws.com" - - "s3.af-south-1.amazonaws.com" - - "s3.ca-central-1.amazonaws.com" - - "s3.sa-east-1.amazonaws.com" - containers: - - name: filestash - image: machines/filestash:latest - ports: - - containerPort: 8334 - volumeMounts: - - name: ca-cert - mountPath: /etc/ssl/certs/kube-ca.crt - subPath: kube-ca.crt - volumes: - - name: ca-cert - secret: - secretName: s3proxy-tls - items: - - key: ca.crt - path: kube-ca.crt -EOF -``` - -The pod spec includes the `hostAliases` key, which adds an entry to the pod's `/etc/hosts`. -The entry forwards all requests for any of the currently defined AWS regions to the Kubernetes service `s3proxy-service`. -If you followed the s3proxy [Deployment](../../workflows/s3proxy.md#deployment) guide, this service points to a s3proxy pod. - -The deployment specifies all regions explicitly to prevent accidental data leaks. -If one of your buckets were located in a region that's not part of the `hostAliases` key, traffic towards those buckets would not be redirected to s3proxy. -Similarly, if you want to exclude data for specific regions from going through s3proxy you can remove those regions from the deployment. - -The spec also includes a volume mount for the TLS certificate and adds it to the pod's certificate trust store. -The volume is called `ca-cert`. -The key `ca.crt` of that volume is mounted to `/etc/ssl/certs/kube-ca.crt`, which is the default certificate trust store location for that container's OpenSSL library. -Not adding the CA certificate will result in TLS authentication errors. - -3. 
Apply the file: `kubectl apply -f deployment-filestash.yaml` - -Afterward, you can use a port forward to access the Filestash pod: -`kubectl port-forward pod/$(kubectl get pod --selector='app=filestash' -o=jsonpath='{.items[*].metadata.name}') 8334:8334` - -4. After browsing to `localhost:8443`, Filestash will ask you to set an administrator password. -After setting it, you can directly leave the admin area by clicking the blue cloud symbol in the top left corner. -Subsequently, you can select S3 as storage backend and enter your credentials. -This will bring you to an overview of your buckets. -If you want to deploy Filestash in production, take a look at its [documentation](https://www.filestash.app/docs/). - -5. To see the logs of s3proxy intercepting requests made to S3, run: `kubectl logs -f pod/$(kubectl get pod --selector='app=s3proxy' -o=jsonpath='{.items[*].metadata.name}')` -Look out for log messages labeled `intercepting`. -There is one such log message for each message that's encrypted, decrypted, or blocked. - -6. Once you have uploaded a file with Filestash, you should be able to view the file in Filestash. -However, if you go to the AWS S3 [Web UI](https://s3.console.aws.amazon.com/s3/home) and download the file you just uploaded in Filestash, you won't be able to read it. -Another way to spot encrypted files without downloading them is to click on a file, scroll to the Metadata section, and look for the header named `x-amz-meta-constellation-encryption`. -This header holds the encrypted data encryption key of the object and is only present on objects that are encrypted by s3proxy. diff --git a/docs/versioned_docs/version-2.20/getting-started/examples/horizontal-scaling.md b/docs/versioned_docs/version-2.20/getting-started/examples/horizontal-scaling.md deleted file mode 100644 index dfaf9e742..000000000 --- a/docs/versioned_docs/version-2.20/getting-started/examples/horizontal-scaling.md +++ /dev/null @@ -1,98 +0,0 @@ -# Horizontal Pod Autoscaling -This example demonstrates Constellation's autoscaling capabilities. It's based on the Kubernetes [HorizontalPodAutoscaler Walkthrough](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/). During the following steps, Constellation will spawn new VMs on demand, verify them, add them to the cluster, and delete them again when the load has settled down. - -## Requirements -The cluster needs to be initialized with Kubernetes 1.23 or later. In addition, [autoscaling must be enabled](../../workflows/scale.md) to enable Constellation to assign new nodes dynamically. - -Just for this example specifically, the cluster should have as few worker nodes in the beginning as possible. Start with a small cluster with only *one* low-powered node for the control-plane node and *one* low-powered worker node. - -:::info -We tested the example using instances of types `Standard_DC4as_v5` on Azure and `n2d-standard-4` on GCP. -::: - -## Setup - -1. Install the Kubernetes Metrics Server: - ```bash - kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml - ``` - -2. Deploy the HPA example server that's supposed to be scaled under load. - - This manifest is similar to the one from the Kubernetes HPA walkthrough, but with increased CPU limits and requests to facilitate the triggering of node scaling events. - ```bash - cat < - -Online Boutique - Web UI - - - -1. Create a namespace: - ```bash - kubectl create ns boutique - ``` -2. 
Deploy the application: - ```bash - kubectl apply -n boutique -f https://github.com/GoogleCloudPlatform/microservices-demo/raw/main/release/kubernetes-manifests.yaml - ``` -3. Wait for all services to become available: - ```bash - kubectl wait --for=condition=available --timeout=300s -n boutique --all deployments - ``` -4. Get the frontend's external IP address: - ```shell-session - $ kubectl get service frontend-external -n boutique | awk '{print $4}' - EXTERNAL-IP - - ``` - (`` is a placeholder for the IP assigned by your CSP.) -5. Enter the IP from the result in your browser to browse the online shop. diff --git a/docs/versioned_docs/version-2.20/getting-started/first-steps-local.md b/docs/versioned_docs/version-2.20/getting-started/first-steps-local.md deleted file mode 100644 index 98f0302de..000000000 --- a/docs/versioned_docs/version-2.20/getting-started/first-steps-local.md +++ /dev/null @@ -1,277 +0,0 @@ -# First steps with a local cluster - -A local cluster lets you deploy and test Constellation without a cloud subscription. -You have two options: - -* Use MiniConstellation to automatically deploy a two-node cluster. -* For more fine-grained control, create the cluster using the QEMU provider. - -Both options use virtualization to create a local cluster with control-plane nodes and worker nodes. They **don't** require hardware with Confidential VM (CVM) support. For attestation, they currently use a software-based vTPM provided by KVM/QEMU. - -You need an x64 machine with a Linux OS. -You can use a VM, but it needs nested virtualization. - -## Prerequisites - -* Machine requirements: - * An x86-64 CPU with at least 4 cores (6 cores are recommended) - * At least 4 GB RAM (6 GB are recommended) - * 20 GB of free disk space - * Hardware virtualization enabled in the BIOS/UEFI (often referred to as Intel VT-x or AMD-V/SVM) / nested-virtualization support when using a VM -* Software requirements: - * Linux OS with [KVM kernel module](https://www.linux-kvm.org/page/Main_Page) - * Recommended: Ubuntu 22.04 LTS - * [Docker](https://docs.docker.com/engine/install/) - * [xsltproc](https://gitlab.gnome.org/GNOME/libxslt/-/wikis/home) - * (Optional) [virsh](https://www.libvirt.org/manpages/virsh.html) to observe and access your nodes - -### Software installation on Ubuntu - -```bash -# install Docker -curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg -echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null -sudo apt update -sudo apt install docker-ce -# install other dependencies -sudo apt install xsltproc -sudo snap install kubectl --classic -# install Constellation CLI -curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-linux-amd64 -sudo install constellation-linux-amd64 /usr/local/bin/constellation -# do not drop forwarded packages -sudo iptables -P FORWARD ACCEPT -``` - -## Create a cluster - - - - - -With the `constellation mini` command, you can deploy and test Constellation locally. This mode is called MiniConstellation. Conceptually, MiniConstellation is similar to [MicroK8s](https://microk8s.io/), [K3s](https://k3s.io/), and [minikube](https://minikube.sigs.k8s.io/docs/). - - -:::caution - -MiniConstellation has specific soft- and hardware requirements such as a Linux OS running on an x86-64 CPU. 
Pay attention to all [prerequisites](#prerequisites) when setting up. - -::: - -:::note - -Since MiniConstellation runs on your local system, cloud features such as load balancing, -attaching persistent storage, or autoscaling aren't available. - -::: - -The following creates your MiniConstellation cluster (may take up to 10 minutes to complete): - -```bash -constellation mini up -``` - -This will configure your current directory as the [workspace](../architecture/orchestration.md#workspaces) for this cluster. -All `constellation` commands concerning this cluster need to be issued from this directory. - - - - -With the QEMU provider, you can create a local Constellation cluster as if it were in the cloud. The provider uses [QEMU](https://www.qemu.org/) to create multiple VMs for the cluster nodes, which interact with each other. - -:::caution - -Constellation on QEMU has specific soft- and hardware requirements such as a Linux OS running on an x86-64 CPU. Pay attention to all [prerequisites](#prerequisites) when setting up. - -::: - -:::note - -Since Constellation on QEMU runs on your local system, cloud features such as load balancing, -attaching persistent storage, or autoscaling aren't available. - -::: - -1. To set up your local cluster, you need to create a configuration file for Constellation first. - - ```bash - constellation config generate qemu - ``` - - This creates a [configuration file](../workflows/config.md) for QEMU called `constellation-conf.yaml`. After that, your current folder also becomes your [workspace](../architecture/orchestration.md#workspaces). All `constellation` commands for your cluster need to be executed from this directory. - -2. Now you can create your cluster and its nodes. `constellation apply` uses the options set in `constellation-conf.yaml`. - - ```bash - constellation apply -y - ``` - - The Output should look like the following: - - ```shell-session - $ constellation apply -y - Checking for infrastructure changes - The following Constellation cluster will be created: - 3 control-plane nodes of type 2-vCPUs will be created. - 1 worker node of type 2-vCPUs will be created. - Creating - Cloud infrastructure created successfully. - Your Constellation master secret was successfully written to ./constellation-mastersecret.json - Connecting - Initializing cluster - Installing Kubernetes components - Your Constellation cluster was successfully initialized. - - Constellation cluster identifier g6iMP5wRU1b7mpOz2WEISlIYSfdAhB0oNaOg6XEwKFY= - Kubernetes configuration constellation-admin.conf - - You can now connect to your cluster by executing: - export KUBECONFIG="$PWD/constellation-admin.conf" - ``` - - The cluster's identifier will be different in your output. - Keep `constellation-mastersecret.json` somewhere safe. - This will allow you to [recover your cluster](../workflows/recovery.md) in case of a disaster. - - :::info - - Depending on your setup, `constellation apply` may take 10+ minutes to complete. - - ::: - -3. Configure kubectl - - ```bash - export KUBECONFIG="$PWD/constellation-admin.conf" - ``` - - - - -## Connect to the cluster - -Your cluster initially consists of a single control-plane node: - -```shell-session -$ kubectl get nodes -NAME STATUS ROLES AGE VERSION -control-plane-0 Ready control-plane 66s v1.24.6 -``` - -Additional nodes will request to join the cluster shortly. 
Before each additional node is allowed to join the cluster, its state is verified using remote attestation by the [JoinService](../architecture/microservices.md#joinservice). -If verification passes successfully, the new node receives keys and certificates to join the cluster. - -You can follow this process by viewing the logs of the JoinService: - -```shell-session -$ kubectl logs -n kube-system daemonsets/join-service -f -{"level":"INFO","ts":"2022-10-14T09:32:20Z","caller":"cmd/main.go:48","msg":"Constellation Node Join Service","version":"2.1.0","cloudProvider":"qemu"} -{"level":"INFO","ts":"2022-10-14T09:32:20Z","logger":"validator","caller":"watcher/validator.go:96","msg":"Updating expected measurements"} -... -``` - -Once all nodes have joined your cluster, it may take a couple of minutes for all resources to become available. -You can check on the state of your cluster by running the following: - -```shell-session -$ kubectl get nodes -NAME STATUS ROLES AGE VERSION -control-plane-0 Ready control-plane 2m59s v1.24.6 -worker-0 Ready 32s v1.24.6 -``` - -## Deploy a sample application - -1. Deploy the [emojivoto app](https://github.com/BuoyantIO/emojivoto) - - ```bash - kubectl apply -k github.com/BuoyantIO/emojivoto/kustomize/deployment - ``` - -2. Expose the frontend service locally - - ```bash - kubectl wait --for=condition=available --timeout=60s -n emojivoto --all deployments - kubectl -n emojivoto port-forward svc/web-svc 8080:80 & - curl http://localhost:8080 - kill %1 - ``` - -## Terminate your cluster - - - - -Once you are done, you can clean up the created resources using the following command: - -```bash -constellation mini down -``` - -This will destroy your cluster and clean up your workspace. -The VM image and cluster configuration file (`constellation-conf.yaml`) will be kept and may be reused to create new clusters. - - - - -Once you are done, you can clean up the created resources using the following command: - -```bash -constellation terminate -``` - -This should give the following output: - -```shell-session -$ constellation terminate -You are about to terminate a Constellation cluster. -All of its associated resources will be DESTROYED. -This action is irreversible and ALL DATA WILL BE LOST. -Do you want to continue? [y/n]: -``` - -Confirm with `y` to terminate the cluster: - -```shell-session -Terminating ... -Your Constellation cluster was terminated successfully. -``` - -This will destroy your cluster and clean up your workspace. -The VM image and cluster configuration file (`constellation-conf.yaml`) will be kept and may be reused to create new clusters. - - - - -## Troubleshooting - -Make sure to use the [latest release](https://github.com/edgelesssys/constellation/releases/latest) and check out the [known issues](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22). - -### VMs have no internet access / CLI remains in "Initializing cluster" state - -`iptables` rules may prevent your VMs from accessing the internet. -Make sure your rules aren't dropping forwarded packages. 
- -List your rules: - -```bash -sudo iptables -S -``` - -The output may look similar to the following: - -```shell-session --P INPUT ACCEPT --P FORWARD DROP --P OUTPUT ACCEPT --N DOCKER --N DOCKER-ISOLATION-STAGE-1 --N DOCKER-ISOLATION-STAGE-2 --N DOCKER-USER -``` - -If your `FORWARD` chain is set to `DROP`, you need to update your rules: - -```bash -sudo iptables -P FORWARD ACCEPT -``` diff --git a/docs/versioned_docs/version-2.20/getting-started/first-steps.md b/docs/versioned_docs/version-2.20/getting-started/first-steps.md deleted file mode 100644 index 2afe95635..000000000 --- a/docs/versioned_docs/version-2.20/getting-started/first-steps.md +++ /dev/null @@ -1,235 +0,0 @@ -# First steps with Constellation - -The following steps guide you through the process of creating a cluster and deploying a sample app. This example assumes that you have successfully [installed and set up Constellation](install.md), -and have access to a cloud subscription. - -:::tip -If you don't have a cloud subscription, you can also set up a [local Constellation cluster using virtualization](../getting-started/first-steps-local.md) for testing. -::: - -:::note -If you encounter any problem with the following steps, make sure to use the [latest release](https://github.com/edgelesssys/constellation/releases/latest) and check out the [known issues](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22). -::: - -## Create a cluster - -1. Create the [configuration file](../workflows/config.md) and state file for your cloud provider. If you are following the steps of this guide, there is no need to edit the file. - - - - - ```bash - constellation config generate aws - ``` - - - - - ```bash - constellation config generate azure - ``` - - - - - ```bash - constellation config generate gcp - ``` - - - - - ```bash - constellation config generate stackit - ``` - - - - -2. Create your [IAM configuration](../workflows/config.md#creating-an-iam-configuration). - - - - - ```bash - constellation iam create aws --zone=us-east-2a --prefix=constellTest --update-config - ``` - - This command creates IAM configuration for the AWS zone `us-east-2a` using the prefix `constellTest` for all named resources being created. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. - - Depending on the attestation variant selected on config generation, different regions are available. - AMD SEV-SNP machines (requires the default attestation variant `awsSEVSNP`) are currently available in the following regions: - * `eu-west-1` - * `us-east-2` - - You can find a list of regions that support AMD SEV-SNP in [AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/snp-requirements.html). - - NitroTPM machines (requires the attestation variant `awsNitroTPM`) are available in all regions. - Constellation OS images are currently replicated to the following regions: - * `eu-central-1` - * `eu-west-1` - * `eu-west-3` - * `us-east-2` - * `ap-south-1` - - If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+AWS+image+region:+xx-xxxx-x). - - You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). 
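To confirm which attestation variant (and therefore which AWS regions) your configuration targets, you can read it back from the generated config file. This is a minimal sketch, assuming `yq` v4 is installed and the default `constellation-conf.yaml` created by the commands above:

```bash
# Show the attestation variant selected during config generation (e.g., awsSEVSNP)
yq eval '.attestation | keys' constellation-conf.yaml

# Show the region and zone that the IAM and cluster resources will use
yq eval '.provider.aws.region, .provider.aws.zone' constellation-conf.yaml
```
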
- - - - - ```bash - constellation iam create azure --subscriptionID 00000000-0000-0000-0000-000000000000 --region=westus --resourceGroup=constellTest --servicePrincipal=spTest --update-config - ``` - - This command creates IAM configuration on the Azure region `westus` creating a new resource group `constellTest` and a new service principal `spTest`. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. - - CVMs are available in several Azure regions. Constellation OS images are currently replicated to the following: - - * `germanywestcentral` - * `westus` - * `eastus` - * `northeurope` - * `westeurope` - * `southeastasia` - - If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+Azure+image+region:+xx-xxxx-x). - - You can find a list of all [regions in Azure's documentation](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=virtual-machines®ions=all). - - - - - ```bash - constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --serviceAccountID=constell-test --update-config - ``` - - This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. - - Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `C2D` or `N2D`. - - - - - To use Constellation on STACKIT, the cluster will use the User Access Token (UAT) that's generated [during the install step](./install.md). - After creating the accounts, fill in the STACKIT details in `constellation-conf.yaml` under `provider.openstack`: - - * `stackitProjectID`: STACKIT project id (can be found after login on the [STACKIT portal](https://portal.stackit.cloud)) - - :::caution - - `stackitProjectID` refers to the ID of your STACKIT project. The STACKIT portal also shows the OpenStack ID that's associated with your project in some places. Make sure you insert the STACKIT project ID in the `constellation-conf.yaml` file. It's of the format `XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX`. - - ::: - - - - - :::tip - To learn about all options you have for managing IAM resources and Constellation configuration, see the [Configuration workflow](../workflows/config.md). - ::: - - - -3. Create the cluster. `constellation apply` uses options set in `constellation-conf.yaml`. - If you want to manually manage your cloud resources, for example by using [Terraform](../reference/terraform.md), follow the corresponding instructions in the [Create workflow](../workflows/create.md). - - :::tip - - On Azure, you may need to wait 15+ minutes at this point for role assignments to propagate. - - ::: - - ```bash - constellation apply -y - ``` - - This should look similar to the following: - - ```shell-session - $ constellation apply -y - Checking for infrastructure changes - The following Constellation cluster will be created: - 3 control-plane nodes of type n2d-standard-4 will be created. - 1 worker node of type n2d-standard-4 will be created. 
- Creating - Cloud infrastructure created successfully - Your Constellation master secret was successfully written to ./constellation-mastersecret.json - Connecting - Initializing cluster - Installing Kubernetes components - Your Constellation cluster was successfully initialized. - - Constellation cluster identifier g6iMP5wRU1b7mpOz2WEISlIYSfdAhB0oNaOg6XEwKFY= - Kubernetes configuration constellation-admin.conf - - You can now connect to your cluster by executing: - export KUBECONFIG="$PWD/constellation-admin.conf" - ``` - - The cluster's identifier will be different in your output. - Keep `constellation-mastersecret.json` somewhere safe. - This will allow you to [recover your cluster](../workflows/recovery.md) in case of a disaster. - - :::info - - Depending on your CSP and region, `constellation apply` may take 10+ minutes to complete. - - ::: - -4. Configure kubectl. - - ```bash - export KUBECONFIG="$PWD/constellation-admin.conf" - ``` - -## Deploy a sample application - -1. Deploy the [emojivoto app](https://github.com/BuoyantIO/emojivoto) - - ```bash - kubectl apply -k github.com/BuoyantIO/emojivoto/kustomize/deployment - ``` - -2. Expose the frontend service locally - - ```bash - kubectl wait --for=condition=available --timeout=60s -n emojivoto --all deployments - kubectl -n emojivoto port-forward svc/web-svc 8080:80 & - curl http://localhost:8080 - kill %1 - ``` - -## Terminate your cluster - -Use the CLI to terminate your cluster. If you manually used [Terraform](../reference/terraform.md) to manage your cloud resources, follow the corresponding instructions in the [Terminate workflow](../workflows/terminate.md). - -```bash -constellation terminate -``` - -This should give the following output: - -```shell-session -$ constellation terminate -You are about to terminate a Constellation cluster. -All of its associated resources will be DESTROYED. -This action is irreversible and ALL DATA WILL BE LOST. -Do you want to continue? [y/n]: -``` - -Confirm with `y` to terminate the cluster: - -```shell-session -Terminating ... -Your Constellation cluster was terminated successfully. -``` - -Optionally, you can also [delete your IAM resources](../workflows/config.md#deleting-an-iam-configuration). diff --git a/docs/versioned_docs/version-2.20/getting-started/install.md b/docs/versioned_docs/version-2.20/getting-started/install.md deleted file mode 100644 index 29be1e7f6..000000000 --- a/docs/versioned_docs/version-2.20/getting-started/install.md +++ /dev/null @@ -1,439 +0,0 @@ -# Installation and setup - -Constellation runs entirely in your cloud environment and can be controlled via a dedicated [command-line interface (CLI)](../reference/cli.md) or a [Terraform provider](../workflows/terraform-provider.md). - -## Prerequisites - -Make sure the following requirements are met: - -* Your machine is running Linux, macOS, or Windows -* You have admin rights on your machine -* [kubectl](https://kubernetes.io/docs/tasks/tools/) is installed -* Your CSP is Amazon Web Services (AWS), Microsoft Azure, Google Cloud Platform (GCP), or STACKIT - -## Install the Constellation CLI - -:::tip - -If you prefer to use Terraform, you can alternatively use the [Terraform provider](../workflows/terraform-provider.md) to manage the cluster's lifecycle. - -::: - -The CLI executable is available at [GitHub](https://github.com/edgelesssys/constellation/releases). -Install it with the following commands: - - - - -1. 
Download the CLI: - -```bash -curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-linux-amd64 -``` - -2. [Verify the signature](../workflows/verify-cli.md) (optional) - -3. Install the CLI to your PATH: - -```bash -sudo install constellation-linux-amd64 /usr/local/bin/constellation -``` - - - - -1. Download the CLI: - -```bash -curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-linux-arm64 -``` - -2. [Verify the signature](../workflows/verify-cli.md) (optional) - -3. Install the CLI to your PATH: - -```bash -sudo install constellation-linux-arm64 /usr/local/bin/constellation -``` - - - - - -1. Download the CLI: - -```bash -curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-darwin-arm64 -``` - -2. [Verify the signature](../workflows/verify-cli.md) (optional) - -3. Install the CLI to your PATH: - -```bash -sudo install constellation-darwin-arm64 /usr/local/bin/constellation -``` - - - - - -1. Download the CLI: - -```bash -curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-darwin-amd64 -``` - -2. [Verify the signature](../workflows/verify-cli.md) (optional) - -3. Install the CLI to your PATH: - -```bash -sudo install constellation-darwin-amd64 /usr/local/bin/constellation -``` - - - - - -1. Download the CLI: - -```bash -Invoke-WebRequest -OutFile ./constellation.exe -Uri 'https://github.com/edgelesssys/constellation/releases/latest/download/constellation-windows-amd64.exe' -``` - -2. [Verify the signature](../workflows/verify-cli.md) (optional) - -3. Install the CLI under `C:\Program Files\Constellation\bin\constellation.exe` - -3. Add the CLI to your PATH: - - 1. Open `Advanced system settings` by searching for the App in the Windows search - 2. Go to the `Advanced` tab - 3. Click `Environment Variables…` - 4. Click variable called `Path` and click `Edit…` - 5. Click `New` - 6. Enter the path to the folder containing the binary you want on your PATH: `C:\Program Files\Constellation\bin` - - - - -:::tip -The CLI supports autocompletion for various shells. To set it up, run `constellation completion` and follow the given steps. -::: - -## Set up cloud credentials - -Constellation makes authenticated calls to the CSP API. Therefore, you need to set up Constellation with the credentials for your CSP. - -:::tip -If you don't have a cloud subscription, you can also set up a [local Constellation cluster using virtualization](../getting-started/first-steps-local.md) for testing. -::: - -### Required permissions - - - - -To set up a Constellation cluster, you need to perform two tasks that require permissions: create the infrastructure and create roles for cluster nodes. Both of these actions can be performed by different users, e.g., an administrator to create roles and a DevOps engineer to create the infrastructure. 
- -To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "ec2:DescribeAccountAttributes", - "iam:AddRoleToInstanceProfile", - "iam:AttachRolePolicy", - "iam:CreateInstanceProfile", - "iam:CreatePolicy", - "iam:CreateRole", - "iam:DeleteInstanceProfile", - "iam:DeletePolicy", - "iam:DeletePolicyVersion", - "iam:DeleteRole", - "iam:DetachRolePolicy", - "iam:GetInstanceProfile", - "iam:GetPolicy", - "iam:GetPolicyVersion", - "iam:GetRole", - "iam:ListAttachedRolePolicies", - "iam:ListInstanceProfilesForRole", - "iam:ListPolicyVersions", - "iam:ListRolePolicies", - "iam:PassRole", - "iam:RemoveRoleFromInstanceProfile", - "sts:GetCallerIdentity" - ], - "Resource": "*" - } - ] -} -``` - -The built-in `AdministratorAccess` policy is a superset of these permissions. - -To [create a Constellation cluster](../workflows/create.md), see the permissions of [main.tf](https://github.com/edgelesssys/constellation/blob/main/terraform/infrastructure/iam/aws/main.tf). - -The built-in `PowerUserAccess` policy is a superset of these permissions. - -Follow Amazon's guide on [understanding](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) and [managing policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html). - - - - -The following [resource providers need to be registered](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/resource-providers-and-types#register-resource-provider) in your subscription: - -* `Microsoft.Attestation` -* `Microsoft.Compute` -* `Microsoft.Insights` -* `Microsoft.ManagedIdentity` -* `Microsoft.Network` - -By default, Constellation tries to register these automatically if they haven't been registered before. - -To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: - -* `*/register/action` \[1] -* `Microsoft.Authorization/roleAssignments/*` -* `Microsoft.Authorization/roleDefinitions/*` -* `Microsoft.ManagedIdentity/userAssignedIdentities/*` -* `Microsoft.Resources/subscriptions/resourcegroups/*` - -The built-in `Owner` role is a superset of these permissions. - -To [create a Constellation cluster](../workflows/create.md), you need the following permissions: - -* `Microsoft.Attestation/attestationProviders/*` -* `Microsoft.Compute/virtualMachineScaleSets/*` -* `Microsoft.Insights/components/*` -* `Microsoft.ManagedIdentity/userAssignedIdentities/*` -* `Microsoft.Network/loadBalancers/*` -* `Microsoft.Network/loadBalancers/backendAddressPools/*` -* `Microsoft.Network/networkSecurityGroups/*` -* `Microsoft.Network/publicIPAddresses/*` -* `Microsoft.Network/virtualNetworks/*` -* `Microsoft.Network/virtualNetworks/subnets/*` -* `Microsoft.Network/natGateways/*` - -The built-in `Contributor` role is a superset of these permissions. - -Follow Microsoft's guide on [understanding](https://learn.microsoft.com/en-us/azure/role-based-access-control/role-definitions) and [assigning roles](https://learn.microsoft.com/en-us/azure/role-based-access-control/role-assignments). - -1: You can omit `*/register/Action` if the resource providers mentioned above are already registered and the `ARM_SKIP_PROVIDER_REGISTRATION` environment variable is set to `true` when creating the IAM configuration. 
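As a concrete illustration of footnote 1, the following sketch skips the automatic provider registration. It assumes the resource providers listed above are already registered and reuses the example values from the `constellation iam create azure` command shown earlier:

```bash
# Providers are already registered, so the */register/Action permission isn't needed
export ARM_SKIP_PROVIDER_REGISTRATION=true

constellation iam create azure \
  --subscriptionID 00000000-0000-0000-0000-000000000000 \
  --region=westus \
  --resourceGroup=constellTest \
  --servicePrincipal=spTest \
  --update-config
```
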
- - - - -Create a new project for Constellation or use an existing one. -Enable the [Compute Engine API](https://console.cloud.google.com/apis/library/compute.googleapis.com) on it. - -To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: - -* `iam.serviceAccountKeys.create` -* `iam.serviceAccountKeys.delete` -* `iam.serviceAccountKeys.get` -* `iam.serviceAccounts.create` -* `iam.serviceAccounts.delete` -* `iam.serviceAccounts.get` -* `resourcemanager.projects.getIamPolicy` -* `resourcemanager.projects.setIamPolicy` - -Together, the built-in roles `roles/editor` and `roles/resourcemanager.projectIamAdmin` form a superset of these permissions. - -To [create a Constellation cluster](../workflows/create.md), you need the following permissions: - -* `compute.addresses.createInternal` -* `compute.addresses.deleteInternal` -* `compute.addresses.get` -* `compute.addresses.useInternal` -* `compute.backendServices.create` -* `compute.backendServices.delete` -* `compute.backendServices.get` -* `compute.backendServices.use` -* `compute.disks.create` -* `compute.firewalls.create` -* `compute.firewalls.delete` -* `compute.firewalls.get` -* `compute.firewalls.update` -* `compute.globalAddresses.create` -* `compute.globalAddresses.delete` -* `compute.globalAddresses.get` -* `compute.globalAddresses.use` -* `compute.globalForwardingRules.create` -* `compute.globalForwardingRules.delete` -* `compute.globalForwardingRules.get` -* `compute.globalForwardingRules.setLabels` -* `compute.globalOperations.get` -* `compute.healthChecks.create` -* `compute.healthChecks.delete` -* `compute.healthChecks.get` -* `compute.healthChecks.useReadOnly` -* `compute.instanceGroupManagers.create` -* `compute.instanceGroupManagers.delete` -* `compute.instanceGroupManagers.get` -* `compute.instanceGroupManagers.update` -* `compute.instanceGroups.create` -* `compute.instanceGroups.delete` -* `compute.instanceGroups.get` -* `compute.instanceGroups.update` -* `compute.instanceGroups.use` -* `compute.instances.create` -* `compute.instances.setLabels` -* `compute.instances.setMetadata` -* `compute.instances.setTags` -* `compute.instanceTemplates.create` -* `compute.instanceTemplates.delete` -* `compute.instanceTemplates.get` -* `compute.instanceTemplates.useReadOnly` -* `compute.networks.create` -* `compute.networks.delete` -* `compute.networks.get` -* `compute.networks.updatePolicy` -* `compute.routers.create` -* `compute.routers.delete` -* `compute.routers.get` -* `compute.routers.update` -* `compute.subnetworks.create` -* `compute.subnetworks.delete` -* `compute.subnetworks.get` -* `compute.subnetworks.use` -* `compute.targetTcpProxies.create` -* `compute.targetTcpProxies.delete` -* `compute.targetTcpProxies.get` -* `compute.targetTcpProxies.use` -* `iam.serviceAccounts.actAs` - -Together, the built-in roles `roles/editor`, `roles/compute.instanceAdmin` and `roles/resourcemanager.projectIamAdmin` form a superset of these permissions. - -Follow Google's guide on [understanding](https://cloud.google.com/iam/docs/understanding-roles) and [assigning roles](https://cloud.google.com/iam/docs/granting-changing-revoking-access). - - - - -Constellation on STACKIT requires a User Access Token (UAT) for the OpenStack API and a STACKIT service account. -The UAT already has all required permissions by default. -The STACKIT service account needs the `editor` role to create STACKIT LoadBalancers. 
-Look at the [STACKIT documentation](https://docs.stackit.cloud/stackit/en/getting-started-in-service-accounts-134415831.html) on how to create the service account and assign the role. - - - - -### Authentication - -You need to authenticate with your CSP. The following lists the required steps for *testing* and *production* environments. - -:::note -The steps for a *testing* environment are simpler. However, they may expose secrets to the CSP. If in doubt, follow the *production* steps. -::: - - - - -**Testing** - -You can use the [AWS CloudShell](https://console.aws.amazon.com/cloudshell/home). Make sure you are [authorized to use it](https://docs.aws.amazon.com/cloudshell/latest/userguide/sec-auth-with-identities.html). - -**Production** - -Use the latest version of the [AWS CLI](https://aws.amazon.com/cli/) on a trusted machine: - -```bash -aws configure -``` - -Options and first steps are described in the [AWS CLI documentation](https://docs.aws.amazon.com/cli/index.html). - - - - -**Testing** - -Simply open the [Azure Cloud Shell](https://docs.microsoft.com/en-us/azure/cloud-shell/overview). - -**Production** - -Use the latest version of the [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/) on a trusted machine: - -```bash -az login -``` - -Other options are described in Azure's [authentication guide](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli). - - - - -**Testing** - -You can use the [Google Cloud Shell](https://cloud.google.com/shell). Make sure your [session is authorized](https://cloud.google.com/shell/docs/auth). For example, execute `gsutil` and accept the authorization prompt. - -**Production** - -Use one of the following options on a trusted machine: - -* Use the [`gcloud` CLI](https://cloud.google.com/sdk/gcloud) - - ```bash - gcloud auth application-default login - ``` - - This will ask you to log-in to your Google account and create your credentials. - The Constellation CLI will automatically load these credentials when needed. - -* Set up a service account and pass the credentials manually - - Follow [Google's guide](https://cloud.google.com/docs/authentication/production#manually) for setting up your credentials. - - - - -You need to authenticate with the infrastructure API (OpenStack) and create a service account (STACKIT API). - -1. [Follow the STACKIT documentation](https://docs.stackit.cloud/stackit/en/step-1-generating-of-user-access-token-11763726.html) for obtaining a User Access Token (UAT) to use the infrastructure API -2. Create a configuration file with the credentials from the User Access Token under: - * Linux: `~/.config/openstack/clouds.yaml` - * macOS: `/Users//Library/Application Support/openstack/clouds.yaml` or `/etc/openstack/clouds.yaml` - * Windows: `%AppData%\openstack\clouds.yaml` - - - ```yaml - clouds: - stackit: - auth: - auth_url: https://keystone.api.iaas.eu01.stackit.cloud/v3 - username: REPLACE_WITH_UAT_USERNAME - password: REPLACE_WITH_UAT_PASSWORD - project_id: REPLACE_WITH_STACKIT_PROJECT_ID - project_name: REPLACE_WITH_STACKIT_PROJECT_NAME - user_domain_name: portal_mvp - project_domain_name: portal_mvp - region_name: RegionOne - identity_api_version: 3 - ``` - -:::caution - -`project_id` refers to the ID of your OpenStack project. The STACKIT portal also shows the STACKIT ID that's associated with your project in some places. Make sure you insert the OpenStack project ID in the `clouds.yaml` file. - -::: - -3. 
[Follow the STACKIT documentation](https://docs.stackit.cloud/stackit/en/getting-started-in-service-accounts-134415831.html) for creating a service account and an access token -4. Assign the `editor` role to the service account by [following the documentation](https://docs.stackit.cloud/stackit/en/getting-started-in-service-accounts-134415831.html) -5. Create a configuration file under `~/.stackit/credentials.json` (`%USERPROFILE%\.stackit\credentials.json` on Windows) - - ```json - {"STACKIT_SERVICE_ACCOUNT_TOKEN":"REPLACE_WITH_TOKEN"} - ``` - - - - - -## Next steps - -You are now ready to [deploy your first confidential Kubernetes cluster and application](first-steps.md). diff --git a/docs/versioned_docs/version-2.20/getting-started/marketplaces.md b/docs/versioned_docs/version-2.20/getting-started/marketplaces.md deleted file mode 100644 index a6763a42a..000000000 --- a/docs/versioned_docs/version-2.20/getting-started/marketplaces.md +++ /dev/null @@ -1,56 +0,0 @@ -# Using Constellation via Cloud Marketplaces - -Constellation is available through the Marketplaces of AWS, Azure, GCP, and STACKIT. This allows you to create self-managed Constellation clusters that are billed on a pay-per-use basis (hourly, per vCPU) with your CSP account. You can still get direct support by Edgeless Systems. For more information, please [contact us](https://www.edgeless.systems/enterprise-support/). - -This document explains how to run Constellation with the dynamically billed cloud marketplace images. - - - - -To use Constellation's marketplace images, ensure that you are subscribed to the [marketplace offering](https://aws.amazon.com/marketplace/pp/prodview-2mbn65nv57oys) through the web portal. - -Then, enable the use of marketplace images in your Constellation `constellation-conf.yaml` [config file](../workflows/config.md): - -```bash -yq eval -i ".provider.aws.useMarketplaceImage = true" constellation-conf.yaml -``` - - - - -Constellation has a private marketplace plan. Please [contact us](https://www.edgeless.systems/enterprise-support/) to gain access. - -To use a marketplace image, you need to accept the marketplace image's terms once for your subscription with the [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/vm/image/terms?view=azure-cli-latest): - -```bash -az vm image terms accept --publisher edgelesssystems --offer constellation --plan constellation -``` - -Then, enable the use of marketplace images in your Constellation `constellation-conf.yaml` [config file](../workflows/config.md): - -```bash -yq eval -i ".provider.azure.useMarketplaceImage = true" constellation-conf.yaml -``` - - - - -To use a marketplace image, ensure that the account is entitled to use marketplace images by Edgeless Systems by accepting the terms through the [web portal](https://console.cloud.google.com/marketplace/vm/config/edgeless-systems-public/constellation). - -Then, enable the use of marketplace images in your Constellation `constellation-conf.yaml` [config file](../workflows/config.md): - -```bash -yq eval -i ".provider.gcp.useMarketplaceImage = true" constellation-conf.yaml -``` - - - - -On STACKIT, the selected Constellation image is always a marketplace image. You can find more information on the STACKIT portal. - - - - -Ensure that the cluster uses an official release image version (i.e., `.image=vX.Y.Z` in the `constellation-conf.yaml` file). - -From there, you can proceed with the [cluster creation](../workflows/create.md) as usual. 
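Before creating the cluster, you can quickly verify that the configured image is a release version. A small sketch, assuming `yq` is available and the default config file name used throughout this guide:

```bash
# Should print an official release version of the form vX.Y.Z
yq eval '.image' constellation-conf.yaml
```
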
diff --git a/docs/versioned_docs/version-2.20/intro.md b/docs/versioned_docs/version-2.20/intro.md deleted file mode 100644 index 0bfe86da9..000000000 --- a/docs/versioned_docs/version-2.20/intro.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -slug: / -id: intro ---- -# Introduction - -Welcome to the documentation of Constellation! Constellation is a Kubernetes engine that aims to provide the best possible data security. - -![Constellation concept](/img/concept.svg) - - Constellation shields your entire Kubernetes cluster from the underlying cloud infrastructure. Everything inside is always encrypted, including at runtime in memory. For this, Constellation leverages a technology called *confidential computing* and more specifically Confidential VMs. - -:::tip -See the 📄[whitepaper](https://content.edgeless.systems/hubfs/Confidential%20Computing%20Whitepaper.pdf) for more information on confidential computing. -::: - -## Goals - -From a security perspective, Constellation is designed to keep all data always encrypted and to prevent any access from the underlying (cloud) infrastructure. This includes access from datacenter employees, privileged cloud admins, and attackers coming through the infrastructure. Such attackers could be malicious co-tenants escalating their privileges or hackers who managed to compromise a cloud server. - -From a DevOps perspective, Constellation is designed to work just like what you would expect from a modern Kubernetes engine. - -## Use cases - -Constellation provides unique security [features](overview/confidential-kubernetes.md) and [benefits](overview/security-benefits.md). The core use cases are: - -* Increasing the overall security of your clusters -* Increasing the trustworthiness of your SaaS offerings -* Moving sensitive workloads from on-prem to the cloud -* Meeting regulatory requirements - -## Next steps - -You can learn more about the concept of Confidential Kubernetes, features, security benefits, and performance of Constellation in the *Basics* section. To jump right into the action head to *Getting started*. diff --git a/docs/versioned_docs/version-2.20/overview/clouds.md b/docs/versioned_docs/version-2.20/overview/clouds.md deleted file mode 100644 index b2695d28e..000000000 --- a/docs/versioned_docs/version-2.20/overview/clouds.md +++ /dev/null @@ -1,66 +0,0 @@ -# Feature status of clouds - -What works on which cloud? Currently, Confidential VMs (CVMs) are available in varying quality on the different clouds and software stacks. - -For Constellation, the ideal environment provides the following: - -1. Ability to run arbitrary software and images inside CVMs -2. CVMs based on AMD SEV-SNP (available in EPYC CPUs since the Milan generation) or Intel TDX (available in Xeon CPUs since the Sapphire Rapids generation) -3. Ability for CVM guests to obtain raw hardware attestation statements -4. Reviewable, open-source firmware inside CVMs -5. Capability of the firmware to attest the integrity of the code it passes control to, e.g., with an embedded virtual TPM (vTPM) - -(1) is a functional must-have. (2)--(5) are required for remote attestation that fully keeps the infrastructure/cloud out. Constellation can work without them or with approximations, but won't protect against certain privileged attackers anymore. - -The following table summarizes the state of features for different infrastructures. 
- -| **Feature** | **AWS** | **Azure** | **GCP** | **STACKIT** | **OpenStack (Yoga)** | -|-----------------------------------|---------|-----------|---------|--------------|----------------------| -| **1. Custom images** | Yes | Yes | Yes | Yes | Yes | -| **2. SEV-SNP or TDX** | Yes | Yes | Yes | No | Depends on kernel/HV | -| **3. Raw guest attestation** | Yes | Yes | Yes | No | Depends on kernel/HV | -| **4. Reviewable firmware** | Yes | No* | No | No | Depends on kernel/HV | -| **5. Confidential measured boot** | No | Yes | No | No | Depends on kernel/HV | - -## Amazon Web Services (AWS) - -Amazon EC2 [supports AMD SEV-SNP](https://aws.amazon.com/de/about-aws/whats-new/2023/04/amazon-ec2-amd-sev-snp/). -Regarding (3), AWS provides direct access to attestation statements. -However, regarding (5), attestation is partially based on the [NitroTPM](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html) for [measured boot](../architecture/attestation.md#measured-boot), which is a vTPM managed by the Nitro hypervisor. -Hence, the hypervisor is currently part of Constellation's TCB. -Regarding (4), the [firmware is open source](https://github.com/aws/uefi) and can be reproducibly built. - -## Microsoft Azure - -With its [CVM offering](https://docs.microsoft.com/en-us/azure/confidential-computing/confidential-vm-overview), Azure provides the best foundations for Constellation. -Regarding (3), Azure provides direct access to attestation statements. -The firmware runs in an isolated domain inside the CVM and exposes a vTPM (5), but it's closed source (4). -On SEV-SNP, Azure uses VM Privilege Level (VMPL) isolation for the separation of firmware and the rest of the VM; on TDX, they use TD partitioning. -This firmware is signed by Azure. -The signature is reflected in the attestation statements of CVMs. -Thus, the Azure closed-source firmware becomes part of Constellation's trusted computing base (TCB). - -\* Recently, [Azure announced the open source paravisor OpenHCL](https://techcommunity.microsoft.com/blog/windowsosplatform/openhcl-the-new-open-source-paravisor/4273172). It's the foundation for fully open source and verifiable CVM firmware. Once Azure provides their CVM firmware with reproducible builds based on OpenHCL, (4) switches from *No* to *Yes*. Constellation will support OpenHCL based firmware on Azure in the future. - -## Google Cloud Platform (GCP) - -The [CVMs Generally Available in GCP](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview#technologies) are based on AMD SEV-ES or SEV-SNP. -Regarding (3), with their SEV-SNP offering Google provides direct access to attestation statements. -However, regarding (5), attestation is partially based on the [Shielded VM vTPM](https://cloud.google.com/compute/shielded-vm/docs/shielded-vm#vtpm) for [measured boot](../architecture/attestation.md#measured-boot), which is a vTPM managed by Google's hypervisor. -Hence, the hypervisor is currently part of Constellation's TCB. -Regarding (4), the CVMs still include closed-source firmware. - -[TDX on Google](https://cloud.google.com/blog/products/identity-security/confidential-vms-on-intel-cpus-your-datas-new-intelligent-defense) is in public preview. -With it, Constellation would have a similar TCB and attestation flow as with the current SEV-SNP offering. - -## STACKIT - -[STACKIT Compute Engine](https://www.stackit.de/en/product/stackit-compute-engine/) supports AMD SEV-ES. 
A vTPM is used for measured boot, which is a vTPM managed by STACKIT's hypervisor. Hence, the hypervisor is currently part of Constellation's TCB. - -## OpenStack - -OpenStack is an open-source cloud and infrastructure management software. It's used by many smaller CSPs and datacenters. In the latest *Yoga* version, OpenStack has basic support for CVMs. However, much depends on the employed kernel and hypervisor. Features (2)--(4) are likely to be a *Yes* with Linux kernel version 6.2. Thus, going forward, OpenStack on corresponding AMD or Intel hardware will be a viable underpinning for Constellation. - -## Conclusion - -The different clouds and software like the Linux kernel and OpenStack are in the process of building out their support for state-of-the-art CVMs. Azure has already most features in place. For Constellation, the status quo means that the TCB has different shapes on different infrastructures. With broad SEV-SNP support coming to the Linux kernel, we soon expect a normalization of features across infrastructures. diff --git a/docs/versioned_docs/version-2.20/overview/confidential-kubernetes.md b/docs/versioned_docs/version-2.20/overview/confidential-kubernetes.md deleted file mode 100644 index bff8c3322..000000000 --- a/docs/versioned_docs/version-2.20/overview/confidential-kubernetes.md +++ /dev/null @@ -1,42 +0,0 @@ -# Confidential Kubernetes - -We use the term *Confidential Kubernetes* to refer to the concept of using confidential-computing technology to shield entire Kubernetes clusters from the infrastructure. The three defining properties of this concept are: - -1. **Workload shielding**: the confidentiality and integrity of all workload-related data and code are enforced. -2. **Control plane shielding**: the confidentiality and integrity of the cluster's control plane, state, and workload configuration are enforced. -3. **Attestation and verifiability**: the two properties above can be verified remotely based on hardware-rooted cryptographic certificates. - -Each of the above properties is equally important. Only with all three in conjunction, an entire cluster can be shielded without gaps. - -## Constellation security features - -Constellation implements the Confidential Kubernetes concept with the following security features. - -* **Runtime encryption**: Constellation runs all Kubernetes nodes inside Confidential VMs (CVMs). This gives runtime encryption for the entire cluster. -* **Network and storage encryption**: Constellation augments this with transparent encryption of the [network](../architecture/networking.md), [persistent storage](../architecture/encrypted-storage.md), and other managed storage like [AWS S3](../architecture/encrypted-storage.md#encrypted-s3-object-storage). Thus, workloads and control plane are truly end-to-end encrypted: at rest, in transit, and at runtime. -* **Transparent key management**: Constellation manages the corresponding [cryptographic keys](../architecture/keys.md) inside CVMs. -* **Node attestation and verification**: Constellation verifies the integrity of each new CVM-based node using [remote attestation](../architecture/attestation.md). Only "good" nodes receive the cryptographic keys required to access the network and storage of a cluster. -* **Confidential computing-optimized images**: A node is "good" if it's running a signed Constellation [node image](../architecture/images.md) inside a CVM and is in the expected state. (Node images are hardware-measured during boot. 
The measurements are reflected in the attestation statements that are produced by nodes and verified by Constellation.) -* **"Whole cluster" attestation**: Towards the DevOps engineer, Constellation provides a single hardware-rooted certificate from which all of the above can be verified. - -With the above, Constellation wraps an entire cluster into one coherent and verifiable *confidential context*. The concept is depicted in the following. - -![Confidential Kubernetes](../_media/concept-constellation.svg) - -## Comparison: Managed Kubernetes with CVMs - -In comparison, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. Consequently, this approach leaves a large attack surface, as is depicted in the following. - -![Concept: Managed Kubernetes plus CVMs](../_media/concept-managed.svg) - -The following table highlights the key differences in terms of features. - -| | Managed Kubernetes with CVMs | Confidential Kubernetes (Constellation✨) | -|-------------------------------------|------------------------------|--------------------------------------------| -| Runtime encryption | Partial (data plane only)| **Yes** | -| Node image verification | No | **Yes** | -| Full cluster attestation | No | **Yes** | -| Transparent network encryption | No | **Yes** | -| Transparent storage encryption | No | **Yes** | -| Confidential key management | No | **Yes** | -| Cloud agnostic / multi-cloud | No | **Yes** | diff --git a/docs/versioned_docs/version-2.20/overview/license.md b/docs/versioned_docs/version-2.20/overview/license.md deleted file mode 100644 index 34122c025..000000000 --- a/docs/versioned_docs/version-2.20/overview/license.md +++ /dev/null @@ -1,33 +0,0 @@ -# License - -## Source code - -Constellation's source code is available on [GitHub](https://github.com/edgelesssys/constellation) under the [GNU Affero General Public License v3.0](https://github.com/edgelesssys/constellation/blob/main/LICENSE). - -## Binaries - -Edgeless Systems provides ready-to-use and [signed](../architecture/attestation.md#chain-of-trust) binaries of Constellation. This includes the CLI and the [node images](../architecture/images.md). - -These binaries may be used free of charge within the bounds of Constellation's [**Community License**](#community-license). An [**Enterprise License**](#enterprise-license) can be purchased from Edgeless Systems. - -The Constellation CLI displays relevant license information when you initialize your cluster. You are responsible for staying within the bounds of your respective license. Constellation doesn't enforce any limits so as not to endanger your cluster's availability. 
- -## Terraform provider - -Edgeless Systems provides a [Terraform provider](https://github.com/edgelesssys/terraform-provider-constellation/releases), which may be used free of charge within the bounds of Constellation's [**Community License**](#community-license). An [**Enterprise License**](#enterprise-license) can be purchased from Edgeless Systems. - -You are responsible for staying within the bounds of your respective license. Constellation doesn't enforce any limits so as not to endanger your cluster's availability. - -## Community License - -You are free to use the Constellation binaries provided by Edgeless Systems to create services for internal consumption, evaluation purposes, or non-commercial use. You must not use the Constellation binaries to provide commercial hosted services to third parties. Edgeless Systems gives no warranties and offers no support. - -## Enterprise License - -Enterprise Licenses don't have the above limitations and come with support and additional features. Find out more at the [product website](https://www.edgeless.systems/products/constellation/). - -Once you have received your Enterprise License file, place it in your [Constellation workspace](../architecture/orchestration.md#workspaces) in a file named `constellation.license`. - -## CSP Marketplaces - -Constellation is available through the Marketplaces of AWS, Azure, GCP, and STACKIT. This allows you to create self-managed Constellation clusters that are billed on a pay-per-use basis (hourly, per vCPU) with your CSP account. You can still get direct support by Edgeless Systems. For more information, please [contact us](https://www.edgeless.systems/enterprise-support/). diff --git a/docs/versioned_docs/version-2.20/overview/performance/application.md b/docs/versioned_docs/version-2.20/overview/performance/application.md deleted file mode 100644 index c67d59644..000000000 --- a/docs/versioned_docs/version-2.20/overview/performance/application.md +++ /dev/null @@ -1,102 +0,0 @@ -# Application benchmarks - -## HashiCorp Vault - -[HashiCorp Vault](https://www.vaultproject.io/) is a distributed secrets management software that can be deployed to Kubernetes. -HashiCorp maintains a benchmarking tool for vault, [vault-benchmark](https://github.com/hashicorp/vault-benchmark/). -Vault-benchmark generates load on a Vault deployment and measures response times. - -This article describes the results from running vault-benchmark on Constellation, AKS, and GKE. -You can find the setup for producing the data discussed in this article in the [vault-benchmarks](https://github.com/edgelesssys/vault-benchmarks) repository. - -The Vault API used during benchmarking is the [transits secret engine](https://developer.hashicorp.com/vault/docs/secrets/transit). -This allows services to send data to Vault for encryption, decryption, signing, and verification. - -## Results - -On each run, vault-benchmark sends requests and measures the latencies. -The measured latencies are aggregated through various statistical features. -After running the benchmark n times, the arithmetic mean over a subset of the reported statistics is calculated. -The selected features are arithmetic mean, 99th percentile, minimum, and maximum. - -Arithmetic mean gives a general sense of the latency on each target. -The 99th percentile shows performance in (most likely) erroneous states. -Minimum and maximum mark the range within which latency varies each run. - -The benchmark was configured with 1300 workers and 10 seconds per run. 
-Those numbers were chosen empirically. -The latency was stabilizing at 10 seconds runtime, not changing with further increase. -Increasing the number of workers beyond 1300 leads to request failures, marking the limit Vault was able to handle in this setup. -All results are based on 100 runs. - -The following data was generated while running five replicas, one primary, and four standby nodes. -All numbers are in seconds if not indicated otherwise. -``` -========== Results AKS ========== -Mean: mean: 1.632200, variance: 0.002057 -P99: mean: 5.480679, variance: 2.263700 -Max: mean: 6.651001, variance: 2.808401 -Min: mean: 0.011415, variance: 0.000133 -========== Results GKE ========== -Mean: mean: 1.656435, variance: 0.003615 -P99: mean: 6.030807, variance: 3.955051 -Max: mean: 7.164843, variance: 3.300004 -Min: mean: 0.010233, variance: 0.000111 -========== Results C11n ========== -Mean: mean: 1.651549, variance: 0.001610 -P99: mean: 5.780422, variance: 3.016106 -Max: mean: 6.942997, variance: 3.075796 -Min: mean: 0.013774, variance: 0.000228 -========== AKS vs C11n ========== -Mean: +1.171577 % (AKS is faster) -P99: +5.185495 % (AKS is faster) -Max: +4.205618 % (AKS is faster) -Min: +17.128781 % (AKS is faster) -========== GKE vs C11n ========== -Mean: -0.295851 % (GKE is slower) -P99: -4.331603 % (GKE is slower) -Max: -3.195248 % (GKE is slower) -Min: +25.710886 % (GKE is faster) -``` - -**Interpretation**: Latencies are all within ~5% of each other. -AKS performs slightly better than GKE and Constellation (C11n) in all cases except minimum latency. -Minimum latency is the lowest for GKE. -Compared to GKE, Constellation had slightly lower peak latencies (99th percentile and maximum), indicating that Constellation could have handled slightly more concurrent accesses than GKE. -Overall, performance is at comparable levels across all three distributions. -Based on these numbers, you can use a similarly sized Constellation cluster to run your existing Vault deployment. - -### Visualization - -The following plots visualize the data presented above as [box plots](https://en.wikipedia.org/wiki/Box_plot). -The whiskers denote the minimum and maximum. -The box stretches from the 25th to the 75th percentile, with the dividing bar marking the 50th percentile. -The circles outside the whiskers denote outliers. - -
-Mean Latency - -![Mean Latency](../../_media/benchmark_vault/5replicas/mean_latency.png) - -
- -
-99th Percentile Latency - -![99th Percentile Latency](../../_media/benchmark_vault/5replicas/p99_latency.png) - -
- -
-Maximum Latency - -![Maximum Latency](../../_media/benchmark_vault/5replicas/max_latency.png) - -
- -
-Minimum Latency - -![Minimum Latency](../../_media/benchmark_vault/5replicas/min_latency.png) - -
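The per-run statistics reported above are aggregated with plain arithmetic, as described in the Results section. As a rough illustration only (this isn't the evaluation script from the vault-benchmarks repository), the following sketch computes the mean and population variance of one statistic, reading one value per run from `runs.txt`:

```bash
# Aggregate a single statistic (e.g., the per-run mean latency) across all runs.
# Input: one numeric value per line in runs.txt; output: mean and population variance.
awk '{ sum += $1; sumsq += $1 * $1; n++ }
     END {
       mean = sum / n
       printf "mean: %f, variance: %f\n", mean, sumsq / n - mean * mean
     }' runs.txt
```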
diff --git a/docs/versioned_docs/version-2.20/overview/performance/compute.md b/docs/versioned_docs/version-2.20/overview/performance/compute.md deleted file mode 100644 index 88dd4b1b2..000000000 --- a/docs/versioned_docs/version-2.20/overview/performance/compute.md +++ /dev/null @@ -1,11 +0,0 @@ -# Impact of runtime encryption on compute performance - -All nodes in a Constellation cluster are executed inside Confidential VMs (CVMs). Consequently, the performance of Constellation is inherently linked to the performance of these CVMs. - -## AMD and Azure benchmarking - -AMD and Azure have collectively released a [performance benchmark](https://community.amd.com/t5/business/microsoft-azure-confidential-computing-powered-by-3rd-gen-epyc/ba-p/497796) for CVMs that utilize 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. This benchmark, which included a variety of mostly compute-intensive tests such as SPEC CPU 2017 and CoreMark, demonstrated that CVMs experience only minor performance degradation (ranging from 2% to 8%) when compared to standard VMs. Such results are indicative of the performance that can be expected from compute-intensive workloads running with Constellation on Azure. - -## AMD and Google benchmarking - -Similarly, AMD and Google have jointly released a [performance benchmark](https://www.amd.com/system/files/documents/3rd-gen-epyc-gcp-c2d-conf-compute-perf-brief.pdf) for CVMs employing 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. With high-performance computing workloads such as WRF, NAMD, Ansys CFS, and Ansys LS_DYNA, they observed analogous findings, with only minor performance degradation (between 2% and 4%) compared to standard VMs. These outcomes are reflective of the performance that can be expected for compute-intensive workloads running with Constellation on GCP. diff --git a/docs/versioned_docs/version-2.20/overview/performance/io.md b/docs/versioned_docs/version-2.20/overview/performance/io.md deleted file mode 100644 index 3ae796f8a..000000000 --- a/docs/versioned_docs/version-2.20/overview/performance/io.md +++ /dev/null @@ -1,204 +0,0 @@ -# I/O performance benchmarks - -To assess the overall performance of Constellation, this benchmark evaluates Constellation v2.6.0 in terms of storage I/O using [`fio`](https://fio.readthedocs.io/en/latest/fio_doc.html) and network performance using the [Kubernetes Network Benchmark](https://github.com/InfraBuilder/k8s-bench-suite#knb--kubernetes-network-be). - -This benchmark tested Constellation on Azure and GCP and compared the results against the managed Kubernetes offerings AKS and GKE. - -## Configurations - -### Constellation - -The benchmark was conducted with Constellation v2.6.0, Kubernetes v1.25.7, and Cilium v1.12. -It ran on the following infrastructure configurations. - -Constellation on Azure: - -- Nodes: 3 (1 Control-plane, 2 Worker) -- Machines: `DC4as_v5`: 3rd Generation AMD EPYC 7763v (Milan) processor with 4 Cores, 16 GiB memory -- CVM: `true` -- Region: `West US` -- Zone: `2` - -Constellation on GCP: - -- Nodes: 3 (1 Control-plane, 2 Worker) -- Machines: `n2d-standard-4`: 2nd Generation AMD EPYC (Rome) processor with 4 Cores, 16 GiB of memory -- CVM: `true` -- Zone: `europe-west3-b` - -### AKS - -On AKS, the benchmark used Kubernetes `v1.24.9` and nodes with version `AKSUbuntu-1804gen2containerd-2023.02.15`. 
-AKS ran with the [`kubenet`](https://learn.microsoft.com/en-us/azure/aks/concepts-network#kubenet-basic-networking) CNI and the [default CSI driver](https://learn.microsoft.com/en-us/azure/aks/azure-disk-csi) for Azure Disk. - -The following infrastructure configuration was used: - -- Nodes: 2 (2 Worker) -- Machines: `D4as_v5`: 3rd Generation AMD EPYC 7763v (Milan) processor with 4 Cores, 16 GiB memory -- CVM: `false` -- Region: `West US` -- Zone: `2` - -### GKE - -On GKE, the benchmark used Kubernetes `v1.24.9` and nodes with version `1.24.9-gke.3200`. -GKE ran with the [`kubenet`](https://cloud.google.com/kubernetes-engine/docs/concepts/network-overview) CNI and the [default CSI driver](https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/gce-pd-csi-driver) for Compute Engine persistent disk. - -The following infrastructure configuration was used: - -- Nodes: 2 (2 Worker) -- Machines: `n2d-standard-4`: 2nd Generation AMD EPYC (Rome) processor with 4 Cores, 16 GiB of memory -- CVM: `false` -- Zone: `europe-west3-b` - -## Results - -### Network - -This section gives a thorough analysis of the network performance of Constellation, specifically focusing on measuring TCP and UDP bandwidth. -The benchmark measured the bandwidth of pod-to-pod and pod-to-service connections between two different nodes using [`iperf`](https://iperf.fr/). - -GKE and Constellation on GCP had a maximum network bandwidth of [10 Gbps](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines). -AKS with `Standard_D4as_v5` machines has a maximum network bandwidth of [12.5 Gbps](https://learn.microsoft.com/en-us/azure/virtual-machines/dasv5-dadsv5-series#dasv5-series). -The Confidential VM equivalent `Standard_DC4as_v5` currently has a network bandwidth of [1.25 Gbps](https://learn.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series#dcasv5-series-products). -Therefore, to make the test comparable, both AKS and Constellation on Azure were running with `Standard_DC4as_v5` machines and 1.25 Gbps bandwidth. - -Constellation on Azure and AKS used an MTU of 1500. -Constellation on GCP used an MTU of 8896. GKE used an MTU of 1450. - -The difference in network bandwidth can largely be attributed to two factors: - -- Constellation's [network encryption](../../architecture/networking.md) via Cilium and WireGuard, which protects data in transit. -- [AMD SEV using SWIOTLB bounce buffers](https://lore.kernel.org/all/20200204193500.GA15564@ashkalra_ubuntu_server/T/) for all DMA including network I/O. - -#### Pod-to-Pod - -In this scenario, the client Pod connects directly to the server Pod via its IP address. - -```mermaid -flowchart LR - subgraph Node A - Client[Client] - end - subgraph Node B - Server[Server] - end - Client ==>|traffic| Server -``` - -The results for "Pod-to-Pod" on Azure are as follows: - -![Network Pod2Pod Azure benchmark graph](../../_media/benchmark_net_p2p_azure.png) - -The results for "Pod-to-Pod" on GCP are as follows: - -![Network Pod2Pod GCP benchmark graph](../../_media/benchmark_net_p2p_gcp.png) - -#### Pod-to-Service - -In this scenario, the client Pod connects to the server Pod via a ClusterIP service. This is more relevant to real-world use cases.
- -```mermaid -flowchart LR - subgraph Node A - Client[Client] ==>|traffic| Service[Service] - end - subgraph Node B - Server[Server] - end - Service ==>|traffic| Server -``` - -The results for "Pod-to-Service" on Azure are as follows: - -![Network Pod2SVC Azure benchmark graph](../../_media/benchmark_net_p2svc_azure.png) - -The results for "Pod-to-Service" on GCP are as follows: - -![Network Pod2SVC GCP benchmark graph](../../_media/benchmark_net_p2svc_gcp.png) - -In our recent comparison of Constellation on GCP with GKE, Constellation has 58% less TCP bandwidth. However, UDP bandwidth was slightly better with Constellation, thanks to its higher MTU. - -Similarly, when comparing Constellation on Azure with AKS using CVMs, Constellation achieved approximately 10% less TCP and 40% less UDP bandwidth. - -### Storage I/O - -Azure and GCP offer persistent storage for their Kubernetes services AKS and GKE via the Container Storage Interface (CSI). CSI storage in Kubernetes is available via `PersistentVolumes` (PV) and consumed via `PersistentVolumeClaims` (PVC). -Upon requesting persistent storage through a PVC, GKE and AKS will provision a PV as defined by a default [storage class](https://kubernetes.io/docs/concepts/storage/storage-classes/). -Constellation provides persistent storage on Azure and GCP [that's encrypted on the CSI layer](../../architecture/encrypted-storage.md). -Similarly, upon a PVC request, Constellation will provision a PV via a default storage class. - -For Constellation on Azure and AKS, the benchmark ran with Azure Disk storage [Standard SSD](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#standard-ssds) of 400 GiB size. -The [DC4as machine type](https://learn.microsoft.com/en-us/azure/virtual-machines/dasv5-dadsv5-series#dasv5-series) with four cores provides the following maximum performance: - -- 6400 (20000 burst) IOPS -- 144 MB/s (600 MB/s burst) throughput - -However, the performance is bound by the capabilities of the [512 GiB Standard SSD size](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#standard-ssds) (the size class of 400 GiB volumes): - -- 500 (600 burst) IOPS -- 60 MB/s (150 MB/s burst) throughput - -For Constellation on GCP and GKE, the benchmark ran with Compute Engine Persistent Disk Storage [pd-balanced](https://cloud.google.com/compute/docs/disks) of 400 GiB size. -The N2D machine type with four cores and pd-balanced provides the following [maximum performance](https://cloud.google.com/compute/docs/disks/performance#n2d_vms): - -- 3,000 read IOPS -- 15,000 write IOPS -- 240 MB/s read throughput -- 240 MB/s write throughput - -However, the performance is bound by the capabilities of a [`Zonal balanced PD`](https://cloud.google.com/compute/docs/disks/performance#zonal-persistent-disks) with 400 GiB size: - -- 2400 read IOPS -- 2400 write IOPS -- 112 MB/s read throughput -- 112 MB/s write throughput - -The [`fio`](https://fio.readthedocs.io/en/latest/fio_doc.html) benchmark consists of several tests. -The benchmark used [`Kubestr`](https://github.com/kastenhq/kubestr) to run `fio` in Kubernetes. -The default test performs randomized access patterns that accurately depict worst-case I/O scenarios for most applications.
- -The following `fio` settings were used: - -- No Cloud caching -- No OS caching -- Single CPU -- 60 seconds runtime -- 10 seconds ramp-up time -- 10 GiB file -- IOPS: 4 KB blocks and 128 iodepth -- Bandwidth: 1024 KB blocks and 128 iodepth - -For more details, see the [`fio` test configuration](https://github.com/edgelesssys/constellation/blob/main/.github/actions/e2e_benchmark/fio.ini). - -The results for IOPS on Azure are as follows: - -![I/O IOPS Azure benchmark graph](../../_media/benchmark_fio_azure_iops.png) - -The results for IOPS on GCP are as follows: - -![I/O IOPS GCP benchmark graph](../../_media/benchmark_fio_gcp_iops.png) - -The results for bandwidth on Azure are as follows: - -![I/O bandwidth Azure benchmark graph](../../_media/benchmark_fio_azure_bw.png) - -The results for bandwidth on GCP are as follows: - -![I/O bandwidth GCP benchmark graph](../../_media/benchmark_fio_gcp_bw.png) - -On GCP, the results exceed the maximum performance guarantees of the chosen disk type. There are two possible explanations for this. The first is that there may be cloud caching in place that isn't configurable. Alternatively, the underlying provisioned disk size may be larger than what was requested, resulting in higher performance boundaries. - -When comparing Constellation on GCP with GKE, Constellation has similar bandwidth but about 10% less IOPS performance. On Azure, Constellation has similar IOPS performance compared to AKS, where both likely hit the maximum storage performance. However, Constellation has approximately 15% less read and write bandwidth. - -## Conclusion - -Despite the added [security benefits](../security-benefits.md) that Constellation provides, it only incurs a slight performance overhead when compared to managed Kubernetes offerings such as AKS and GKE. In most compute benchmarks, Constellation is on par with its alternatives. -While it may be slightly slower in certain I/O scenarios due to network and storage encryption, there is ongoing work to reduce this overhead to single digits. - -For instance, storage encryption only adds between 10% and 15% overhead in terms of bandwidth and IOPS. -Meanwhile, the biggest performance impact that Constellation currently faces is network encryption, which can incur up to 58% overhead on a 10 Gbps network. -However, the Cilium team has conducted [benchmarks with Cilium using WireGuard encryption](https://docs.cilium.io/en/latest/operations/performance/benchmark/#encryption-wireguard-ipsec) on a 100 Gbps network that yielded over 15 Gbps. -We're confident that Constellation will provide a similar level of performance with an upcoming release. - -Overall, Constellation strikes a great balance between security and performance, and we're continuously working to improve its performance capabilities while maintaining its high level of security. diff --git a/docs/versioned_docs/version-2.20/overview/performance/performance.md b/docs/versioned_docs/version-2.20/overview/performance/performance.md deleted file mode 100644 index 59bf86602..000000000 --- a/docs/versioned_docs/version-2.20/overview/performance/performance.md +++ /dev/null @@ -1,17 +0,0 @@ -# Performance analysis of Constellation - -This section provides a comprehensive examination of the performance characteristics of Constellation. - -## Runtime encryption - -Runtime encryption affects compute performance.
[Benchmarks by Azure and Google](compute.md) show that the performance degradation of Confidential VMs (CVMs) is small, ranging from 2% to 8% for compute-intensive workloads. - -## I/O performance benchmarks - -We evaluated the [I/O performance](io.md) of Constellation, utilizing a collection of synthetic benchmarks targeting networking and storage. -We further compared this performance to native managed Kubernetes offerings from various cloud providers, to better understand how Constellation stands in relation to standard practices. - -## Application benchmarking - -To gauge Constellation's applicability to well-known applications, we performed a [benchmark of HashiCorp Vault](application.md) running on Constellation. -The results were then compared to deployments on the managed Kubernetes offerings from different cloud providers, providing a tangible perspective on Constellation's performance in actual deployment scenarios. diff --git a/docs/versioned_docs/version-2.20/overview/product.md b/docs/versioned_docs/version-2.20/overview/product.md deleted file mode 100644 index 4b5d90706..000000000 --- a/docs/versioned_docs/version-2.20/overview/product.md +++ /dev/null @@ -1,12 +0,0 @@ -# Product features - -Constellation is a Kubernetes engine that aims to provide the best possible data security in combination with enterprise-grade scalability and reliability features---and a smooth user experience. - -From a security perspective, Constellation implements the [Confidential Kubernetes](confidential-kubernetes.md) concept and corresponding security features, which shield your entire cluster from the underlying infrastructure. - -From an operational perspective, Constellation provides the following key features: - -* **Native support for different clouds**: Constellation works on Amazon Web Services (AWS), Microsoft Azure, Google Cloud Platform (GCP), and STACKIT. Support for OpenStack-based environments is coming with a future release. Constellation securely interfaces with the cloud infrastructure to provide [cluster autoscaling](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), [dynamic persistent volumes](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/), and [service load balancing](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). -* **High availability**: Constellation uses a [multi-master architecture](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/) with a [stacked etcd topology](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/ha-topology/#stacked-etcd-topology) to ensure high availability. -* **Integrated Day-2 operations**: Constellation lets you securely [upgrade](../workflows/upgrade.md) your cluster to a new release. It also lets you securely [recover](../workflows/recovery.md) a failed cluster. Both with a single command. -* **Support for Terraform**: Constellation includes a [Terraform provider](../workflows/terraform-provider.md) that lets you manage the full lifecycle of your cluster via Terraform. 
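To make the Day-2 operations above concrete, each of them maps to a single CLI command. The following is a sketch of a typical sequence, run from the cluster's workspace directory (the recovery endpoint is a placeholder):

```bash
# Check which image, Kubernetes, and microservice upgrades are available
constellation upgrade check

# Apply the versions recorded in the configuration file to the running cluster
constellation apply

# Recover a cluster whose nodes restarted without a peer to bootstrap from
constellation recover --endpoint <load-balancer-or-node-ip>
```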
diff --git a/docs/versioned_docs/version-2.20/overview/security-benefits.md b/docs/versioned_docs/version-2.20/overview/security-benefits.md deleted file mode 100644 index 51a8b64f5..000000000 --- a/docs/versioned_docs/version-2.20/overview/security-benefits.md +++ /dev/null @@ -1,22 +0,0 @@ -# Security benefits and threat model - -Constellation implements the [Confidential Kubernetes](confidential-kubernetes.md) concept and shields entire Kubernetes deployments from the infrastructure. More concretely, Constellation decreases the size of the trusted computing base (TCB) of a Kubernetes deployment. The TCB is the totality of elements in a computing environment that must be trusted not to be compromised. A smaller TCB results in a smaller attack surface. The following diagram shows how Constellation removes the *cloud & datacenter infrastructure* and the *physical hosts*, including the hypervisor, the host OS, and other components, from the TCB (red). Inside the confidential context (green), Kubernetes remains part of the TCB, but its integrity is attested and can be [verified](../workflows/verify-cluster.md). - -![TCB comparison](../_media/tcb.svg) - -Given this background, the following describes the concrete threat classes that Constellation addresses. - -## Insider access - -Employees and third-party contractors of cloud service providers (CSPs) have access to different layers of the cloud infrastructure. -This opens up a large attack surface where workloads and data can be read, copied, or manipulated. With Constellation, Kubernetes deployments are shielded from the infrastructure and thus such accesses are prevented. - -## Infrastructure-based attacks - -Malicious cloud users ("hackers") may break out of their tenancy and access other tenants' data. Advanced attackers may even be able to establish a permanent foothold within the infrastructure and access data over a longer period. Analogously to the *insider access* scenario, Constellation also prevents access to a deployment's data in this scenario. - -## Supply chain attacks - -Supply chain security is receiving lots of attention recently due to an [increasing number of recorded attacks](https://www.enisa.europa.eu/news/enisa-news/understanding-the-increase-in-supply-chain-security-attacks). For instance, a malicious actor could attempt to tamper Constellation node images (including Kubernetes and other software) before they're loaded in the confidential VMs of a cluster. Constellation uses [remote attestation](../architecture/attestation.md) in conjunction with public [transparency logs](../workflows/verify-cli.md) to prevent this. - -In the future, Constellation will extend this feature to customer workloads. This will enable cluster owners to create auditable policies that precisely define which containers can run in a given deployment. diff --git a/docs/versioned_docs/version-2.20/reference/cli.md b/docs/versioned_docs/version-2.20/reference/cli.md deleted file mode 100644 index 99acef520..000000000 --- a/docs/versioned_docs/version-2.20/reference/cli.md +++ /dev/null @@ -1,844 +0,0 @@ -# CLI reference - - - -Use the Constellation CLI to create and manage your clusters. 
- -Usage: - -``` -constellation [command] -``` -Commands: - -* [config](#constellation-config): Work with the Constellation configuration file - * [generate](#constellation-config-generate): Generate a default configuration and state file - * [fetch-measurements](#constellation-config-fetch-measurements): Fetch measurements for configured cloud provider and image - * [instance-types](#constellation-config-instance-types): Print the supported instance types for all cloud providers - * [kubernetes-versions](#constellation-config-kubernetes-versions): Print the Kubernetes versions supported by this CLI - * [migrate](#constellation-config-migrate): Migrate a configuration file to a new version -* [create](#constellation-create): Create instances on a cloud platform for your Constellation cluster -* [apply](#constellation-apply): Apply a configuration to a Constellation cluster -* [mini](#constellation-mini): Manage MiniConstellation clusters - * [up](#constellation-mini-up): Create and initialize a new MiniConstellation cluster - * [down](#constellation-mini-down): Destroy a MiniConstellation cluster -* [status](#constellation-status): Show status of a Constellation cluster -* [verify](#constellation-verify): Verify the confidential properties of a Constellation cluster -* [upgrade](#constellation-upgrade): Find and apply upgrades to your Constellation cluster - * [check](#constellation-upgrade-check): Check for possible upgrades - * [apply](#constellation-upgrade-apply): Apply an upgrade to a Constellation cluster -* [recover](#constellation-recover): Recover a completely stopped Constellation cluster -* [terminate](#constellation-terminate): Terminate a Constellation cluster -* [iam](#constellation-iam): Work with the IAM configuration on your cloud provider - * [create](#constellation-iam-create): Create IAM configuration on a cloud platform for your Constellation cluster - * [aws](#constellation-iam-create-aws): Create IAM configuration on AWS for your Constellation cluster - * [azure](#constellation-iam-create-azure): Create IAM configuration on Microsoft Azure for your Constellation cluster - * [gcp](#constellation-iam-create-gcp): Create IAM configuration on GCP for your Constellation cluster - * [destroy](#constellation-iam-destroy): Destroy an IAM configuration and delete local Terraform files - * [upgrade](#constellation-iam-upgrade): Find and apply upgrades to your IAM profile - * [apply](#constellation-iam-upgrade-apply): Apply an upgrade to an IAM profile -* [version](#constellation-version): Display version of this CLI -* [init](#constellation-init): Initialize the Constellation cluster - -## constellation config - -Work with the Constellation configuration file - -### Synopsis - -Work with the Constellation configuration file. - -### Options - -``` - -h, --help help for config -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation config generate - -Generate a default configuration and state file - -### Synopsis - -Generate a default configuration and state file for your selected cloud provider. 
- -``` -constellation config generate {aws|azure|gcp|openstack|qemu|stackit} [flags] -``` - -### Options - -``` - -a, --attestation string attestation variant to use {aws-sev-snp|aws-nitro-tpm|azure-sev-snp|azure-tdx|azure-trustedlaunch|gcp-sev-snp|gcp-sev-es|qemu-vtpm}. If not specified, the default for the cloud provider is used - -h, --help help for generate - -k, --kubernetes string Kubernetes version to use in format MAJOR.MINOR (default "v1.29") - -t, --tags strings additional tags for created resources given a list of key=value -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation config fetch-measurements - -Fetch measurements for configured cloud provider and image - -### Synopsis - -Fetch measurements for configured cloud provider and image. - -A config needs to be generated first. - -``` -constellation config fetch-measurements [flags] -``` - -### Options - -``` - -h, --help help for fetch-measurements - -s, --signature-url string alternative URL to fetch measurements' signature from - -u, --url string alternative URL to fetch measurements from -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation config instance-types - -Print the supported instance types for all cloud providers - -### Synopsis - -Print the supported instance types for all cloud providers. - -``` -constellation config instance-types [flags] -``` - -### Options - -``` - -h, --help help for instance-types -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation config kubernetes-versions - -Print the Kubernetes versions supported by this CLI - -### Synopsis - -Print the Kubernetes versions supported by this CLI. - -``` -constellation config kubernetes-versions [flags] -``` - -### Options - -``` - -h, --help help for kubernetes-versions -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation config migrate - -Migrate a configuration file to a new version - -### Synopsis - -Migrate a configuration file to a new version. - -``` -constellation config migrate [flags] -``` - -### Options - -``` - -h, --help help for migrate -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation create - -Create instances on a cloud platform for your Constellation cluster - -### Synopsis - -Create instances on a cloud platform for your Constellation cluster. 
- -``` -constellation create [flags] -``` - -### Options - -``` - -h, --help help for create - -y, --yes create the cluster without further confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation apply - -Apply a configuration to a Constellation cluster - -### Synopsis - -Apply a configuration to a Constellation cluster to initialize or upgrade the cluster. - -``` -constellation apply [flags] -``` - -### Options - -``` - --conformance enable conformance mode - -h, --help help for apply - --merge-kubeconfig merge Constellation kubeconfig file with default kubeconfig file in $HOME/.kube/config - --skip-helm-wait install helm charts without waiting for deployments to be ready - --skip-phases strings comma-separated list of upgrade phases to skip - one or multiple of { infrastructure | init | attestationconfig | certsans | helm | image | k8s } - -y, --yes run command without further confirmation - WARNING: the command might delete or update existing resources without additional checks. Please read the docs. - -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation mini - -Manage MiniConstellation clusters - -### Synopsis - -Manage MiniConstellation clusters. - -### Options - -``` - -h, --help help for mini -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation mini up - -Create and initialize a new MiniConstellation cluster - -### Synopsis - -Create and initialize a new MiniConstellation cluster. - -A mini cluster consists of a single control-plane and worker node, hosted using QEMU/KVM. - -``` -constellation mini up [flags] -``` - -### Options - -``` - -h, --help help for up - --merge-kubeconfig merge Constellation kubeconfig file with default kubeconfig file in $HOME/.kube/config (default true) -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation mini down - -Destroy a MiniConstellation cluster - -### Synopsis - -Destroy a MiniConstellation cluster. - -``` -constellation mini down [flags] -``` - -### Options - -``` - -h, --help help for down - -y, --yes terminate the cluster without further confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation status - -Show status of a Constellation cluster - -### Synopsis - -Show the status of a constellation cluster. 
- -Shows microservice, image, and Kubernetes versions installed in the cluster. Also shows status of current version upgrades. - -``` -constellation status [flags] -``` - -### Options - -``` - -h, --help help for status -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation verify - -Verify the confidential properties of a Constellation cluster - -### Synopsis - -Verify the confidential properties of a Constellation cluster. -If arguments aren't specified, values are read from `constellation-state.yaml`. - -``` -constellation verify [flags] -``` - -### Options - -``` - --cluster-id string expected cluster identifier - -h, --help help for verify - -e, --node-endpoint string endpoint of the node to verify, passed as HOST[:PORT] - -o, --output string print the attestation document in the output format {json|raw} -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation upgrade - -Find and apply upgrades to your Constellation cluster - -### Synopsis - -Find and apply upgrades to your Constellation cluster. - -### Options - -``` - -h, --help help for upgrade -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation upgrade check - -Check for possible upgrades - -### Synopsis - -Check which upgrades can be applied to your Constellation Cluster. - -``` -constellation upgrade check [flags] -``` - -### Options - -``` - -h, --help help for check - --ref string the reference to use for querying new versions (default "-") - --stream string the stream to use for querying new versions (default "stable") - -u, --update-config update the specified config file with the suggested versions -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation upgrade apply - -Apply an upgrade to a Constellation cluster - -### Synopsis - -Apply an upgrade to a Constellation cluster by applying the chosen configuration. - -``` -constellation upgrade apply [flags] -``` - -### Options - -``` - --conformance enable conformance mode - -h, --help help for apply - --skip-helm-wait install helm charts without waiting for deployments to be ready - --skip-phases strings comma-separated list of upgrade phases to skip - one or multiple of { infrastructure | helm | image | k8s } - -y, --yes run upgrades without further confirmation - WARNING: might delete your resources in case you are using cert-manager in your cluster. Please read the docs. - WARNING: might unintentionally overwrite measurements in the running cluster. 
-``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation recover - -Recover a completely stopped Constellation cluster - -### Synopsis - -Recover a Constellation cluster by sending a recovery key to an instance in the boot stage. - -This is only required if instances restart without other instances available for bootstrapping. - -``` -constellation recover [flags] -``` - -### Options - -``` - -e, --endpoint string endpoint of the instance, passed as HOST[:PORT] - -h, --help help for recover -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation terminate - -Terminate a Constellation cluster - -### Synopsis - -Terminate a Constellation cluster. - -The cluster can't be started again, and all persistent storage will be lost. - -``` -constellation terminate [flags] -``` - -### Options - -``` - -h, --help help for terminate - -y, --yes terminate the cluster without further confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation iam - -Work with the IAM configuration on your cloud provider - -### Synopsis - -Work with the IAM configuration on your cloud provider. - -### Options - -``` - -h, --help help for iam -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation iam create - -Create IAM configuration on a cloud platform for your Constellation cluster - -### Synopsis - -Create IAM configuration on a cloud platform for your Constellation cluster. - -### Options - -``` - -h, --help help for create - --update-config update the config file with the specific IAM information - -y, --yes create the IAM configuration without further confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation iam create aws - -Create IAM configuration on AWS for your Constellation cluster - -### Synopsis - -Create IAM configuration on AWS for your Constellation cluster. - -``` -constellation iam create aws [flags] -``` - -### Options - -``` - -h, --help help for aws - --prefix string name prefix for all resources (required) - --zone string AWS availability zone the resources will be created in, e.g., us-east-2a (required) - See the Constellation docs for a list of currently supported regions. 
-``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - --update-config update the config file with the specific IAM information - -C, --workspace string path to the Constellation workspace - -y, --yes create the IAM configuration without further confirmation -``` - -## constellation iam create azure - -Create IAM configuration on Microsoft Azure for your Constellation cluster - -### Synopsis - -Create IAM configuration on Microsoft Azure for your Constellation cluster. - -``` -constellation iam create azure [flags] -``` - -### Options - -``` - -h, --help help for azure - --region string region the resources will be created in, e.g., westus (required) - --resourceGroup string name prefix of the two resource groups your cluster / IAM resources will be created in (required) - --servicePrincipal string name of the service principal that will be created (required) - --subscriptionID string subscription ID of the Azure account. Required if the 'ARM_SUBSCRIPTION_ID' environment variable is not set -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - --update-config update the config file with the specific IAM information - -C, --workspace string path to the Constellation workspace - -y, --yes create the IAM configuration without further confirmation -``` - -## constellation iam create gcp - -Create IAM configuration on GCP for your Constellation cluster - -### Synopsis - -Create IAM configuration on GCP for your Constellation cluster. - -``` -constellation iam create gcp [flags] -``` - -### Options - -``` - -h, --help help for gcp - --projectID string ID of the GCP project the configuration will be created in (required) - Find it on the welcome screen of your project: https://console.cloud.google.com/welcome - --serviceAccountID string ID for the service account that will be created (required) - Must be 6 to 30 lowercase letters, digits, or hyphens. - --zone string GCP zone the cluster will be deployed in (required) - Find a list of available zones here: https://cloud.google.com/compute/docs/regions-zones#available -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - --update-config update the config file with the specific IAM information - -C, --workspace string path to the Constellation workspace - -y, --yes create the IAM configuration without further confirmation -``` - -## constellation iam destroy - -Destroy an IAM configuration and delete local Terraform files - -### Synopsis - -Destroy an IAM configuration and delete local Terraform files. 
- -``` -constellation iam destroy [flags] -``` - -### Options - -``` - -h, --help help for destroy - -y, --yes destroy the IAM configuration without asking for confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation iam upgrade - -Find and apply upgrades to your IAM profile - -### Synopsis - -Find and apply upgrades to your IAM profile. - -### Options - -``` - -h, --help help for upgrade -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation iam upgrade apply - -Apply an upgrade to an IAM profile - -### Synopsis - -Apply an upgrade to an IAM profile. - -``` -constellation iam upgrade apply [flags] -``` - -### Options - -``` - -h, --help help for apply - -y, --yes run upgrades without further confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation version - -Display version of this CLI - -### Synopsis - -Display version of this CLI. - -``` -constellation version [flags] -``` - -### Options - -``` - -h, --help help for version -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation init - -Initialize the Constellation cluster - -### Synopsis - -Initialize the Constellation cluster. - -Start your confidential Kubernetes. - -``` -constellation init [flags] -``` - -### Options - -``` - --conformance enable conformance mode - -h, --help help for init - --merge-kubeconfig merge Constellation kubeconfig file with default kubeconfig file in $HOME/.kube/config - --skip-helm-wait install helm charts without waiting for deployments to be ready -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - diff --git a/docs/versioned_docs/version-2.20/reference/migration.md b/docs/versioned_docs/version-2.20/reference/migration.md deleted file mode 100644 index 0252c409f..000000000 --- a/docs/versioned_docs/version-2.20/reference/migration.md +++ /dev/null @@ -1,128 +0,0 @@ -# Migrations - -This document describes breaking changes and migrations between Constellation releases. -Use [`constellation config migrate`](./cli.md#constellation-config-migrate) to automatically update an old config file to a new format. - -## Migrations to v2.19.1 - -### Azure - -* During the upgrade, security rules are migrated and the old ones need to be cleaned up manually by the user. 
The below script shows how to delete them through the Azure CLI: - -```bash -#!/usr/bin/env bash -name="" # the name provided in the config -uid="" # the cluster id can be retrieved via `yq '.infrastructure.uid' constellation-state.yaml` -resource_group="" # the RG can be retrieved via `yq '.provider.azure.resourceGroup' constellation-conf.yaml` - -rules=( - "kubernetes" - "bootstrapper" - "verify" - "recovery" - "join" - "debugd" - "konnectivity" -) - -for rule in "${rules[@]}"; do - echo "Deleting rule: ${rule}" - az network nsg rule delete \ - --resource-group "${resource_group}" \ - --nsg-name "${name}-${uid}" \ - --name "${rule}" -done - -echo "All specified rules have been deleted." -``` - -## Migrations to v2.19.0 - -### Azure - -* To allow seamless upgrades on Azure when Kubernetes services of type `LoadBalancer` are deployed, the target - load balancer in which the `cloud-controller-manager` creates load balancing rules was changed. Instead of using the load balancer - created and maintained by the CLI's Terraform code, the `cloud-controller-manager` now creates its own load balancer in Azure. - If your Constellation has services of type `LoadBalancer`, please remove them before the upgrade and re-apply them - afterward. - -## Migrating from Azure's service principal authentication to managed identity authentication (during the upgrade to Constellation v2.8.0) - -* The `provider.azure.appClientID` and `provider.azure.appClientSecret` fields are no longer supported and should be removed. -* To keep using an existing UAMI, add the `Owner` permission with the scope of your `resourceGroup`. -* Otherwise, simply [create new Constellation IAM credentials](../workflows/config.md#creating-an-iam-configuration) and use the created UAMI. -* To migrate the authentication for an existing cluster on Azure to an UAMI with the necessary permissions: - 1. Remove the `aadClientId` and `aadClientSecret` from the azureconfig secret. - 2. Set `useManagedIdentityExtension` to `true` and use the `userAssignedIdentity` from the Constellation config for the value of `userAssignedIdentityID`. - 3. Restart the CSI driver, cloud controller manager, cluster autoscaler, and Constellation operator pods. - -## Migrating from CLI versions before 2.10 - -* AWS cluster upgrades require additional IAM permissions for the newly introduced `aws-load-balancer-controller`. Please upgrade your IAM roles using `iam upgrade apply`. This will show necessary changes and apply them, if desired. -* The global `nodeGroups` field was added. -* The fields `instanceType`, `stateDiskSizeGB`, and `stateDiskType` for each cloud provider are now part of the configuration of individual node groups. -* The `constellation create` command no longer uses the flags `--control-plane-count` and `--worker-count`. Instead, the initial node count is configured per node group in the `nodeGroups` field. - -## Migrating from CLI versions before 2.9 - -* The `provider.azure.appClientID` and `provider.azure.clientSecretValue` fields were removed to enforce migration to managed identity authentication - -## Migrating from CLI versions before 2.8 - -* The `measurements` field for each cloud service provider was replaced with a global `attestation` field. -* The `confidentialVM`, `idKeyDigest`, and `enforceIdKeyDigest` fields for the Azure cloud service provider were removed in favor of using the global `attestation` field. -* The optional global field `attestationVariant` was replaced by the now required `attestation` field. 
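Many of these configuration-level renames don't have to be applied by hand. Assuming an outdated `constellation-conf.yaml` in the current workspace, a typical flow is to let the CLI rewrite the file and then refresh the measurements for the configured image:

```bash
# Rewrite an old constellation-conf.yaml to the current configuration format
constellation config migrate

# Re-fetch the measurements matching the configured cloud provider and image
constellation config fetch-measurements
```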
- -## Migrating from CLI versions before 2.3 - -* The `sshUsers` field was deprecated in v2.2 and has been removed from the configuration in v2.3. - As an alternative for SSH, check the workflow section [Connect to nodes](../workflows/troubleshooting.md#node-shell-access). -* The `image` field for each cloud service provider has been replaced with a global `image` field. Use the following mapping to migrate your configuration: -
- Show all - - | CSP | old image | new image | - | ----- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- | - | AWS | `ami-06b8cbf4837a0a57c` | `v2.2.2` | - | AWS | `ami-02e96dc04a9e438cd` | `v2.2.2` | - | AWS | `ami-028ead928a9034b2f` | `v2.2.2` | - | AWS | `ami-032ac10dd8d8266e3` | `v2.2.1` | - | AWS | `ami-032e0d57cc4395088` | `v2.2.1` | - | AWS | `ami-053c3e49e19b96bdd` | `v2.2.1` | - | AWS | `ami-0e27ebcefc38f648b` | `v2.2.0` | - | AWS | `ami-098cd37f66523b7c3` | `v2.2.0` | - | AWS | `ami-04a87d302e2509aad` | `v2.2.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.2.2` | `v2.2.2` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.2.2` | `v2.2.2` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.2.1` | `v2.2.1` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.2.1` | `v2.2.1` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.2.0` | `v2.2.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.2.0` | `v2.2.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.1.0` | `v2.1.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.1.0` | `v2.1.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.0.0` | `v2.0.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.0.0` | `v2.0.0` | - | GCP | `projects/constellation-images/global/images/constellation-v2-2-2` | `v2.2.2` | - | GCP | `projects/constellation-images/global/images/constellation-v2-2-1` | `v2.2.1` | - | GCP | `projects/constellation-images/global/images/constellation-v2-2-0` | `v2.2.0` | - | GCP | `projects/constellation-images/global/images/constellation-v2-1-0` | `v2.1.0` | - | GCP | `projects/constellation-images/global/images/constellation-v2-0-0` | `v2.0.0` | - -
-* The `enforcedMeasurements` field has been removed and merged with the `measurements` field. - * To migrate your config containing a new image (`v2.3` or greater), remove the old `measurements` and `enforcedMeasurements` entries from your config and run `constellation fetch-measurements` - * To migrate your config containing an image older than `v2.3`, remove the `enforcedMeasurements` entry and replace the entries in `measurements` as shown in the example below: - - ```diff - measurements: - - 0: DzXCFGCNk8em5ornNZtKi+Wg6Z7qkQfs5CfE3qTkOc8= - + 0: - + expected: DzXCFGCNk8em5ornNZtKi+Wg6Z7qkQfs5CfE3qTkOc8= - + warnOnly: true - - 8: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= - + 8: - + expected: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= - + warnOnly: false - -enforcedMeasurements: - - - 8 - ``` diff --git a/docs/versioned_docs/version-2.20/reference/slsa.md b/docs/versioned_docs/version-2.20/reference/slsa.md deleted file mode 100644 index 21f4e713c..000000000 --- a/docs/versioned_docs/version-2.20/reference/slsa.md +++ /dev/null @@ -1,73 +0,0 @@ -# Supply chain levels for software artifacts (SLSA) adoption - -[Supply chain Levels for Software Artifacts, or SLSA (salsa)](https://slsa.dev/) is a framework for improving and grading a project's build system and engineering processes. SLSA focuses on security improvements for source code storage as well as build system definition, execution, and observation. SLSA is structured in [four levels](https://slsa.dev/spec/v0.1/levels). This page describes the adoption of SLSA for Constellation. - -:::info -SLSA is still in alpha status. The presented levels and their requirements might change in the future. We will adopt any changes into our engineering processes, as they get defined. -::: - -## Level 1 - Adopted - -**[Build - Scripted](https://slsa.dev/spec/v0.1/requirements#scripted-build)** - -All build steps are automated via [Bazel](https://github.com/edgelesssys/constellation/tree/main/bazel/ci) and [GitHub Actions](https://github.com/edgelesssys/constellation/tree/main/.github). - -**[Provenance - Available](https://slsa.dev/spec/v0.1/requirements#available)** - -Provenance for the CLI is generated using the [slsa-github-generator](https://github.com/slsa-framework/slsa-github-generator). - -## Level 2 - Adopted - -**[Source - Version Controlled](https://slsa.dev/spec/v0.1/requirements#version-controlled)** - -Constellation is hosted on GitHub using git. - -**[Build - Build Service](https://slsa.dev/spec/v0.1/requirements#build-service)** - -All builds are carried out by [GitHub Actions](https://github.com/edgelesssys/constellation/tree/main/.github). - -**[Provenance - Authenticated](https://slsa.dev/spec/v0.1/requirements#authenticated)** - -Provenance for the CLI is signed using the [slsa-github-generator](https://github.com/slsa-framework/slsa-github-generator). Learn [how to verify the CLI](../workflows/verify-cli.md) using the signed provenance, before using it for the first time. - -**[Provenance - Service Generated](https://slsa.dev/spec/v0.1/requirements#service-generated)** - -Provenance for the CLI is generated using the [slsa-github-generator](https://github.com/slsa-framework/slsa-github-generator) in GitHub Actions. 
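As a sketch of how the authenticated provenance can be consumed, the following checks a downloaded CLI binary with the SLSA project's `slsa-verifier`; the artifact and provenance file names are placeholders, and the linked CLI verification guide remains the authoritative reference:

```bash
# Verify that the downloaded binary matches the provenance and was built
# from the expected source repository (file names are examples).
slsa-verifier verify-artifact constellation-linux-amd64 \
  --provenance-path constellation.intoto.jsonl \
  --source-uri github.com/edgelesssys/constellation
```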
- -## Level 3 - Adopted - -**[Source - Verified History](https://slsa.dev/spec/v0.1/requirements#verified-history)** - -The [Edgeless Systems](https://github.com/edgelesssys) GitHub organization [requires two-factor authentication](https://docs.github.com/en/organizations/keeping-your-organization-secure/managing-two-factor-authentication-for-your-organization/requiring-two-factor-authentication-in-your-organization) for all members. - -**[Source - Retained Indefinitely](https://slsa.dev/spec/v0.1/requirements#retained-indefinitely)** - -Since we use GitHub to host the repository, an external person can't modify or delete the history. Before a pull request can be merged, an explicit approval from an [Edgeless Systems](https://github.com/edgelesssys) team member is required. - -The same holds true for changes proposed by team members. Each change to `main` needs to be proposed via a pull request and requires at least one approval. - -The [Edgeless Systems](https://github.com/edgelesssys) GitHub organization admins control these settings and are able to make changes to the repository's history should legal requirements necessitate it. These changes require two-party approval following the obliterate policy. - -**[Build - Build as Code](https://slsa.dev/spec/v0.1/requirements#build-as-code)** - -All build files for Constellation are stored in [the same repository](https://github.com/edgelesssys/constellation/tree/main/.github). - -**[Build - Ephemeral Environment](https://slsa.dev/spec/v0.1/requirements#ephemeral-environment)** - -All GitHub Action workflows are executed on [GitHub-hosted runners](https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners). These runners are only available for the duration of a workflow run. - -We currently don't use [self-hosted runners](https://docs.github.com/en/actions/hosting-your-own-runners/about-self-hosted-runners). - -**[Build - Isolated](https://slsa.dev/spec/v0.1/requirements#isolated)** - -As outlined in the previous section, we use GitHub-hosted runners, which provide a new, isolated, and ephemeral environment for each build. - -Additionally, the [SLSA GitHub generator](https://github.com/slsa-framework/slsa-github-generator#generation-of-provenance) itself is run in an isolated workflow with the artifact hash as a defined input. - -**[Provenance - Non-falsifiable](https://slsa.dev/spec/v0.1/requirements#non-falsifiable)** - -As outlined by the [SLSA GitHub generator](https://github.com/slsa-framework/slsa-github-generator) documentation, it already fulfills the non-falsifiable requirements for SLSA Level 3. The generated provenance is signed using [sigstore](https://sigstore.dev/) with an OIDC-based proof of identity. - -## Level 4 - In Progress - -We strive to adopt certain aspects of SLSA Level 4 that support our engineering process. At the same time, SLSA is still in alpha status, and the biggest changes to SLSA are expected to be around Level 4. diff --git a/docs/versioned_docs/version-2.20/reference/terraform.md b/docs/versioned_docs/version-2.20/reference/terraform.md deleted file mode 100644 index 9825a8bb8..000000000 --- a/docs/versioned_docs/version-2.20/reference/terraform.md +++ /dev/null @@ -1,37 +0,0 @@ -# Terraform usage - -[Terraform](https://www.terraform.io/) is an Infrastructure as Code (IaC) framework to manage cloud resources. This page explains how Constellation uses it internally and how advanced users may use it manually to have more control over resource creation.
- -:::info -Information on this page is intended for users who are familiar with Terraform. -It's not required for common usage of Constellation. -See the [Terraform documentation](https://developer.hashicorp.com/terraform/docs) if you want to learn more about it. -::: - -## Terraform state files - -Constellation keeps Terraform state files in subdirectories of the workspace together with the corresponding Terraform configuration files and metadata. -The subdirectories are created on the first Constellation CLI action that uses Terraform internally. - -Currently, these subdirectories are: - -* `constellation-terraform` - Terraform state files for the resources of the Constellation cluster -* `constellation-iam-terraform` - Terraform state files for IAM configuration - -As with all commands, commands that work with these files (e.g., `apply`, `terminate`, `iam`) have to be executed from the root of the cluster's [workspace directory](../architecture/orchestration.md#workspaces). You usually don't need and shouldn't manipulate or delete the subdirectories manually. - -## Interacting with Terraform manually - -Manual interaction with Terraform state created by Constellation (i.e., via the Terraform CLI) should only be performed by experienced users. It may lead to unrecoverable loss of cloud resources. For the majority of users and use cases, the interaction done by the [Constellation CLI](cli.md) is sufficient. - -## Terraform debugging - -To debug Terraform issues, the Constellation CLI offers the `tf-log` flag. You can set it to any of [Terraform's log levels](https://developer.hashicorp.com/terraform/internals/debugging): -* `JSON` (JSON-formatted logs at `TRACE` level) -* `TRACE` -* `DEBUG` -* `INFO` -* `WARN` -* `ERROR` - -The log output is written to the `terraform.log` file in the workspace directory. The output is appended to the file on each run. diff --git a/docs/versioned_docs/version-2.20/workflows/cert-manager.md b/docs/versioned_docs/version-2.20/workflows/cert-manager.md deleted file mode 100644 index 1d847e8bf..000000000 --- a/docs/versioned_docs/version-2.20/workflows/cert-manager.md +++ /dev/null @@ -1,13 +0,0 @@ -# Install cert-manager - -:::caution -If you want to use cert-manager with Constellation, pay attention to the following to avoid potential pitfalls. -::: - -Constellation ships with cert-manager preinstalled. -The default installation is part of the `kube-system` namespace, as all other Constellation-managed microservices. -You are free to install more instances of cert-manager into other namespaces. -However, be aware that any new installation needs to use the same version as the one installed with Constellation or rely on the same CRD versions. -Also remember to set the `installCRDs` value to `false` when installing new cert-manager instances. -It will create problems if you have two installations of cert-manager depending on different versions of the installed CRDs. -CRDs are cluster-wide resources and cert-manager depends on specific versions of those CRDs for each release. diff --git a/docs/versioned_docs/version-2.20/workflows/config.md b/docs/versioned_docs/version-2.20/workflows/config.md deleted file mode 100644 index a8a52980e..000000000 --- a/docs/versioned_docs/version-2.20/workflows/config.md +++ /dev/null @@ -1,353 +0,0 @@ -# Configure your cluster - -:::info -This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. 
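-
-If it's helpful, the SEV-SNP-capable instance types available in a region can also be listed with the AWS CLI. This is only a sketch and assumes the `processor-info.supported-features` filter with the `amd-sev-snp` value is supported by your CLI version:
-
-```bash
-# Assumption: the AWS CLI is configured; us-east-2 is just an example region.
-aws ec2 describe-instance-types \
-  --region us-east-2 \
-  --filters Name=processor-info.supported-features,Values=amd-sev-snp \
-  --query 'InstanceTypes[].InstanceType' \
-  --output text
-```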
-::: - - - ---- - -Before you can create your cluster, you need to configure the identity and access management (IAM) for your cloud service provider (CSP) and choose machine types for the nodes. - -## Creating the configuration file - -You can generate a configuration file for your CSP by using the following CLI command: - - - - -```bash -constellation config generate aws -``` - - - - -```bash -constellation config generate azure -``` - - - - -```bash -constellation config generate gcp -``` - - - - -```bash -constellation config generate stackit -``` - - - - -This creates the file `constellation-conf.yaml` in the current directory. - -## Choosing a VM type - -Constellation supports the following VM types: - - - -By default, Constellation uses `m6a.xlarge` VMs (4 vCPUs, 16 GB RAM) to create your cluster. -Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file. -If you are using the default attestation variant `awsSEVSNP`, you can use the instance types described in [AWS's AMD SEV-SNP docs](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/snp-requirements.html). -Please mind the region restrictions mentioned in the [Getting started](../getting-started/first-steps.md#create-a-cluster) section. - -If you are using the attestation variant `awsNitroTPM`, you can choose any of the [nitroTPM-enabled instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enable-nitrotpm-prerequisites.html). - -The Constellation CLI can also print the supported instance types with: `constellation config instance-types`. - - - - -By default, Constellation uses `Standard_DC4as_v5` CVMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file. For CVMs, any VM type with a minimum of 4 vCPUs from the [DCasv5 & DCadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series) or [ECasv5 & ECadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/ecasv5-ecadsv5-series) families is supported. - -You can also run `constellation config instance-types` to get the list of all supported options. - - - - -By default, Constellation uses `n2d-standard-4` VMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file. Supported are all machines with a minimum of 4 vCPUs from the [C2D](https://cloud.google.com/compute/docs/compute-optimized-machines#c2d_machine_types) or [N2D](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines) family. You can run `constellation config instance-types` to get the list of all supported options. - - - - -By default, Constellation uses `m1a.4cd` VMs (4 vCPUs, 30 GB RAM) to create your cluster. -Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file. - -The following instance types are known to be supported: - -| name | vCPUs | GB RAM | -|----------|-------|--------| -| m1a.4cd | 4 | 30 | -| m1a.8cd | 8 | 60 | -| m1a.16cd | 16 | 120 | -| m1a.30cd | 30 | 230 | - -You can choose any of the SEV-enabled instance types. You can find a list of all supported instance types in the [STACKIT documentation](https://docs.stackit.cloud/stackit/en/virtual-machine-flavors-75137231.html). - -The Constellation CLI can also print the supported instance types with: `constellation config instance-types`. 
- - - - -Fill the desired VM type into the `instanceType` fields in the `constellation-conf.yml` file. - -## Creating additional node groups - -By default, Constellation creates the node groups `control_plane_default` and `worker_default` for control-plane nodes and workers, respectively. -If you require additional control-plane or worker groups with different instance types, zone placements, or disk sizes, you can add additional node groups to the `constellation-conf.yml` file. -Each node group can be scaled individually. - -Consider the following example for AWS: - -```yaml -nodeGroups: - control_plane_default: - role: control-plane - instanceType: c6a.xlarge - stateDiskSizeGB: 30 - stateDiskType: gp3 - zone: eu-west-1c - initialCount: 3 - worker_default: - role: worker - instanceType: c6a.xlarge - stateDiskSizeGB: 30 - stateDiskType: gp3 - zone: eu-west-1c - initialCount: 2 - high_cpu: - role: worker - instanceType: c6a.24xlarge - stateDiskSizeGB: 128 - stateDiskType: gp3 - zone: eu-west-1c - initialCount: 1 -``` - -This configuration creates an additional node group `high_cpu` with a larger instance type and disk. - -You can use the field `zone` to specify what availability zone nodes of the group are placed in. -On Azure, this field is empty by default and nodes are automatically spread across availability zones. -STACKIT currently offers SEV-enabled CPUs in the `eu01-1`, `eu01-2`, and `eu01-3` zones. -Consult the documentation of your cloud provider for more information: - -* [AWS](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/) -* [Azure](https://azure.microsoft.com/en-us/explore/global-infrastructure/availability-zones) -* [GCP](https://cloud.google.com/compute/docs/regions-zones) -* [STACKIT](https://docs.stackit.cloud/stackit/en/regions-and-availability-zones-75137212.html) - -## Choosing a Kubernetes version - -To learn which Kubernetes versions can be installed with your current CLI, you can run `constellation config kubernetes-versions`. -See also Constellation's [Kubernetes support policy](../architecture/versions.md#kubernetes-support-policy). - -## Creating an IAM configuration - -You can create an IAM configuration for your cluster automatically using the `constellation iam create` command. -If you already have a Constellation configuration file, you can add the `--update-config` flag to the command. This writes the needed IAM fields into your configuration. Furthermore, the flag updates the zone/region of the configuration if it hasn't been set yet. - - - - -You must be authenticated with the [AWS CLI](https://aws.amazon.com/en/cli/) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). - -```bash -constellation iam create aws --zone=us-east-2a --prefix=constellTest -``` - -This command creates IAM configuration for the AWS zone `us-east-2a` using the prefix `constellTest` for all named resources being created. - -Constellation OS images are currently replicated to the following regions: - -* `eu-central-1` -* `eu-west-1` -* `eu-west-3` -* `us-east-2` -* `ap-south-1` - -If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+AWS+image+region:+xx-xxxx-x). 
- -You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). - -Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - - - -You must be authenticated with the [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). - -```bash -constellation iam create azure --subscriptionID 00000000-0000-0000-0000-000000000000 --region=westus --resourceGroup=constellTest --servicePrincipal=spTest -``` - -This command creates IAM configuration on the Azure region `westus` creating a new resource group `constellTest` and a new service principal `spTest`. - -CVMs are available in several Azure regions. Constellation OS images are currently replicated to the following: - -* `germanywestcentral` -* `westus` -* `eastus` -* `northeurope` -* `westeurope` -* `southeastasia` - -If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+Azure+image+region:+xx-xxxx-x). - -You can find a list of all [regions in Azure's documentation](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=virtual-machines®ions=all). - -Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - - - -You must be authenticated with the [GCP CLI](https://cloud.google.com/sdk/gcloud) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). - -```bash -constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --serviceAccountID=constell-test -``` - -This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. - -Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `N2D`. - -Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - - - -STACKIT requires manual creation and configuration of service accounts. Look at the [first steps](../getting-started/first-steps.md) for more information. - - - - -
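-
-For example, if a `constellation-conf.yaml` already exists in your workspace, IAM creation and the configuration update can be combined via the `--update-config` flag mentioned above (shown here with the AWS values from the example; the other CSPs work analogously):
-
-```bash
-constellation iam create aws --zone=us-east-2a --prefix=constellTest --update-config
-```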
-Alternatively, you can manually create the IAM configuration on your CSP. - -The following describes the configuration fields and how you obtain the required information or create the required resources. - - - - -* **region**: The name of your chosen AWS data center region, e.g., `us-east-2`. - - Constellation OS images are currently replicated to the following regions: - * `eu-central-1` - * `eu-west-1` - * `eu-west-3` - * `us-east-2` - * `ap-south-1` - - If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+AWS+image+region:+xx-xxxx-x). - - You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). - -* **zone**: The name of your chosen AWS data center availability zone, e.g., `us-east-2a`. - - Learn more about [availability zones in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-availability-zones). - -* **iamProfileControlPlane**: The name of an IAM instance profile attached to all control-plane nodes. - - You can create the resource with [Terraform](https://www.terraform.io/). For that, use the [provided Terraform script](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam) to generate the necessary profile. The profile name will be provided as Terraform output value: `control_plane_instance_profile_name`. - - Alternatively, you can create the AWS profile with a tool of your choice. Use the JSON policy in [main.tf](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam/main.tf) in the resource `aws_iam_policy.control_plane_policy`. - -* **iamProfileWorkerNodes**: The name of an IAM instance profile attached to all worker nodes. - - You can create the resource with [Terraform](https://www.terraform.io/). For that, use the [provided Terraform script](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam) to generate the necessary profile. The profile name will be provided as Terraform output value: `worker_nodes_instance_profile_name`. - - Alternatively, you can create the AWS profile with a tool of your choice. Use the JSON policy in [main.tf](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam/main.tf) in the resource `aws_iam_policy.worker_node_policy`. - - - - -* **subscription**: The UUID of your Azure subscription, e.g., `8b8bd01f-efd9-4113-9bd1-c82137c32da7`. - - You can view your subscription UUID via `az account show` and read the `id` field. For more information refer to [Azure's documentation](https://docs.microsoft.com/en-us/azure/azure-portal/get-subscription-tenant-id#find-your-azure-subscription). - -* **tenant**: The UUID of your Azure tenant, e.g., `3400e5a2-8fe2-492a-886c-38cb66170f25`. - - You can view your tenant UUID via `az account show` and read the `tenant` field. For more information refer to [Azure's documentation](https://docs.microsoft.com/en-us/azure/azure-portal/get-subscription-tenant-id#find-your-azure-ad-tenant). - -* **location**: The Azure datacenter location you want to deploy your cluster in, e.g., `westus`. - - CVMs are available in several Azure regions. 
Constellation OS images are currently replicated to the following: - - * `germanywestcentral` - * `westus` - * `eastus` - * `northeurope` - * `westeurope` - * `southeastasia` - - If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+Azure+image+region:+xx-xxxx-x). - - You can find a list of all [regions in Azure's documentation](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=virtual-machines®ions=all). - -* **resourceGroup**: [Create a new resource group in Azure](https://learn.microsoft.com/azure/azure-resource-manager/management/manage-resource-groups-portal) for your Constellation cluster. Set this configuration field to the name of the created resource group. - -* **userAssignedIdentity**: [Create a new managed identity in Azure](https://learn.microsoft.com/azure/active-directory/managed-identities-azure-resources/how-manage-user-assigned-managed-identities). You should create the identity in a different resource group as all resources within the cluster resource group will be deleted on cluster termination. - - Add three role assignments to the identity: `Owner`, `Virtual Machine Contributor`, and `Application Insights Component Contributor`. The `scope` of all three should refer to the previously created cluster resource group. - - Set the configuration value to the full ID of the created identity, e.g., `/subscriptions/8b8bd01f-efd9-4113-9bd1-c82137c32da7/resourcegroups/constellation-identity/providers/Microsoft.ManagedIdentity/userAssignedIdentities/constellation-identity`. You can get it by opening the `JSON View` from the `Overview` section of the identity. - - The user-assigned identity is used by instances of the cluster to access other cloud resources. - For more information about managed identities refer to [Azure's documentation](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/how-manage-user-assigned-managed-identities). - - - - -* **project**: The ID of your GCP project, e.g., `constellation-129857`. - - You can find it on the [welcome screen of your GCP project](https://console.cloud.google.com/welcome). For more information refer to [Google's documentation](https://support.google.com/googleapi/answer/7014113). - -* **region**: The GCP region you want to deploy your cluster in, e.g., `us-central1`. - - You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). - -* **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-central1-a`. - - You can find a [list of all zones in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). - -* **serviceAccountKeyPath**: To configure this, you need to create a GCP [service account](https://cloud.google.com/iam/docs/service-accounts) with the following permissions: - - * `Compute Instance Admin (v1) (roles/compute.instanceAdmin.v1)` - * `Compute Network Admin (roles/compute.networkAdmin)` - * `Compute Security Admin (roles/compute.securityAdmin)` - * `Compute Storage Admin (roles/compute.storageAdmin)` - * `Service Account User (roles/iam.serviceAccountUser)` - - Afterward, create and download a new JSON key for this service account. Place the downloaded file in your Constellation workspace, and set the config parameter to the filename, e.g., `constellation-129857-15343dba46cb.json`. 
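-
-  As a sketch of how this could look with the gcloud CLI (project, account, and file names are taken from the examples above and purely illustrative; only one of the required role bindings is shown):
-
-  ```bash
-  # Create the service account (illustrative names)
-  gcloud iam service-accounts create constell-test --project=yourproject-12345
-
-  # Grant one of the required roles; repeat for every role listed above
-  gcloud projects add-iam-policy-binding yourproject-12345 \
-    --member="serviceAccount:constell-test@yourproject-12345.iam.gserviceaccount.com" \
-    --role="roles/compute.instanceAdmin.v1"
-
-  # Create and download a JSON key into the Constellation workspace
-  gcloud iam service-accounts keys create constellation-129857-15343dba46cb.json \
-    --iam-account=constell-test@yourproject-12345.iam.gserviceaccount.com
-  ```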
- - - - -STACKIT requires manual creation and configuration of service accounts. Look at the [first steps](../getting-started/first-steps.md) for more information. - - - -
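-
-Whichever way the IAM resources were created, the resulting values belong in the provider section of `constellation-conf.yaml`. As a purely illustrative sketch for AWS (the exact nesting may differ slightly; the profile names are hypothetical):
-
-```yaml
-provider:
-  aws:
-    region: us-east-2                  # chosen region
-    zone: us-east-2a                   # chosen availability zone
-    iamProfileControlPlane: constellTest-control-plane-profile   # hypothetical name
-    iamProfileWorkerNodes: constellTest-worker-profile           # hypothetical name
-```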
- -Now that you've configured your CSP, you can [create your cluster](./create.md). - -## Deleting an IAM configuration - -You can keep a created IAM configuration and reuse it for new clusters. Alternatively, you can also delete it if you don't want to use it anymore. - -Delete the IAM configuration by executing the following command in the same directory where you executed `constellation iam create` (the directory that contains [`constellation-iam-terraform`](../reference/terraform.md) as a subdirectory): - -```bash -constellation iam destroy -``` - -:::caution -For Azure, deleting the IAM configuration by executing `constellation iam destroy` will delete the whole resource group created by `constellation iam create`. -This also includes any additional resources in the resource group that weren't created by Constellation. -::: diff --git a/docs/versioned_docs/version-2.20/workflows/create.md b/docs/versioned_docs/version-2.20/workflows/create.md deleted file mode 100644 index 6074ebb16..000000000 --- a/docs/versioned_docs/version-2.20/workflows/create.md +++ /dev/null @@ -1,93 +0,0 @@ -# Create your cluster - -:::info -This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. -::: - - - ---- - -Creating your cluster happens through multiple phases. -The most significant ones are: - -1. Creating the necessary resources in your cloud environment -2. Bootstrapping the Constellation cluster and setting up a connection -3. Installing the necessary Kubernetes components - -`constellation apply` handles all this in a single command. -You can use the `--skip-phases` flag to skip specific phases of the process. -For example, if you created the infrastructure manually, you can skip the cloud resource creation phase. - -See the [architecture](../architecture/orchestration.md) section for details on the inner workings of this process. - -:::tip -If you don't have a cloud subscription, you can also set up a [local Constellation cluster using virtualization](../getting-started/first-steps-local.md) for testing. -::: - -Before you create the cluster, make sure to have a [valid configuration file](./config.md). - - - - -```bash -constellation apply -``` - -`apply` stores the state of your cluster's cloud resources in a [`constellation-terraform`](../architecture/orchestration.md#cluster-creation-process) directory in your workspace. - - - - -Self-managed infrastructure allows for more flexibility in the setup, by separating the infrastructure setup from the Constellation cluster management. -This provides flexibility in DevOps and can meet potential regulatory requirements. -It's recommended to use Terraform for infrastructure management, but you can use any tool of your choice. - -:::info - - When using Terraform, you can use the [Constellation Terraform provider](./terraform-provider.md) to manage the entire Constellation cluster lifecycle. - -::: - -You can refer to the Terraform files for the selected CSP from the [Constellation GitHub repository](https://github.com/edgelesssys/constellation/tree/main/terraform/infrastructure) for a minimum Constellation cluster configuration. From this base, you can now add, edit, or substitute resources per your own requirements with the infrastructure -management tooling of your choice. You need to keep the essential functionality of the base configuration in order for your cluster to function correctly. - - - -:::info - - On Azure, a manual update to the MAA provider's policy is necessary. 
You can apply the update with the following command after creating the infrastructure, with `` being the URL of the MAA provider (i.e., `$(terraform output attestation_url | jq -r)`, when using the minimal Terraform configuration). - - ```bash - constellation maa-patch  - ``` - -::: - - - -Make sure all necessary resources are created, e.g., by checking your CSP's portal, and retrieve the necessary values, aligned with the outputs (specified in `outputs.tf`) of the base configuration. - -Fill these outputs into the corresponding fields of the `Infrastructure` block inside the `constellation-state.yaml` file. For example, fill the IP or DNS name your cluster can be reached at into the `.Infrastructure.ClusterEndpoint` field. - -With the required cloud resources set up, continue with initializing your cluster. - -```bash -constellation apply --skip-phases=infrastructure -``` - - - - -Finally, configure `kubectl` for your cluster: - -```bash -export KUBECONFIG="$PWD/constellation-admin.conf" -``` - -🏁 That's it. You've successfully created a Constellation cluster. - -### Troubleshooting - -In case `apply` fails, the CLI collects logs from the bootstrapping instance and stores them inside `constellation-cluster.log`. diff --git a/docs/versioned_docs/version-2.20/workflows/lb.md b/docs/versioned_docs/version-2.20/workflows/lb.md deleted file mode 100644 index 868e61076..000000000 --- a/docs/versioned_docs/version-2.20/workflows/lb.md +++ /dev/null @@ -1,28 +0,0 @@ -# Expose a service - -Constellation integrates the native load balancers of each CSP. Therefore, to expose a service, simply [create a service of type `LoadBalancer`](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). - -## Internet-facing LB service on AWS - -To expose your application service externally, you might want to use a Kubernetes Service of type `LoadBalancer`. On AWS, load balancing is achieved through the [AWS Load Balancer Controller](https://kubernetes-sigs.github.io/aws-load-balancer-controller), just as in managed EKS. - -In recent versions, the controller deploys an internal LB by default. To get an internet-facing LB, set the annotation `service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing` on the service. For more details, see the [official docs](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.7/guide/service/nlb/). - -For general information on load balancing with AWS, see [Network load balancing on Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/network-load-balancing.html). - -:::caution -Before terminating the cluster, delete all LB-backed services so that the controller can clean up the related resources. -::: - -## Ingress on AWS - -The AWS Load Balancer Controller also provisions `Ingress` resources of class `alb`. -AWS Application Load Balancers (ALBs) can be configured with a [`target-type`](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.7/guide/ingress/annotations/#target-type). -The target type `ip` requires using the EKS container network solution, which makes it incompatible with Constellation. -If a service can be exposed on a `NodePort`, the target type `instance` can be used. - -See [Application load balancing on Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html) for more information. - -:::caution -Ingress handlers backed by AWS ALBs reside outside the Constellation cluster, so they shouldn't be handling sensitive traffic!
-::: diff --git a/docs/versioned_docs/version-2.20/workflows/recovery.md b/docs/versioned_docs/version-2.20/workflows/recovery.md deleted file mode 100644 index 592ae247b..000000000 --- a/docs/versioned_docs/version-2.20/workflows/recovery.md +++ /dev/null @@ -1,179 +0,0 @@ -# Recover your cluster - -Recovery of a Constellation cluster means getting it back into a healthy state after too many concurrent node failures in the control plane. -Reasons for an unhealthy cluster can vary from a power outage, or planned reboot, to migration of nodes and regions. -Recovery events are rare, because Constellation is built for high availability and automatically and securely replaces failed nodes. When a node is replaced, Constellation's control plane first verifies the new node before it sends the node the cryptographic keys required to decrypt its [state disk](../architecture/images.md#state-disk). - -Constellation provides a recovery mechanism for cases where the control plane has failed and is unable to replace nodes. -The `constellation recover` command securely connects to all nodes in need of recovery using [attested TLS](../architecture/attestation.md#attested-tls-atls) and provides them with the keys to decrypt their state disks and continue booting. - -## Identify unhealthy clusters - -The first step to recovery is identifying when a cluster becomes unhealthy. -Usually, this can be first observed when the Kubernetes API server becomes unresponsive. - -You can check the health status of the nodes via the cloud service provider (CSP). -Constellation provides logging information on the boot process and status via serial console output. -In the following, you'll find detailed descriptions for identifying clusters stuck in recovery for each CSP. - - - - -First, open the AWS console to view all Auto Scaling Groups (ASGs) in the region of your cluster. Select the ASG of the control plane `--control-plane` and check that enough members are in a *Running* state. - -Second, check the boot logs of these *Instances*. In the ASG's *Instance management* view, select each desired instance. In the upper right corner, select **Action > Monitor and troubleshoot > Get system log**. - -In the serial console output, search for `Waiting for decryption key`. -Similar output to the following means your node was restarted and needs to decrypt the [state disk](../architecture/images.md#state-disk): - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","caller":"cmd/main.go:55","msg":"Starting disk-mapper","version":"2.0.0","cloudProvider":"gcp"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"setupManager","caller":"setup/setup.go:72","msg":"Preparing existing state disk"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:65","msg":"Starting RejoinClient"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"recoveryServer","caller":"recoveryserver/server.go:59","msg":"Starting RecoveryServer"} -``` - -The node will then try to connect to the [*JoinService*](../architecture/microservices.md#joinservice) and obtain the decryption key. 
-If this fails due to an unhealthy control plane, you will see log messages similar to the following: - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:77","msg":"Received list with JoinService endpoints","endpoints":["192.168.178.4:30090","192.168.178.2:30090"]} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.4:30090"} -{"level":"WARN","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.4:30090: connect: connection refused\"","endpoint":"192.168.178.4:30090"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.2:30090"} -{"level":"WARN","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.2:30090: i/o timeout\"","endpoint":"192.168.178.2:30090"} -{"level":"ERROR","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:110","msg":"Failed to rejoin on all endpoints"} -``` - -This means that you have to recover the node manually. - - - - -In the Azure portal, find the cluster's resource group. -Inside the resource group, open the control plane *Virtual machine scale set* `constellation-scale-set-controlplanes-`. -On the left, go to **Settings** > **Instances** and check that enough members are in a *Running* state. - -Second, check the boot logs of these *Instances*. -In the scale set's *Instances* view, open the details page of the desired instance. -On the left, go to **Support + troubleshooting** > **Serial console**. - -In the serial console output, search for `Waiting for decryption key`. -Similar output to the following means your node was restarted and needs to decrypt the [state disk](../architecture/images.md#state-disk): - -```json -{"level":"INFO","ts":"2022-09-08T09:56:41Z","caller":"cmd/main.go:55","msg":"Starting disk-mapper","version":"2.0.0","cloudProvider":"azure"} -{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"setupManager","caller":"setup/setup.go:72","msg":"Preparing existing state disk"} -{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"recoveryServer","caller":"recoveryserver/server.go:59","msg":"Starting RecoveryServer"} -{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"rejoinClient","caller":"rejoinclient/client.go:65","msg":"Starting RejoinClient"} -``` - -The node will then try to connect to the [*JoinService*](../architecture/microservices.md#joinservice) and obtain the decryption key. 
-If this fails due to an unhealthy control plane, you will see log messages similar to the following: - -```json -{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"rejoinClient","caller":"rejoinclient/client.go:77","msg":"Received list with JoinService endpoints","endpoints":["10.9.0.5:30090","10.9.0.6:30090"]} -{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"10.9.0.5:30090"} -{"level":"WARN","ts":"2022-09-08T09:57:03Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 10.9.0.5:30090: i/o timeout\"","endpoint":"10.9.0.5:30090"} -{"level":"INFO","ts":"2022-09-08T09:57:03Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"10.9.0.6:30090"} -{"level":"WARN","ts":"2022-09-08T09:57:23Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 10.9.0.6:30090: i/o timeout\"","endpoint":"10.9.0.6:30090"} -{"level":"ERROR","ts":"2022-09-08T09:57:23Z","logger":"rejoinClient","caller":"rejoinclient/client.go:110","msg":"Failed to rejoin on all endpoints"} -``` - -This means that you have to recover the node manually. - - - - -First, check that the control plane *Instance Group* has enough members in a *Ready* state. -In the GCP Console, go to **Instance Groups** and check the group for the cluster's control plane `-control-plane-`. - -Second, check the status of the *VM Instances*. -Go to **VM Instances** and open the details of the desired instance. -Check the serial console output of that instance by opening the **Logs** > **Serial port 1 (console)** page: - -![GCP portal serial console link](../_media/recovery-gcp-serial-console-link.png) - -In the serial console output, search for `Waiting for decryption key`. -Similar output to the following means your node was restarted and needs to decrypt the [state disk](../architecture/images.md#state-disk): - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","caller":"cmd/main.go:55","msg":"Starting disk-mapper","version":"2.0.0","cloudProvider":"gcp"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"setupManager","caller":"setup/setup.go:72","msg":"Preparing existing state disk"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:65","msg":"Starting RejoinClient"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"recoveryServer","caller":"recoveryserver/server.go:59","msg":"Starting RecoveryServer"} -``` - -The node will then try to connect to the [*JoinService*](../architecture/microservices.md#joinservice) and obtain the decryption key. 
-If this fails due to an unhealthy control plane, you will see log messages similar to the following: - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:77","msg":"Received list with JoinService endpoints","endpoints":["192.168.178.4:30090","192.168.178.2:30090"]} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.4:30090"} -{"level":"WARN","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.4:30090: connect: connection refused\"","endpoint":"192.168.178.4:30090"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.2:30090"} -{"level":"WARN","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.2:30090: i/o timeout\"","endpoint":"192.168.178.2:30090"} -{"level":"ERROR","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:110","msg":"Failed to rejoin on all endpoints"} -``` - -This means that you have to recover the node manually. - - - - -First, open the STACKIT portal to view all servers in your project. Select individual control plane nodes `--control-plane--` and check that enough members are in a *Running* state. - -Second, check the boot logs of these servers. Click on a server name and select **Overview**. Find the **Machine Setup** section and click on **Web console** > **Open console**. - -In the serial console output, search for `Waiting for decryption key`. -Similar output to the following means your node was restarted and needs to decrypt the [state disk](../architecture/images.md#state-disk): - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","caller":"cmd/main.go:55","msg":"Starting disk-mapper","version":"2.0.0","cloudProvider":"gcp"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"setupManager","caller":"setup/setup.go:72","msg":"Preparing existing state disk"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:65","msg":"Starting RejoinClient"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"recoveryServer","caller":"recoveryserver/server.go:59","msg":"Starting RecoveryServer"} -``` - -The node will then try to connect to the [*JoinService*](../architecture/microservices.md#joinservice) and obtain the decryption key. 
-If this fails due to an unhealthy control plane, you will see log messages similar to the following: - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:77","msg":"Received list with JoinService endpoints","endpoints":["192.168.178.4:30090","192.168.178.2:30090"]} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.4:30090"} -{"level":"WARN","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.4:30090: connect: connection refused\"","endpoint":"192.168.178.4:30090"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.2:30090"} -{"level":"WARN","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.2:30090: i/o timeout\"","endpoint":"192.168.178.2:30090"} -{"level":"ERROR","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:110","msg":"Failed to rejoin on all endpoints"} -``` - -This means that you have to recover the node manually. - - - - -## Recover a cluster - -Recovering a cluster requires the following parameters: - -* The `constellation-state.yaml` file in your working directory or the cluster's endpoint -* The master secret of the cluster - -A cluster can be recovered like this: - -```bash -$ constellation recover -Pushed recovery key. -Pushed recovery key. -Pushed recovery key. -Recovered 3 control-plane nodes. -``` - -In the serial console output of the node you'll see a similar output to the following: - -```json -{"level":"INFO","ts":"2022-09-08T10:26:59Z","logger":"recoveryServer","caller":"recoveryserver/server.go:93","msg":"Received recover call"} -{"level":"INFO","ts":"2022-09-08T10:26:59Z","logger":"recoveryServer","caller":"recoveryserver/server.go:125","msg":"Received state disk key and measurement secret, shutting down server"} -{"level":"INFO","ts":"2022-09-08T10:26:59Z","logger":"recoveryServer.gRPC","caller":"zap/server_interceptors.go:61","msg":"finished streaming call with code OK","grpc.start_time":"2022-09-08T10:26:59Z","system":"grpc","span.kind":"server","grpc.service":"recoverproto.API","grpc.method":"Recover","peer.address":"192.0.2.3:41752","grpc.code":"OK","grpc.time_ms":15.701} -{"level":"INFO","ts":"2022-09-08T10:27:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:87","msg":"RejoinClient stopped"} -``` diff --git a/docs/versioned_docs/version-2.20/workflows/reproducible-builds.md b/docs/versioned_docs/version-2.20/workflows/reproducible-builds.md deleted file mode 100644 index e3bc46095..000000000 --- a/docs/versioned_docs/version-2.20/workflows/reproducible-builds.md +++ /dev/null @@ -1,63 +0,0 @@ -# Reproduce released artifacts - -Constellation has first-class support for [reproducible builds](https://reproducible-builds.org). -Reproducing the released artifacts is an alternative to [signature verification](verify-cli.md) that doesn't require trusting Edgeless Systems' release process. 
-The following sections describe how to rebuild an artifact and how Constellation ensures that the rebuild reproduces the artifacts bit-by-bit. - -## Build environment prerequisites - -The build systems used by Constellation - [Bazel](https://bazel.build/) and [Nix](https://nixos.org) - are designed for deterministic, reproducible builds. -These two dependencies should be the only prerequisites for a successful build. -However, it can't be ruled out completely that peculiarities of the host affect the build result. -Thus, we recommend the following host setup for best results: - -1. A Linux operating system not older than v5.4. -2. The GNU C library not older than v2.31 (avoid `musl`). -3. GNU `coreutils` not older than v8.30 (avoid `busybox`). -4. An `ext4` filesystem for building. -5. AppArmor turned off. - -This is given, for example, on an Ubuntu 22.04 system, which is also used for reproducibility tests. - -:::note - -To avoid any backwards-compatibility issues, the host software versions should also not be much newer than the Constellation release. - -::: - -## Run the build - -The following instructions outline qualitatively how to reproduce a build. -Constellation implements these instructions in the [Reproducible Builds workflow](https://github.com/edgelesssys/constellation/actions/workflows/reproducible-builds.yml), which continuously tests for reproducibility. -The workflow is a good place to look up specific version numbers and build steps. - -1. Check out the Constellation repository at the tag corresponding to the release. - - ```bash - git clone https://github.com/edgelesssys/constellation.git - cd constellation - git checkout v2.20.0 - ``` - -2. [Install the Bazel release](https://bazel.build/install) specified in `.bazelversion`. -3. [Install Nix](https://nixos.org/download/) (any recent version should do). -4. Run the build with `bazel build $target` for one of the following targets of interest: - - ```data - //cli:cli_enterprise_darwin_amd64 - //cli:cli_enterprise_darwin_arm64 - //cli:cli_enterprise_linux_amd64 - //cli:cli_enterprise_linux_arm64 - //cli:cli_enterprise_windows_amd64 - ``` - -5. Compare the build result with the downloaded release artifact. - - - -## Feedback - -Reproduction failures often indicate a bug in the build system or in the build definitions. -Therefore, we're interested in any reproducibility issues you might encounter. -[Start a bug report](https://github.com/edgelesssys/constellation/issues/new/choose) and describe the details of your build environment. -Make sure to include your result binary or a [`diffoscope`](https://diffoscope.org/) report, if possible. diff --git a/docs/versioned_docs/version-2.20/workflows/s3proxy.md b/docs/versioned_docs/version-2.20/workflows/s3proxy.md deleted file mode 100644 index 121e8a461..000000000 --- a/docs/versioned_docs/version-2.20/workflows/s3proxy.md +++ /dev/null @@ -1,58 +0,0 @@ -# Install s3proxy - -Constellation includes a transparent client-side encryption proxy for [AWS S3](https://aws.amazon.com/de/s3/) and compatible stores. -s3proxy encrypts objects before sending them to S3 and automatically decrypts them on retrieval, without requiring changes to your application. -With s3proxy, you can use S3 for storage in a confidential way without having to trust the storage provider. - -## Limitations - -Currently, s3proxy has the following limitations: -- Only `PutObject` and `GetObject` requests are encrypted/decrypted by s3proxy. 
-By default, s3proxy will block requests that may expose unencrypted data to S3 (e.g. UploadPart). -The `allow-multipart` flag disables request blocking for evaluation purposes. -- Using the [Range](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html#API_GetObject_RequestSyntax) header on `GetObject` is currently not supported and will result in an error. - -These limitations will be removed with future iterations of s3proxy. -If you want to use s3proxy but these limitations stop you from doing so, consider [opening an issue](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&projects=&template=feature_request.yml). - -## Deployment - -You can add the s3proxy to your Constellation cluster as follows: -1. Add the Edgeless Systems chart repository: - ```bash - helm repo add edgeless https://helm.edgeless.systems/stable - helm repo update - ``` -2. Set ACCESS_KEY and ACCESS_SECRET to valid credentials you want s3proxy to use to interact with S3. -3. Deploy s3proxy: - ```bash - helm install s3proxy edgeless/s3proxy --set awsAccessKeyID="$ACCESS_KEY" --set awsSecretAccessKey="$ACCESS_SECRET" - ``` - -If you want to run a demo application, check out the [Filestash with s3proxy](../getting-started/examples/filestash-s3proxy.md) example. - - -## Technical details - -### Encryption - -s3proxy relies on Google's [Tink Cryptographic Library](https://developers.google.com/tink) to implement cryptographic operations securely. -The used cryptographic primitives are [NIST SP 800 38f](https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-38F.pdf) for key wrapping and [AES](https://en.wikipedia.org/wiki/Advanced_Encryption_Standard)-[GCM](https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Galois/counter_(GCM)) with 256 bit keys for data encryption. - -s3proxy uses [envelope encryption](https://cloud.google.com/kms/docs/envelope-encryption) to encrypt objects. -This means s3proxy uses a key encryption key (KEK) issued by the [KeyService](../architecture/microservices.md#keyservice) to encrypt data encryption keys (DEKs). -Each S3 object is encrypted with its own DEK. -The encrypted DEK is then saved as metadata of the encrypted object. -This enables key rotation of the KEK without re-encrypting the data in S3. -The approach also allows access to objects from different locations, as long as each location has access to the KEK. - -### Traffic interception - -To use s3proxy, you have to redirect your outbound S3 traffic to s3proxy. -This can either be done by modifying your client application or by changing the deployment of your application. - -The necessary deployment modifications are to add DNS redirection and a trusted TLS certificate to the client's trust store. -DNS redirection can be defined for each pod, allowing you to use s3proxy for one application without changing other applications in the same cluster. -Adding a trusted TLS certificate is necessary as clients communicate with s3proxy via HTTPS. -To have your client application trust s3proxy's TLS certificate, the certificate has to be added to the client's certificate trust store. -The [Filestash with s3proxy](../getting-started/examples/filestash-s3proxy.md) example shows how to do this. 
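-
-As a purely illustrative sketch (the IP, hostname, and image are assumptions; the linked Filestash example is the reference), per-pod DNS redirection could be a `hostAliases` entry that points the S3 endpoint used by the client at s3proxy's cluster IP:
-
-```yaml
-apiVersion: v1
-kind: Pod
-metadata:
-  name: my-s3-client                  # hypothetical client pod
-spec:
-  hostAliases:
-    - ip: "10.96.13.37"               # assumed cluster IP of the s3proxy service
-      hostnames:
-        - "s3.eu-central-1.amazonaws.com"   # S3 endpoint the client talks to
-  containers:
-    - name: app
-      image: my-s3-client:latest      # hypothetical image; must trust s3proxy's TLS certificate
-```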
diff --git a/docs/versioned_docs/version-2.20/workflows/sbom.md b/docs/versioned_docs/version-2.20/workflows/sbom.md deleted file mode 100644 index 6c1702dee..000000000 --- a/docs/versioned_docs/version-2.20/workflows/sbom.md +++ /dev/null @@ -1,93 +0,0 @@ -# Consume software bill of materials (SBOMs) - - - ---- - -Constellation builds produce a [software bill of materials (SBOM)](https://www.ntia.gov/SBOM) for each generated [artifact](../architecture/microservices.md). -You can use SBOMs to make informed decisions about dependencies and vulnerabilities in a given application. Enterprises rely on SBOMs to maintain an inventory of used applications, which allows them to take data-driven approaches to managing risks related to vulnerabilities. - -SBOMs for Constellation are generated using [Syft](https://github.com/anchore/syft), signed using [Cosign](https://github.com/sigstore/cosign), and stored with the produced artifact. - -:::note -The public key for Edgeless Systems' long-term code-signing key is: - -``` ------BEGIN PUBLIC KEY----- -MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEf8F1hpmwE+YCFXzjGtaQcrL6XZVT -JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== ------END PUBLIC KEY----- -``` - -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). - -Make sure the key is available in a file named `cosign.pub` to execute the following examples. -::: - -## Verify and download SBOMs - -The following sections detail how to work with each type of artifact to verify and extract the SBOM. - -### Constellation CLI - -The SBOM for Constellation CLI is made available on the [GitHub release page](https://github.com/edgelesssys/constellation/releases). The SBOM (`constellation.spdx.sbom`) and corresponding signature (`constellation.spdx.sbom.sig`) are valid for each Constellation CLI for a given version, regardless of architecture and operating system. - -```bash -curl -LO https://github.com/edgelesssys/constellation/releases/download/v2.2.0/constellation.spdx.sbom -curl -LO https://github.com/edgelesssys/constellation/releases/download/v2.2.0/constellation.spdx.sbom.sig -cosign verify-blob --key cosign.pub --signature constellation.spdx.sbom.sig constellation.spdx.sbom -``` - -### Container Images - -SBOMs for container images are [attached to the image using Cosign](https://docs.sigstore.dev/cosign/signing/other_types/#sboms-software-bill-of-materials) and uploaded to the same registry. 
- -As a consumer, use cosign to download and verify the SBOM: - -```bash -# Verify and download the attestation statement -cosign verify-attestation ghcr.io/edgelesssys/constellation/verification-service@v2.2.0 --type 'https://cyclonedx.org/bom' --key cosign.pub --output-file verification-service.att.json -# Extract SBOM from attestation statement -jq -r .payload verification-service.att.json | base64 -d > verification-service.cyclonedx.sbom -``` - -A successful verification should result in similar output: - -```shell-session -$ cosign verify-attestation ghcr.io/edgelesssys/constellation/verification-service@v2.2.0 --type 'https://cyclonedx.org/bom' --key cosign.pub --output-file verification-service.sbom - -Verification for ghcr.io/edgelesssys/constellation/verification-service@v2.2.0 -- -The following checks were performed on each of these signatures: - - The cosign claims were validated - - The signatures were verified against the specified public key -$ jq -r .payload verification-service.sbom | base64 -d > verification-service.cyclonedx.sbom -``` - -:::note - -This example considers only the `verification-service`. The same approach works for all containers in the [Constellation container registry](https://github.com/orgs/edgelesssys/packages?repo_name=constellation). - -::: - - - -## Vulnerability scanning - -You can use a plethora of tools to consume SBOMs. This section provides suggestions for tools that are popular and known to produce reliable results, but any tool that consumes [SPDX](https://spdx.dev/) or [CycloneDX](https://cyclonedx.org/) files should work. - -Syft is able to [convert between the two formats](https://github.com/anchore/syft#format-conversion-experimental) in case you require a specific type. - -### Grype - -[Grype](https://github.com/anchore/grype) is a CLI tool that lends itself well for integration into CI/CD systems or local developer machines. It's also able to consume the signed attestation statement directly and does the verification in one go. - -```bash -grype att:verification-service.sbom --key cosign.pub --add-cpes-if-none -q -``` - -### Dependency Track - -[Dependency Track](https://dependencytrack.org/) is one of the oldest and most mature solutions when it comes to managing software inventory and vulnerabilities. Once imported, it continuously scans SBOMs for new vulnerabilities. It supports the CycloneDX format and provides direct guidance on how to comply with [U.S. Executive Order 14028](https://docs.dependencytrack.org/usage/executive-order-14028/). diff --git a/docs/versioned_docs/version-2.20/workflows/scale.md b/docs/versioned_docs/version-2.20/workflows/scale.md deleted file mode 100644 index 28f19e3f1..000000000 --- a/docs/versioned_docs/version-2.20/workflows/scale.md +++ /dev/null @@ -1,122 +0,0 @@ -# Scale your cluster - -Constellation provides all features of a Kubernetes cluster including scaling and autoscaling. - -## Worker node scaling - -### Autoscaling - -Constellation comes with autoscaling disabled by default. To enable autoscaling, find the scaling group of -worker nodes: - -```bash -kubectl get scalinggroups -o json | yq '.items | .[] | select(.spec.role == "Worker") | [{"name": .metadata.name, "nodeGoupName": .spec.nodeGroupName}]' -``` - -This will output a list of scaling groups with the corresponding cloud provider name (`name`) and the cloud provider agnostic name of the node group (`nodeGroupName`). 
- -Then, patch the `autoscaling` field of the scaling group resource with the desired `name` to `true`: - -```bash -# Replace with the name of the scaling group you want to enable autoscaling for -worker_group= -kubectl patch scalinggroups $worker_group --patch '{"spec":{"autoscaling": true}}' --type='merge' -kubectl get scalinggroup $worker_group -o jsonpath='{.spec}' | yq -P -``` - -The cluster autoscaler now automatically provisions additional worker nodes so that all pods have a place to run. -You can configure the minimum and maximum number of worker nodes in the scaling group by patching the `min` or -`max` fields of the scaling group resource: - -```bash -kubectl patch scalinggroups $worker_group --patch '{"spec":{"max": 5}}' --type='merge' -kubectl get scalinggroup $worker_group -o jsonpath='{.spec}' | yq -P -``` - -The cluster autoscaler will now never provision more than 5 worker nodes. - -If you want to see the autoscaling in action, try to add a deployment with a lot of replicas, like the -following Nginx deployment. The number of replicas needed to trigger the autoscaling depends on the size of -and count of your worker nodes. Wait for the rollout of the deployment to finish and compare the number of -worker nodes before and after the deployment: - -```bash -kubectl create deployment nginx --image=nginx --replicas 150 -kubectl -n kube-system get nodes -kubectl rollout status deployment nginx -kubectl -n kube-system get nodes -``` - -### Manual scaling - -Alternatively, you can manually scale your cluster up or down: - - - - -1. Go to Auto Scaling Groups and select the worker ASG to scale up. -2. Click **Edit** -3. Set the new (increased) **Desired capacity** and **Update**. - - - - -1. Find your Constellation resource group. -2. Select the `scale-set-workers`. -3. Go to **settings** and **scaling**. -4. Set the new **instance count** and **save**. - - - - -1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). -2. **Edit** the **worker** instance group. -3. Set the new **number of instances** and **save**. - - - - -Dynamic cluster scaling isn't yet supported for STACKIT. -Support will be introduced in one of the upcoming releases. - - - - -## Control-plane node scaling - -Control-plane nodes can **only be scaled manually and only scaled up**! - -To increase the number of control-plane nodes, follow these steps: - - - - -1. Go to Auto Scaling Groups and select the control-plane ASG to scale up. -2. Click **Edit** -3. Set the new (increased) **Desired capacity** and **Update**. - - - - -1. Find your Constellation resource group. -2. Select the `scale-set-controlplanes`. -3. Go to **settings** and **scaling**. -4. Set the new (increased) **instance count** and **save**. - - - - -1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). -2. **Edit** the **control-plane** instance group. -3. Set the new (increased) **number of instances** and **save**. - - - - -Dynamic cluster scaling isn't yet supported for STACKIT. -Support will be introduced in one of the upcoming releases. - - - - -If you scale down the number of control-planes nodes, the removed nodes won't be able to exit the `etcd` cluster correctly. This will endanger the quorum that's required to run a stable Kubernetes control plane. 
diff --git a/docs/versioned_docs/version-2.20/workflows/storage.md b/docs/versioned_docs/version-2.20/workflows/storage.md deleted file mode 100644 index a5c52be90..000000000 --- a/docs/versioned_docs/version-2.20/workflows/storage.md +++ /dev/null @@ -1,281 +0,0 @@ -# Use persistent storage - -Persistent storage in Kubernetes requires cloud-specific configuration. -For abstraction of container storage, Kubernetes offers [volumes](https://kubernetes.io/docs/concepts/storage/volumes/), -allowing users to mount storage solutions directly into containers. -The [Container Storage Interface (CSI)](https://kubernetes-csi.github.io/docs/) is the standard interface for exposing arbitrary block and file storage systems into containers in Kubernetes. -Cloud service providers (CSPs) offer their own CSI-based solutions for cloud storage. - -## Confidential storage - -Most cloud storage solutions support encryption, such as [GCE Persistent Disks (PD)](https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek). -Constellation supports the available CSI-based storage options for Kubernetes engines in AWS, Azure, GCP, and STACKIT. -However, their encryption takes place in the storage backend and is managed by the CSP. -Thus, using the default CSI drivers for these storage types means trusting the CSP with your persistent data. - -To address this, Constellation provides CSI drivers for AWS EBS, Azure Disk, GCE PD, and OpenStack Cinder, offering [encryption on the node level](../architecture/keys.md#storage-encryption). They enable transparent encryption for persistent volumes without needing to trust the cloud backend. Plaintext data never leaves the confidential VM context, offering you confidential storage. - -For more details see [encrypted persistent storage](../architecture/encrypted-storage.md). - -## CSI drivers - -Constellation supports the following drivers, which offer node-level encryption and optional integrity protection. - - - - -**Constellation CSI driver for AWS Elastic Block Store** -Mount [Elastic Block Store](https://aws.amazon.com/ebs/) storage volumes into your Constellation cluster. -Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-aws-ebs-csi-driver) for more information. - - - - -**Constellation CSI driver for Azure Disk**: -Mount Azure [Disk Storage](https://azure.microsoft.com/en-us/services/storage/disks/#overview) into your Constellation cluster. -See the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-azuredisk-csi-driver) for more information. -Since Azure Disks are mounted as `ReadWriteOnce`, they're only available to a single pod. - - - - -**Constellation CSI driver for GCP Persistent Disk**: -Mount [Persistent Disk](https://cloud.google.com/persistent-disk) block storage into your Constellation cluster. -Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-gcp-compute-persistent-disk-csi-driver) for more information. - - - - -**Constellation CSI driver for STACKIT / OpenStack Cinder** -Mount [Cinder](https://docs.openstack.org/cinder/latest/) block storage volumes into your Constellation cluster. 
-Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-cloud-provider-openstack) for more information. - - - - -Note that in case the options above aren't a suitable solution for you, Constellation is compatible with all other CSI-based storage options. For example, you can use [AWS EFS](https://docs.aws.amazon.com/en_en/eks/latest/userguide/efs-csi.html), [Azure Files](https://docs.microsoft.com/en-us/azure/storage/files/storage-files-introduction), or [GCP Filestore](https://cloud.google.com/filestore) with Constellation out of the box. Constellation is just not providing transparent encryption on the node level for these storage types yet. - -## Installation - -The Constellation CLI automatically installs Constellation's CSI driver for the selected CSP in your cluster. -If you don't need a CSI driver or wish to deploy your own, you can disable the automatic installation by setting `deployCSIDriver` to `false` in your Constellation config file. - - - - -AWS comes with two storage classes by default. - -* `encrypted-rwo` - * Uses [SSDs of `gp3` type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html) - * ext-4 filesystem - * Encryption of all data written to disk -* `integrity-encrypted-rwo` - * Uses [SSDs of `gp3` type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html) - * ext-4 filesystem - * Encryption of all data written to disk - * Integrity protection of data written to disk - -For more information on encryption algorithms and key sizes, refer to [cryptographic algorithms](../architecture/encrypted-storage.md#cryptographic-algorithms). - -:::info - -The default storage class is set to `encrypted-rwo` for performance reasons. -If you want integrity-protected storage, set the `storageClassName` parameter of your persistent volume claim to `integrity-encrypted-rwo`. - -Alternatively, you can create your own storage class with integrity protection enabled by adding `csi.storage.k8s.io/fstype: ext4-integrity` to the class `parameters`. -Or use another filesystem by specifying another file system type with the suffix `-integrity`, e.g., `csi.storage.k8s.io/fstype: xfs-integrity`. - -Note that volume expansion isn't supported for integrity-protected disks. - -::: - - - - -Azure comes with two storage classes by default. - -* `encrypted-rwo` - * Uses [Standard SSDs](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#standard-ssds) - * ext-4 filesystem - * Encryption of all data written to disk -* `integrity-encrypted-rwo` - * Uses [Premium SSDs](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#premium-ssds) - * ext-4 filesystem - * Encryption of all data written to disk - * Integrity protection of data written to disk - -For more information on encryption algorithms and key sizes, refer to [cryptographic algorithms](../architecture/encrypted-storage.md#cryptographic-algorithms). - -:::info - -The default storage class is set to `encrypted-rwo` for performance reasons. -If you want integrity-protected storage, set the `storageClassName` parameter of your persistent volume claim to `integrity-encrypted-rwo`. - -Alternatively, you can create your own storage class with integrity protection enabled by adding `csi.storage.k8s.io/fstype: ext4-integrity` to the class `parameters`. 
-Or use another filesystem by specifying another file system type with the suffix `-integrity`, e.g., `csi.storage.k8s.io/fstype: xfs-integrity`. - -Note that volume expansion isn't supported for integrity-protected disks. - -::: - - - - -GCP comes with two storage classes by default. - -* `encrypted-rwo` - * Uses [standard persistent disks](https://cloud.google.com/compute/docs/disks#pdspecs) - * ext-4 filesystem - * Encryption of all data written to disk -* `integrity-encrypted-rwo` - * Uses [performance (SSD) persistent disks](https://cloud.google.com/compute/docs/disks#pdspecs) - * ext-4 filesystem - * Encryption of all data written to disk - * Integrity protection of data written to disk - -For more information on encryption algorithms and key sizes, refer to [cryptographic algorithms](../architecture/encrypted-storage.md#cryptographic-algorithms). - -:::info - -The default storage class is set to `encrypted-rwo` for performance reasons. -If you want integrity-protected storage, set the `storageClassName` parameter of your persistent volume claim to `integrity-encrypted-rwo`. - -Alternatively, you can create your own storage class with integrity protection enabled by adding `csi.storage.k8s.io/fstype: ext4-integrity` to the class `parameters`. -Or use another filesystem by specifying another file system type with the suffix `-integrity`, e.g., `csi.storage.k8s.io/fstype: xfs-integrity`. - -Note that volume expansion isn't supported for integrity-protected disks. - -::: - - - - -STACKIT comes with two storage classes by default. - -* `encrypted-rwo` - * Uses [disks of `storage_premium_perf1` type](https://docs.stackit.cloud/stackit/en/service-plans-blockstorage-75137974.html) - * ext-4 filesystem - * Encryption of all data written to disk -* `integrity-encrypted-rwo` - * Uses [disks of `storage_premium_perf1` type](https://docs.stackit.cloud/stackit/en/service-plans-blockstorage-75137974.html) - * ext-4 filesystem - * Encryption of all data written to disk - * Integrity protection of data written to disk - -For more information on encryption algorithms and key sizes, refer to [cryptographic algorithms](../architecture/encrypted-storage.md#cryptographic-algorithms). - -:::info - -The default storage class is set to `encrypted-rwo` for performance reasons. -If you want integrity-protected storage, set the `storageClassName` parameter of your persistent volume claim to `integrity-encrypted-rwo`. - -Alternatively, you can create your own storage class with integrity protection enabled by adding `csi.storage.k8s.io/fstype: ext4-integrity` to the class `parameters`. -Or use another filesystem by specifying another file system type with the suffix `-integrity`, e.g., `csi.storage.k8s.io/fstype: xfs-integrity`. - -Note that volume expansion isn't supported for integrity-protected disks. - -::: - - - - -1. Create a [persistent volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) - - A [persistent volume claim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) is a request for storage with certain properties. - It can refer to a storage class. - The following creates a persistent volume claim, requesting 20 GB of storage via the `encrypted-rwo` storage class: - - ```bash - cat < - ---- - -You can terminate your cluster using the CLI. For this, you need the Terraform state directory named [`constellation-terraform`](../reference/terraform.md) in the current directory. 
- -:::danger - -All ephemeral storage and state of your cluster will be lost. Make sure any data is safely stored in persistent storage. Constellation can recreate your cluster and the associated encryption keys, but won't backup your application data automatically. - -::: - - - -Terminate the cluster by running: - -```bash -constellation terminate -``` - -Or without confirmation (e.g., for automation purposes): - -```bash -constellation terminate --yes -``` - -This deletes all resources created by Constellation in your cloud environment. -All local files created by the `apply` command are deleted as well, except for `constellation-mastersecret.json` and the configuration file. - -:::caution - -Termination can fail if additional resources have been created that depend on the ones managed by Constellation. In this case, you need to delete these additional -resources manually. Just run the `terminate` command again afterward to continue the termination process of the cluster. - -::: - - - -Terminate the cluster by running: - -```bash -terraform destroy -``` - -Delete all files that are no longer needed: - -```bash -rm constellation-state.yaml constellation-admin.conf -``` - -Only the `constellation-mastersecret.json` and the configuration file remain. - - - diff --git a/docs/versioned_docs/version-2.20/workflows/terraform-provider.md b/docs/versioned_docs/version-2.20/workflows/terraform-provider.md deleted file mode 100644 index c7a795d3f..000000000 --- a/docs/versioned_docs/version-2.20/workflows/terraform-provider.md +++ /dev/null @@ -1,140 +0,0 @@ -# Use the Terraform provider - -The Constellation Terraform provider allows to manage the full lifecycle of a Constellation cluster (namely creation, upgrades, and deletion) via Terraform. -The provider is available through the [Terraform registry](https://registry.terraform.io/providers/edgelesssys/constellation/latest) and is released in lock-step with Constellation releases. - -## Prerequisites - -- a Linux / Mac operating system (ARM64/AMD64) -- a Terraform installation of version `v1.4.4` or above - -## Quick setup - -This example shows how to set up a Constellation cluster with the reference IAM and infrastructure setup. This setup is also used when creating a Constellation cluster through the Constellation CLI. You can either consume the IAM / infrastructure modules through a remote source (recommended) or local files. The latter requires downloading the infrastructure and IAM modules for the corresponding CSP from `terraform-modules.zip` on the [Constellation release page](https://github.com/edgelesssys/constellation/releases/latest) and placing them in the Terraform workspace directory. - -1. Create a directory (workspace) for your Constellation cluster. - - ```bash - mkdir constellation-workspace - cd constellation-workspace - ``` - -2. Use one of the [example configurations for using the Constellation Terraform provider](https://github.com/edgelesssys/constellation/tree/main/terraform-provider-constellation/examples/full) or create a `main.tf` file and fill it with the resources you want to create. The [Constellation Terraform provider documentation](https://registry.terraform.io/providers/edgelesssys/constellation/latest) offers thorough documentation on the resources and their attributes. -3. Initialize and apply the Terraform configuration. - - - - Initialize the providers and apply the configuration. 
- - ```bash - terraform init - terraform apply - ``` - - Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. - - - -:::info -On SEV-SNP, you need to manually patch the policy of the MAA provider before creating the Constellation cluster, as this feature isn't available in Azure's Terraform provider yet. The Constellation CLI provides a utility for patching, but you can also do it manually. - - ```bash - terraform init - terraform apply -target module.azure_iam # adjust resource path if not using the example configuration - terraform apply -target module.azure_infrastructure # adjust resource path if not using the example configuration - constellation maa-patch $(terraform output -raw maa_url) # adjust output path / input if not using the example configuration or manually patch the resource - terraform apply -target constellation_cluster.azure_example # adjust resource path if not using the example configuration - ``` - - Use the following policy if manually performing the patch. - - ``` - version= 1.0; - authorizationrules - { - [type=="x-ms-azurevm-default-securebootkeysvalidated", value==false] => deny(); - [type=="x-ms-azurevm-debuggersdisabled", value==false] => deny(); - // The line below was edited to use the MAA provider within Constellation. Do not edit manually. - //[type=="secureboot", value==false] => deny(); - [type=="x-ms-azurevm-signingdisabled", value==false] => deny(); - [type=="x-ms-azurevm-dbvalidated", value==false] => deny(); - [type=="x-ms-azurevm-dbxvalidated", value==false] => deny(); - => permit(); - }; - issuancerules - { - }; - ``` - -::: - - Initialize the providers and apply the configuration. - - ```bash - terraform init - terraform apply - ``` - - Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. - - - - - Initialize the providers and apply the configuration. - - ```bash - terraform init - terraform apply - ``` - - Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. - - - Initialize the providers and apply the configuration. - - ```bash - terraform init - terraform apply - ``` - - Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. - - - -4. Connect to the cluster. - - ```bash - terraform output -raw kubeconfig > constellation-admin.conf - export KUBECONFIG=$(realpath constellation-admin.conf) - ``` - -## Bringing your own infrastructure - -Instead of using the example infrastructure used in the [quick setup](#quick-setup), you can also provide your own infrastructure. -If you need a starting point for a custom infrastructure setup, you can download the infrastructure / IAM Terraform modules for the respective CSP from the Constellation [GitHub releases](https://github.com/edgelesssys/constellation/releases). 
You can modify and extend the modules per your requirements, while keeping the basic functionality intact. -The module contains: - -- `{csp}`: cloud resources the cluster runs on -- `iam/{csp}`: IAM resources used within the cluster - -When upgrading your cluster, make sure to check the Constellation release notes for potential breaking changes in the reference infrastructure / IAM modules that need to be considered. - -## Cluster upgrades - -:::tip -Also see the [general documentation on cluster upgrades](./upgrade.md). -::: - -The steps for applying the upgrade are as follows: - -1. Update the version constraint of the Constellation Terraform provider in the `required_providers` block in your Terraform configuration. -2. If you explicitly set any of the version attributes of the provider's resources and data sources (e.g. `image_version` or `constellation_microservice_version`), make sure to update them too. Refer to Constellation's [version support policy](https://github.com/edgelesssys/constellation/blob/main/dev-docs/workflows/versions-support.md) for more information on how each Constellation version and its dependencies are supported. -3. Update the IAM / infrastructure configuration. - - For [remote addresses as module sources](https://developer.hashicorp.com/terraform/language/modules/sources#fetching-archives-over-http), update the version number inside the address of the `source` field of the infrastructure / IAM module to the target version. - - For [local paths as module sources](https://developer.hashicorp.com/terraform/language/modules/sources#local-paths) or when [providing your own infrastructure](#bringing-your-own-infrastructure), see the changes made in the reference modules since the upgrade's origin version and adjust your infrastructure / IAM configuration accordingly. -4. Upgrade the Terraform module and provider dependencies and apply the targeted configuration. - -```bash - terraform init -upgrade - terraform apply -``` diff --git a/docs/versioned_docs/version-2.20/workflows/troubleshooting.md b/docs/versioned_docs/version-2.20/workflows/troubleshooting.md deleted file mode 100644 index 195bce1cc..000000000 --- a/docs/versioned_docs/version-2.20/workflows/troubleshooting.md +++ /dev/null @@ -1,151 +0,0 @@ -# Troubleshooting - -This section aids you in finding problems when working with Constellation. - -## Common issues - -### Issues with creating new clusters - -When you create a new cluster, you should always use the [latest release](https://github.com/edgelesssys/constellation/releases/latest). -If something doesn't work, check out the [known issues](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22). - -### Azure: Resource Providers can't be registered - -On Azure, you may receive the following error when running `apply` or `terminate` with limited IAM permissions: - -```shell-session -Error: Error ensuring Resource Providers are registered. - -Terraform automatically attempts to register the Resource Providers it supports to -ensure it's able to provision resources. - -If you don't have permission to register Resource Providers you may wish to use the -"skip_provider_registration" flag in the Provider block to disable this functionality. - -[...] -``` - -To continue, please ensure that the [required resource providers](../getting-started/install.md#required-permissions) have been registered in your subscription by your administrator. 
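For reference, an administrator can inspect and register a resource provider with the Azure CLI. This is only a sketch; `Microsoft.Attestation` is used as an example namespace, check the linked permissions page for the full list of required providers:

```bash
# Show the registration state of a resource provider
az provider show --namespace Microsoft.Attestation --query registrationState -o tsv

# Register the provider if it isn't registered yet
az provider register --namespace Microsoft.Attestation
```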
- -Afterward, set `ARM_SKIP_PROVIDER_REGISTRATION=true` as an environment variable and either run `apply` or `terminate` again. -For example: - -```bash -ARM_SKIP_PROVIDER_REGISTRATION=true constellation apply -``` - -Or alternatively, for `terminate`: - -```bash -ARM_SKIP_PROVIDER_REGISTRATION=true constellation terminate -``` - -### Azure: Can't update attestation policy - -On Azure, you may receive the following error when running `apply` from within an Azure environment, e.g., an Azure VM: - -```shell-session -An error occurred: patching policies: updating attestation policy: unexpected status code: 403 Forbidden -``` - -The problem occurs because the Azure SDK we use internally attempts to [authenticate towards the Azure API with the managed identity of your current environment instead of the Azure CLI token](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DefaultAzureCredential). - -We decided not to deviate from this behavior and comply with the ordering of credentials. - -A solution is to add the [required permissions](../getting-started/install.md#required-permissions) to the managed identity of your environment. For example, the managed identity of your Azure VM, instead of the account that you've authenticated with in the Azure CLI. - -If your setup requires a change in the ordering of credentials, please open an issue and explain your desired behavior. - - - -### Nodes fail to join with error `untrusted measurement value` - -This error indicates that a node's [attestation statement](../architecture/attestation.md) contains measurements that don't match the trusted values expected by the [JoinService](../architecture/microservices.md#joinservice). -This may for example happen if the cloud provider updates the VM's firmware such that it influences the [runtime measurements](../architecture/attestation.md#runtime-measurements) in an unforeseen way. -A failed upgrade due to an erroneous attestation config can also cause this error. -You can change the expected measurements to resolve the failure. - -:::caution - -Attestation and trusted measurements are crucial for the security of your cluster. -Be extra careful when manually changing these settings. -When in doubt, check if the encountered [issue is known](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22) or [contact support](https://github.com/edgelesssys/constellation#support). - -::: - -:::tip - -During an upgrade with modified attestation config, a backup of the current configuration is stored in the `join-config` config map in the `kube-system` namespace under the `attestationConfig_backup` key. To restore the old attestation config after a failed upgrade, replace the value of `attestationConfig` with the value from `attestationConfig_backup`: - -```bash -kubectl patch configmaps -n kube-system join-config -p "{\"data\":{\"attestationConfig\":\"$(kubectl get configmaps -n kube-system join-config -o "jsonpath={.data.attestationConfig_backup}")\"}}" -``` - -::: - -You can use the `apply` command to change measurements of a running cluster: - -1. Modify the `measurements` key in your local `constellation-conf.yaml` to the expected values. -2. Run `constellation apply`. - -Keep in mind that running `apply` also applies any version changes from your config to the cluster. 
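To illustrate step 1 above, a modified `measurements` entry in `constellation-conf.yaml` could look like the following excerpt; the value shown is just an example, use the values you expect for your image:

```yaml
# constellation-conf.yaml (excerpt)
measurements:
  4:
    expected: "02c7a67c01ec70ffaf23d73a12f749ab150a8ac6dc529bda2fe1096a98bf42ea"
    warnOnly: false
```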
- -You can run these commands to learn about the versions currently configured in the cluster: - -- Kubernetes API server version: `kubectl get nodeversion constellation-version -o json -n kube-system | jq .spec.kubernetesClusterVersion` -- image version: `kubectl get nodeversion constellation-version -o json -n kube-system | jq .spec.imageVersion` -- microservices versions: `helm list --filter 'constellation-services' -n kube-system` - -### Upgrading Kubernetes resources fails - -Constellation manages its Kubernetes resources using Helm. -When applying an upgrade, the charts that are about to be installed, and a values override file `overrides.yaml`, -are saved to disk in your current workspace under `constellation-upgrade/upgrade-/helm-charts/`. -If upgrading the charts using the Constellation CLI fails, you can review these charts and try to manually apply the upgrade. - -:::caution - -Changing and manually applying the charts may destroy cluster resources and can lead to broken Constellation deployments. -Proceed with caution and when in doubt, -check if the encountered [issue is known](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22) or [contact support](https://github.com/edgelesssys/constellation#support). - -::: - -## Diagnosing issues - -### Logs - -To get started on diagnosing issues with Constellation, it's often helpful to collect logs from nodes, pods, or other resources in the cluster. Most logs are available through Kubernetes' standard -[logging interfaces](https://kubernetes.io/docs/concepts/cluster-administration/logging/). - -To debug issues occurring at boot time of the nodes, you can use the serial console interface of the CSP while the machine boots to get a read-only view of the boot logs. - -Apart from that, Constellation also offers further [observability integrations](../architecture/observability.md). - -### Node shell access - -Debugging via a shell on a node is [directly supported by Kubernetes](https://kubernetes.io/docs/tasks/debug/debug-application/debug-running-pod/#node-shell-session). - -1. Figure out which node to connect to: - - ```bash - kubectl get nodes - # or to see more information, such as IPs: - kubectl get nodes -o wide - ``` - -2. Connect to the node: - - ```bash - kubectl debug node/constell-worker-xksa0-000000 -it --image=busybox - ``` - - You will be presented with a prompt. - - The nodes file system is mounted at `/host`. - -3. Once finished, clean up the debug pod: - - ```bash - kubectl delete pod node-debugger-constell-worker-xksa0-000000-bjthj - ``` diff --git a/docs/versioned_docs/version-2.20/workflows/trusted-launch.md b/docs/versioned_docs/version-2.20/workflows/trusted-launch.md deleted file mode 100644 index d6d01d8eb..000000000 --- a/docs/versioned_docs/version-2.20/workflows/trusted-launch.md +++ /dev/null @@ -1,54 +0,0 @@ -# Use Azure trusted launch VMs - -Constellation also supports [trusted launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch) on Microsoft Azure. Trusted launch VMs don't offer the same level of security as Confidential VMs, but are available in more regions and in larger quantities. The main difference between trusted launch VMs and normal VMs is that the former offer vTPM-based remote attestation. When used with trusted launch VMs, Constellation relies on vTPM-based remote attestation to verify nodes. 
- -:::caution - -Trusted launch VMs don't provide runtime encryption and don't keep the cloud service provider (CSP) out of your trusted computing base. - -::: - -Constellation supports trusted launch VMs with instance types `Standard_D*_v4` and `Standard_E*_v4`. Run `constellation config instance-types` for a list of all supported instance types. - -## VM images - -Azure currently doesn't support [community galleries for trusted launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/share-gallery-community). Thus, you need to manually import the Constellation node image into your cloud subscription. - -The latest image is available at `https://cdn.confidential.cloud/constellation/images/azure/trusted-launch/v2.2.0/constellation.img`. Simply adjust the version number to download a newer version. - -After you've downloaded the image, create a resource group `constellation-images` in your Azure subscription and import the image. -You can use a script to do this: - -```bash -wget https://raw.githubusercontent.com/edgelesssys/constellation/main/hack/importAzure.sh -chmod +x importAzure.sh -AZURE_IMAGE_VERSION=2.2.0 AZURE_RESOURCE_GROUP_NAME=constellation-images AZURE_IMAGE_FILE=./constellation.img ./importAzure.sh -``` - -The script creates the following resources: - -1. A new image gallery with the default name `constellation-import` -2. A new image definition with the default name `constellation` -3. The actual image with the provided version. In this case `2.2.0` - -Once the import is completed, use the `ID` of the image version in your `constellation-conf.yaml` for the `image` field. Set `confidentialVM` to `false`. - -Fetch the image measurements: - -```bash -IMAGE_VERSION=2.2.0 -URL=https://public-edgeless-constellation.s3.us-east-2.amazonaws.com//communitygalleries/constellationcvm-b3782fa0-0df7-4f2f-963e-fc7fc42663df/images/constellation/versions/$IMAGE_VERSION/measurements.yaml -constellation config fetch-measurements -u$URL -s$URL.sig -``` - -:::info - -The [`constellation apply`](create.md) command will issue a warning because manually imported images aren't recognized as production grade images: - -```shell-session -Configured image doesn't look like a released production image. Double check image before deploying to production. -``` - -Please ignore this warning. - -::: diff --git a/docs/versioned_docs/version-2.20/workflows/upgrade.md b/docs/versioned_docs/version-2.20/workflows/upgrade.md deleted file mode 100644 index 3db2ecad6..000000000 --- a/docs/versioned_docs/version-2.20/workflows/upgrade.md +++ /dev/null @@ -1,110 +0,0 @@ -# Upgrade your cluster - -Constellation provides an easy way to upgrade all components of your cluster, without disrupting its availability. -Specifically, you can upgrade the Kubernetes version, the nodes' image, and the Constellation microservices. -You configure the desired versions in your local Constellation configuration and trigger upgrades with the `apply` command. -To learn about available versions you use the `upgrade check` command. -Which versions are available depends on the CLI version you are using. - -## Update the CLI - -Each CLI comes with a set of supported microservice and Kubernetes versions. -Most importantly, a given CLI version can only upgrade a cluster of the previous minor version, but not older ones. -This means that you have to upgrade your CLI and cluster one minor version at a time. 
- -For example, if you are currently on CLI version v2.6 and the latest version is v2.8, you should - -* upgrade the CLI to v2.7, -* upgrade the cluster to v2.7, -* and only then continue upgrading the CLI (and the cluster) to v2.8. - -Also note that if your current Kubernetes version isn't supported by the next CLI version, use your current CLI to upgrade to a newer Kubernetes version first. - -To learn which Kubernetes versions are supported by a particular CLI, run [constellation config kubernetes-versions](../reference/cli.md#constellation-config-kubernetes-versions). - -## Migrate the configuration - -The Constellation configuration is located in the file `constellation-conf.yaml` in your workspace. -Refer to the [migration reference](../reference/migration.md) to check if you need to update fields in your configuration file. -Use [`constellation config migrate`](../reference/cli.md#constellation-config-migrate) to automatically update an old config file to a new format. - -## Check for upgrades - -To learn which versions the current CLI can upgrade to and what's installed in your cluster, run: - -```bash -# Show possible upgrades -constellation upgrade check - -# Show possible upgrades and write them to config file -constellation upgrade check --update-config -``` - -You can either enter the reported target versions into your config manually or run the above command with the `--update-config` flag. -When using this flag, the `kubernetesVersion`, `image`, `microserviceVersion`, and `attestation` fields are overwritten with the smallest available upgrade. - -## Apply the upgrade - -Once you've updated your config with the desired versions, you can trigger the upgrade with this command: - -```bash -constellation apply -``` - -Microservice upgrades will be finished within a few minutes, depending on the cluster size. -If you are interested, you can monitor pods restarting in the `kube-system` namespace with your tool of choice. - -Image and Kubernetes upgrades take longer. -For each node in your cluster, a new node has to be created and joined. -The process usually takes up to ten minutes per node. - -When applying an upgrade, the Helm charts for the upgrade as well as backup files of Constellation-managed Custom Resource Definitions, Custom Resources, and Terraform state are created. -You can use the Terraform state backup to restore previous resources in case an upgrade misconfigured or erroneously deleted a resource. -You can use the Custom Resource (Definition) backup files to restore Custom Resources and Definitions manually (e.g., via `kubectl apply`) if the automatic migration of those resources fails. -You can use the Helm charts to manually apply upgrades to the Kubernetes resources, should an upgrade fail. - -:::note - -For advanced users: the upgrade consists of several phases that can be individually skipped through the `--skip-phases` flag. -The phases are `infrastructure` for the cloud resource management through Terraform, `helm` for the chart management of the microservices, `image` for OS image upgrades, and `k8s` for Kubernetes version upgrades. - -::: - -## Check the status - -Upgrades are asynchronous operations. -After you run `apply`, it will take a while until the upgrade has completed.
-To understand if an upgrade is finished, you can run: - -```bash -constellation status -``` - -This command displays the following information: - -* The installed services and their versions -* The image and Kubernetes version the cluster is expecting on each node -* How many nodes are up to date - -Here's an example output: - -```shell-session -Target versions: - Image: v2.6.0 - Kubernetes: v1.25.8 -Service versions: - Cilium: v1.12.1 - cert-manager: v1.10.0 - constellation-operators: v2.6.0 - constellation-services: v2.6.0 -Cluster status: Some node versions are out of date - Image: 23/25 - Kubernetes: 25/25 -``` - -This output indicates that the cluster is running Kubernetes version `1.25.8`, and all nodes have the appropriate binaries installed. -23 out of 25 nodes have already upgraded to the targeted image version of `2.6.0`, while two are still in progress. - -## Apply further upgrades - -After the upgrade is finished, you can run `constellation upgrade check` again to see if there are more upgrades available. If so, repeat the process. diff --git a/docs/versioned_docs/version-2.20/workflows/verify-cli.md b/docs/versioned_docs/version-2.20/workflows/verify-cli.md deleted file mode 100644 index e33569d37..000000000 --- a/docs/versioned_docs/version-2.20/workflows/verify-cli.md +++ /dev/null @@ -1,129 +0,0 @@ -# Verify the CLI - -:::info -This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. -::: - - - ---- - -Edgeless Systems uses [sigstore](https://www.sigstore.dev/) and [SLSA](https://slsa.dev) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/cosign/signing/overview/), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at `https://rekor.sigstore.dev`. - -:::note -The public key for Edgeless Systems' long-term code-signing key is: - -``` ------BEGIN PUBLIC KEY----- -MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEf8F1hpmwE+YCFXzjGtaQcrL6XZVT -JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== ------END PUBLIC KEY----- -``` - -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). -::: - -The Rekor transparency log is a public append-only ledger that verifies and records signatures and associated metadata. The Rekor transparency log enables everyone to observe the sequence of (software) signatures issued by Edgeless Systems and many other parties. The transparency log allows for the public identification of dubious or malicious signatures. - -You should always ensure that (1) your CLI executable was signed with the private key corresponding to the above public key and that (2) there is a corresponding entry in the Rekor transparency log. Both can be done as described in the following. - -:::info -You don't need to verify the Constellation node images. This is done automatically by your CLI and the rest of Constellation. -::: - -## Verify the signature - -:::info -This guide assumes Linux on an amd64 processor. The exact steps for other platforms differ slightly. -::: - -First, [install the Cosign CLI](https://docs.sigstore.dev/cosign/system_config/installation/). 
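One possible way to install Cosign on Linux amd64 is to fetch the release binary directly; the linked installation guide lists package-manager alternatives:

```bash
# Download the Cosign release binary and place it on the PATH
curl -LO https://github.com/sigstore/cosign/releases/latest/download/cosign-linux-amd64
sudo install -m 0755 cosign-linux-amd64 /usr/local/bin/cosign
cosign version
```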
Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example: - -```shell-session -$ cosign verify-blob --key https://edgeless.systems/es.pub --signature constellation-linux-amd64.sig constellation-linux-amd64 - -Verified OK -``` - -The above performs an offline verification of the provided public key, signature, and executable. To also verify that a corresponding entry exists in the public Rekor transparency log, add the variable `COSIGN_EXPERIMENTAL=1`: - -```shell-session -$ COSIGN_EXPERIMENTAL=1 cosign verify-blob --key https://edgeless.systems/es.pub --signature constellation-linux-amd64.sig constellation-linux-amd64 - -tlog entry verified with uuid: afaba7f6635b3e058888692841848e5514357315be9528474b23f5dcccb82b13 index: 3477047 -Verified OK -``` - -🏁 You now know that your CLI executable was officially released and signed by Edgeless Systems. - -### Optional: Manually inspect the transparency log - -To further inspect the public Rekor transparency log, [install the Rekor CLI](https://docs.sigstore.dev/logging/installation). A search for the CLI executable should give a single UUID. (Note that this UUID contains the UUID from the previous `cosign` command.) - -```shell-session -$ rekor-cli search --artifact constellation-linux-amd64 - -Found matching entries (listed by UUID): -362f8ecba72f4326afaba7f6635b3e058888692841848e5514357315be9528474b23f5dcccb82b13 -``` - -With this UUID you can get the full entry from the transparency log: - -```shell-session -$ rekor-cli get --uuid=362f8ecba72f4326afaba7f6635b3e058888692841848e5514357315be9528474b23f5dcccb82b13 - -LogID: c0d23d6ad406973f9559f3ba2d1ca01f84147d8ffc5b8445c224f98b9591801d -Index: 3477047 -IntegratedTime: 2022-09-12T22:28:16Z -UUID: afaba7f6635b3e058888692841848e5514357315be9528474b23f5dcccb82b13 -Body: { - "HashedRekordObj": { - "data": { - "hash": { - "algorithm": "sha256", - "value": "40e137b9b9b8204d672642fd1e181c6d5ccb50cfc5cc7fcbb06a8c2c78f44aff" - } - }, - "signature": { - "content": "MEUCIQCSER3mGj+j5Pr2kOXTlCIHQC3gT30I7qkLr9Awt6eUUQIgcLUKRIlY50UN8JGwVeNgkBZyYD8HMxwC/LFRWoMn180=", - "publicKey": { - "content": "LS0tLS1CRUdJTiBQVUJMSUMgS0VZLS0tLS0KTUZrd0V3WUhLb1pJemowQ0FRWUlLb1pJemowREFRY0RRZ0FFZjhGMWhwbXdFK1lDRlh6akd0YVFjckw2WFpWVApKbUVlNWlTTHZHMVN5UVNBZXc3V2RNS0Y2bzl0OGUyVEZ1Q2t6bE9oaGx3czJPSFdiaUZabkZXQ0Z3PT0KLS0tLS1FTkQgUFVCTElDIEtFWS0tLS0tCg==" - } - } - } -} -``` - -The field `publicKey` should contain Edgeless Systems' public key in Base64 encoding. - -You can get an exhaustive list of artifact signatures issued by Edgeless Systems via the following command: - -```bash -rekor-cli search --public-key https://edgeless.systems/es.pub --pki-format x509 -``` - -Edgeless Systems monitors this list to detect potential unauthorized use of its private key. - -## Verify the provenance - -Provenance attests that a software artifact was produced by a specific repository and build system invocation. For more information on provenance visit [slsa.dev](https://slsa.dev/provenance/v0.2) and learn about the [adoption of SLSA for Constellation](../reference/slsa.md). - -Just as checking its signature proves that the CLI hasn't been manipulated, checking the provenance proves that the artifact was produced by the expected build process and hasn't been tampered with. - -To verify the provenance, first install the [slsa-verifier](https://github.com/slsa-framework/slsa-verifier). 
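If you have a Go toolchain available, one way to install the verifier is via `go install`; see the slsa-verifier repository for prebuilt binaries and the currently recommended installation method:

```bash
# Install the slsa-verifier CLI into $(go env GOPATH)/bin
go install github.com/slsa-framework/slsa-verifier/v2/cli/slsa-verifier@latest
```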
Then make sure you have the provenance file (`constellation.intoto.jsonl`) and Constellation CLI downloaded. Both are available on the [GitHub release page](https://github.com/edgelesssys/constellation/releases). - -:::info -The same provenance file is valid for all Constellation CLI executables of a given version independent of the target platform. -::: - -Use the verifier to perform the check: - -```shell-session -$ slsa-verifier verify-artifact constellation-linux-amd64 \ - --provenance-path constellation.intoto.jsonl \ - --source-uri github.com/edgelesssys/constellation - -Verified signature against tlog entry index 7771317 at URL: https://rekor.sigstore.dev/api/v1/log/entries/24296fb24b8ad77af2c04c8b4ae0d5bc5... -Verified build using builder https://github.com/slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@refs/tags/v1.2.2 at commit 18e9924b416323c37b9cdfd6cc728de8a947424a -PASSED: Verified SLSA provenance -``` diff --git a/docs/versioned_docs/version-2.20/workflows/verify-cluster.md b/docs/versioned_docs/version-2.20/workflows/verify-cluster.md deleted file mode 100644 index b6595ebf2..000000000 --- a/docs/versioned_docs/version-2.20/workflows/verify-cluster.md +++ /dev/null @@ -1,97 +0,0 @@ -# Verify your cluster - -Constellation's [attestation feature](../architecture/attestation.md) allows you, or a third party, to verify the integrity and confidentiality of your Constellation cluster. - -## Fetch measurements - -To verify the integrity of Constellation you need trusted measurements to verify against. For each node image released by Edgeless Systems, there are signed measurements, which you can download using the CLI: - -```bash -constellation config fetch-measurements -``` - -This command performs the following steps: - -1. Download the signed measurements for the configured image. By default, this will use Edgeless Systems' public measurement registry. -2. Verify the signature of the measurements. This will use Edgeless Systems' [public key](https://edgeless.systems/es.pub). -3. Write measurements into configuration file. - -The configuration file then contains a list of `measurements` similar to the following: - -```yaml -# ... -measurements: - 0: - expected: "0f35c214608d93c7a6e68ae7359b4a8be5a0e99eea9107ece427c4dea4e439cf" - warnOnly: false - 4: - expected: "02c7a67c01ec70ffaf23d73a12f749ab150a8ac6dc529bda2fe1096a98bf42ea" - warnOnly: false - 5: - expected: "e6949026b72e5045706cd1318889b3874480f7a3f7c5c590912391a2d15e6975" - warnOnly: true - 8: - expected: "0000000000000000000000000000000000000000000000000000000000000000" - warnOnly: false - 9: - expected: "f0a6e8601b00e2fdc57195686cd4ef45eb43a556ac1209b8e25d993213d68384" - warnOnly: false - 11: - expected: "0000000000000000000000000000000000000000000000000000000000000000" - warnOnly: false - 12: - expected: "da99eb6cf7c7fbb692067c87fd5ca0b7117dc293578e4fea41f95d3d3d6af5e2" - warnOnly: false - 13: - expected: "0000000000000000000000000000000000000000000000000000000000000000" - warnOnly: false - 14: - expected: "d7c4cc7ff7933022f013e03bdee875b91720b5b86cf1753cad830f95e791926f" - warnOnly: true - 15: - expected: "0000000000000000000000000000000000000000000000000000000000000000" - warnOnly: false -# ... -``` - -Each entry specifies the expected value of the Constellation node, and whether the measurement should be enforced (`warnOnly: false`), or only a warning should be logged (`warnOnly: true`). 
-By default, the subset of the [available measurements](../architecture/attestation.md#runtime-measurements) that can be locally reproduced and verified is enforced. - -During attestation, the validating side (CLI or [join service](../architecture/microservices.md#joinservice)) compares each measurement reported by the issuing side (first node or joining node) individually. -For mismatching measurements that have set `warnOnly` to `true` only a warning is emitted. -For mismatching measurements that have set `warnOnly` to `false` an error is emitted and attestation fails. -If attestation fails for a new node, it isn't permitted to join the cluster. - -## The *verify* command - -:::note -The steps below are purely optional. They're automatically executed by `constellation apply` when you initialize your cluster. The `constellation verify` command mostly has an illustrative purpose. -::: - -The `verify` command obtains and verifies an attestation statement from a running Constellation cluster. - -```bash -constellation verify [--cluster-id ...] -``` - -From the attestation statement, the command verifies the following properties: - -* The cluster is using the correct Confidential VM (CVM) type. -* Inside the CVMs, the correct node images are running. The node images are identified through the measurements obtained in the previous step. -* The unique ID of the cluster matches the one from your `constellation-state.yaml` file or passed in via `--cluster-id`. - -Once the above properties are verified, you know that you are talking to the right Constellation cluster and it's in a good and trustworthy shape. - -### Custom arguments - -The `verify` command also allows you to verify any Constellation deployment that you have network access to. For this you need the following: - -* The IP address of a running Constellation cluster's [VerificationService](../architecture/microservices.md#verificationservice). The `VerificationService` is exposed via a `NodePort` service using the external IP address of your cluster. Run `kubectl get nodes -o wide` and look for `EXTERNAL-IP`. -* The cluster's *clusterID*. See [cluster identity](../architecture/keys.md#cluster-identity) for more details. -* A `constellation-conf.yaml` file with the expected measurements of the cluster in your working directory. 
- -For example: - -```shell-session -constellation verify -e 192.0.2.1 --cluster-id Q29uc3RlbGxhdGlvbkRvY3VtZW50YXRpb25TZWNyZXQ= -``` diff --git a/docs/versioned_docs/version-2.21/_media/SLSA-Badge-full-level3.svg b/docs/versioned_docs/version-2.21/_media/SLSA-Badge-full-level3.svg deleted file mode 100644 index 7154d4a13..000000000 --- a/docs/versioned_docs/version-2.21/_media/SLSA-Badge-full-level3.svg +++ /dev/null @@ -1,47 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/docs/versioned_docs/version-2.21/_media/benchmark_fio_azure_bw.png b/docs/versioned_docs/version-2.21/_media/benchmark_fio_azure_bw.png deleted file mode 100644 index a82ebe2d0..000000000 Binary files a/docs/versioned_docs/version-2.21/_media/benchmark_fio_azure_bw.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.21/_media/benchmark_fio_azure_iops.png b/docs/versioned_docs/version-2.21/_media/benchmark_fio_azure_iops.png deleted file mode 100644 index 1723257a8..000000000 Binary files a/docs/versioned_docs/version-2.21/_media/benchmark_fio_azure_iops.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.21/_media/benchmark_fio_gcp_bw.png b/docs/versioned_docs/version-2.21/_media/benchmark_fio_gcp_bw.png deleted file mode 100644 index 4f0ecc94b..000000000 Binary files a/docs/versioned_docs/version-2.21/_media/benchmark_fio_gcp_bw.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.21/_media/benchmark_fio_gcp_iops.png b/docs/versioned_docs/version-2.21/_media/benchmark_fio_gcp_iops.png deleted file mode 100644 index 571086da2..000000000 Binary files a/docs/versioned_docs/version-2.21/_media/benchmark_fio_gcp_iops.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.21/_media/benchmark_net_p2p_azure.png b/docs/versioned_docs/version-2.21/_media/benchmark_net_p2p_azure.png deleted file mode 100644 index 9130349c7..000000000 Binary files a/docs/versioned_docs/version-2.21/_media/benchmark_net_p2p_azure.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.21/_media/benchmark_net_p2p_gcp.png b/docs/versioned_docs/version-2.21/_media/benchmark_net_p2p_gcp.png deleted file mode 100644 index a41557e96..000000000 Binary files a/docs/versioned_docs/version-2.21/_media/benchmark_net_p2p_gcp.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.21/_media/benchmark_net_p2svc_azure.png b/docs/versioned_docs/version-2.21/_media/benchmark_net_p2svc_azure.png deleted file mode 100644 index d83e17f5a..000000000 Binary files a/docs/versioned_docs/version-2.21/_media/benchmark_net_p2svc_azure.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.21/_media/benchmark_net_p2svc_gcp.png b/docs/versioned_docs/version-2.21/_media/benchmark_net_p2svc_gcp.png deleted file mode 100644 index 55916a1de..000000000 Binary files a/docs/versioned_docs/version-2.21/_media/benchmark_net_p2svc_gcp.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.21/_media/benchmark_vault/5replicas/max_latency.png b/docs/versioned_docs/version-2.21/_media/benchmark_vault/5replicas/max_latency.png deleted file mode 100644 index 696250181..000000000 Binary files a/docs/versioned_docs/version-2.21/_media/benchmark_vault/5replicas/max_latency.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.21/_media/benchmark_vault/5replicas/mean_latency.png b/docs/versioned_docs/version-2.21/_media/benchmark_vault/5replicas/mean_latency.png deleted file mode 100644 index 
3b43298ac..000000000 Binary files a/docs/versioned_docs/version-2.21/_media/benchmark_vault/5replicas/mean_latency.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.21/_media/benchmark_vault/5replicas/min_latency.png b/docs/versioned_docs/version-2.21/_media/benchmark_vault/5replicas/min_latency.png deleted file mode 100644 index 1046df67e..000000000 Binary files a/docs/versioned_docs/version-2.21/_media/benchmark_vault/5replicas/min_latency.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.21/_media/benchmark_vault/5replicas/p99_latency.png b/docs/versioned_docs/version-2.21/_media/benchmark_vault/5replicas/p99_latency.png deleted file mode 100644 index 0190118b2..000000000 Binary files a/docs/versioned_docs/version-2.21/_media/benchmark_vault/5replicas/p99_latency.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.21/_media/concept-constellation.svg b/docs/versioned_docs/version-2.21/_media/concept-constellation.svg deleted file mode 100644 index 30d32bf6d..000000000 --- a/docs/versioned_docs/version-2.21/_media/concept-constellation.svg +++ /dev/null @@ -1,460 +0,0 @@ - - diff --git a/docs/versioned_docs/version-2.21/_media/concept-managed.svg b/docs/versioned_docs/version-2.21/_media/concept-managed.svg deleted file mode 100644 index 5645a608f..000000000 --- a/docs/versioned_docs/version-2.21/_media/concept-managed.svg +++ /dev/null @@ -1,591 +0,0 @@ - - diff --git a/docs/versioned_docs/version-2.21/_media/constellation_oneline.svg b/docs/versioned_docs/version-2.21/_media/constellation_oneline.svg deleted file mode 100644 index 4e354958a..000000000 --- a/docs/versioned_docs/version-2.21/_media/constellation_oneline.svg +++ /dev/null @@ -1,52 +0,0 @@ - - - - - - - - diff --git a/docs/versioned_docs/version-2.21/_media/example-emojivoto.jpg b/docs/versioned_docs/version-2.21/_media/example-emojivoto.jpg deleted file mode 100644 index 4be0d5b26..000000000 Binary files a/docs/versioned_docs/version-2.21/_media/example-emojivoto.jpg and /dev/null differ diff --git a/docs/versioned_docs/version-2.21/_media/example-online-boutique.jpg b/docs/versioned_docs/version-2.21/_media/example-online-boutique.jpg deleted file mode 100644 index 026f0d865..000000000 Binary files a/docs/versioned_docs/version-2.21/_media/example-online-boutique.jpg and /dev/null differ diff --git a/docs/versioned_docs/version-2.21/_media/recovery-gcp-serial-console-link.png b/docs/versioned_docs/version-2.21/_media/recovery-gcp-serial-console-link.png deleted file mode 100644 index eb67f0e99..000000000 Binary files a/docs/versioned_docs/version-2.21/_media/recovery-gcp-serial-console-link.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.21/_media/tcb.svg b/docs/versioned_docs/version-2.21/_media/tcb.svg deleted file mode 100644 index e5bcb5b95..000000000 --- a/docs/versioned_docs/version-2.21/_media/tcb.svg +++ /dev/null @@ -1,535 +0,0 @@ - - diff --git a/docs/versioned_docs/version-2.21/architecture/attestation.md b/docs/versioned_docs/version-2.21/architecture/attestation.md deleted file mode 100644 index 9bd157460..000000000 --- a/docs/versioned_docs/version-2.21/architecture/attestation.md +++ /dev/null @@ -1,409 +0,0 @@ -# Attestation - -This page explains Constellation's attestation process and highlights the cornerstones of its trust model. - -## Terms - -The following lists terms and concepts that help to understand the attestation concept of Constellation. 
- -### Trusted Platform Module (TPM) - -A TPM chip is a dedicated tamper-resistant crypto-processor. -It can securely store artifacts such as passwords, certificates, encryption keys, or *runtime measurements* (more on this below). -When a TPM is implemented in software, it's typically called a *virtual* TPM (vTPM). - -### Runtime measurement - -A runtime measurement is a cryptographic hash of the memory pages of a so called *runtime component*. Runtime components of interest typically include a system's bootloader or OS kernel. - -### Platform Configuration Register (PCR) - -A Platform Configuration Register (PCR) is a memory location in the TPM that has some unique properties. -To store a new value in a PCR, the existing value is extended with a new value as follows: - -``` -PCR[N] = HASHalg( PCR[N] || ArgumentOfExtend ) -``` - -The PCRs are typically used to store runtime measurements. -The new value of a PCR is always an extension of the existing value. -Thus, storing the measurements of multiple components into the same PCR irreversibly links them together. - -### Measured boot - -Measured boot builds on the concept of chained runtime measurements. -Each component in the boot chain loads and measures the next component into the PCR before executing it. -By comparing the resulting PCR values against trusted reference values, the integrity of the entire boot chain and thereby the running system can be ensured. - -### Remote attestation (RA) - -Remote attestation is the process of verifying certain properties of an application or platform, such as integrity and confidentiality, from a remote location. -In the case of a measured boot, the goal is to obtain a signed attestation statement on the PCR values of the boot measurements. -The statement can then be verified and compared to a set of trusted reference values. -This way, the integrity of the platform can be ensured before sharing secrets with it. - -### Confidential virtual machine (CVM) - -Confidential computing (CC) is the protection of data in-use with hardware-based trusted execution environments (TEEs). -With CVMs, TEEs encapsulate entire virtual machines and isolate them against the hypervisor, other VMs, and direct memory access. -After loading the initial VM image into encrypted memory, the hypervisor calls for a secure processor to measure these initial memory pages. -The secure processor locks these pages and generates an attestation report on the initial page measurements. -CVM memory pages are encrypted with a key that resides inside the secure processor, which makes sure only the guest VM can access them. -The attestation report is signed by the secure processor and can be verified using remote attestation via the certificate authority of the hardware vendor. -Such an attestation statement guarantees the confidentiality and integrity of a CVM. - -### Attested TLS (aTLS) - -In a CC environment, attested TLS (aTLS) can be used to establish secure connections between two parties using the remote attestation features of the CC components. - -aTLS modifies the TLS handshake by embedding an attestation statement into the TLS certificate. -Instead of relying on a certificate authority, aTLS uses this attestation statement to establish trust in the certificate. - -The protocol can be used by clients to verify a server certificate, by a server to verify a client certificate, or for mutual verification (mutual aTLS). 
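To make the PCR extension formula above concrete, here is a minimal, purely illustrative sketch of how a SHA-256 PCR value evolves when two measurements are extended into it. The file names are placeholders and this isn't Constellation code:

```bash
# Start with an all-zero PCR value (SHA-256 bank: 32 zero bytes, i.e. 64 hex digits)
pcr=$(printf '0%.0s' {1..64})

# extend: PCR[N] = SHA256( PCR[N] || measurement )
extend() {
  local measurement="$1"
  pcr=$(printf '%s%s' "$pcr" "$measurement" | xxd -r -p | sha256sum | cut -d ' ' -f 1)
}

# Measure two boot components (their SHA-256 digests) and extend them in order
extend "$(sha256sum bootloader.efi | cut -d ' ' -f 1)"
extend "$(sha256sum kernel.img | cut -d ' ' -f 1)"

echo "resulting PCR value: $pcr"
```

Because every extension hashes the previous PCR value together with the new measurement, the final value depends on all measurements and their order, which is what irreversibly links the components of the boot chain together.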
- -## Overview - -The challenge for Constellation is to lift a CVM's attestation statement to the Kubernetes software layer and make it end-to-end verifiable. -From there, Constellation needs to expand the attestation from a single CVM to the entire cluster. - -The [*JoinService*](microservices.md#joinservice) and [*VerificationService*](microservices.md#verificationservice) are where all runs together. -Internally, the *JoinService* uses remote attestation to securely join CVM nodes to the cluster. -Externally, the *VerificationService* provides an attestation statement for the cluster's CVMs and configuration. - -The following explains the details of both steps. - -## Node attestation - -The idea is that Constellation nodes should have verifiable integrity from the CVM hardware measurement up to the Kubernetes software layer. -The solution is a verifiable boot chain and an integrity-protected runtime environment. - -Constellation uses measured boot within CVMs, measuring each component in the boot process before executing it. -Outside of CC, this is usually implemented via TPMs. -CVM technologies differ in how they implement runtime measurements, but the general concepts are similar to those of a TPM. -For simplicity, TPM terminology like *PCR* is used in the following. - -When a Constellation node image boots inside a CVM, measured boot is used for all stages and components of the boot chain. -This process goes up to the root filesystem. -The root filesystem is mounted read-only with integrity protection. -For the details on the image and boot stages see the [image architecture](../architecture/images.md) documentation. -Any changes to the image will inevitably also change the corresponding PCR values. -To create a node attestation statement, the Constellation image obtains a CVM attestation statement from the hardware. -This includes the runtime measurements and thereby binds the measured boot results to the CVM hardware measurement. - -In addition to the image measurements, Constellation extends a PCR during the [initialization phase](../workflows/create.md) that irrevocably marks the node as initialized. -The measurement is created using the [*clusterID*](../architecture/keys.md#cluster-identity), tying all future attestation statements to this ID. -Thereby, an attestation statement is unique for every cluster and a node can be identified unambiguously as being initialized. - -To verify an attestation, the hardware's signature and a statement are verified first to establish trust in the contained runtime measurements. -If successful, the measurements are verified against the trusted values of the particular Constellation release version. -Finally, the measurement of the *clusterID* can be compared by calculating it with the [master secret](keys.md#master-secret). - -### Runtime measurements - -Constellation uses runtime measurements to implement the measured boot approach. -As stated above, the underlying hardware technology and guest firmware differ in their implementations of runtime measurements. -The following gives a detailed description of the available measurements in the different cloud environments. - -The runtime measurements consist of two types of values: - -* **Measurements produced by the cloud infrastructure and firmware of the CVM**: -These are measurements of closed-source firmware and other values controlled by the cloud provider. -While not being reproducible for the user, some of them can be compared against previously observed values. 
-Others may change frequently and aren't suitable for verification. -The [signed image measurements](#chain-of-trust) include measurements that are known, previously observed values. - -* **Measurements produced by the Constellation bootloader and boot chain**: -The Constellation Bootloader takes over from the CVM firmware and [measures the rest of the boot chain](images.md). -The Constellation [Bootstrapper](microservices.md#bootstrapper) is the first user mode component that runs in a Constellation image. -It extends PCR registers with the [IDs](keys.md#cluster-identity) of the cluster marking a node as initialized. - -Constellation allows to specify in the config which measurements should be enforced during the attestation process. -Enforcing non-reproducible measurements controlled by the cloud provider means that changes in these values require manual updates to the cluster's config. -By default, Constellation only enforces measurements that are stable values produced by the infrastructure or by Constellation directly. - - - - -Constellation uses the [vTPM](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html) (NitroTPM) feature of the [AWS Nitro System](http://aws.amazon.com/ec2/nitro/) on AWS for runtime measurements. - -The vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. -The VMs are attested by obtaining signed PCR values over the VM's boot configuration from the TPM and comparing them to a known, good state (measured boot). - -The following table lists all PCR values of the vTPM and the measured components. -It also lists what components of the boot chain did the measurements and if the value is reproducible and verifiable. -The latter means that the value can be generated offline and compared to the one in the vTPM. - -| PCR | Components | Measured by | Reproducible and verifiable | -| ----------- | ---------------------------------------------------------------- | -------------------------------------- | --------------------------- | -| 0 | Firmware | AWS | No | -| 1 | Firmware | AWS | No | -| 2 | Firmware | AWS | No | -| 3 | Firmware | AWS | No | -| 4 | Constellation Bootloader, Kernel, initramfs, Kernel command line | AWS, Constellation Bootloader | Yes | -| 5 | Firmware | AWS | No | -| 6 | Firmware | AWS | No | -| 7 | Secure Boot Policy | AWS, Constellation Bootloader | No | -| 8 | - | - | - | -| 9 | initramfs, Kernel command line | Linux Kernel | Yes | -| 10 | User space | Linux IMA | No[^1] | -| 11 | Unified Kernel Image components | Constellation Bootloader | Yes | -| 12 | Reserved | (User space, Constellation Bootloader) | Yes | -| 13 | Reserved | (Constellation Bootloader) | Yes | -| 14 | Secure Boot State | Constellation Bootloader | No | -| 15 | ClusterID | Constellation Bootstrapper | Yes | -| 16–23 | Unused | - | - | - - - - -Constellation uses the [vTPM](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch#vtpm) feature of Azure CVMs for runtime measurements. -This vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. -It provides a [measured boot](https://docs.microsoft.com/en-us/azure/security/fundamentals/measured-boot-host-attestation#measured-boot) verification that's based on the trusted launch feature of [Trusted Launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch). - -The following table lists all PCR values of the vTPM and the measured components. 
-It also lists what components of the boot chain did the measurements and if the value is reproducible and verifiable. -The latter means that the value can be generated offline and compared to the one in the vTPM. - -| PCR | Components | Measured by | Reproducible and verifiable | -| ----------- | ---------------------------------------------------------------- | -------------------------------------- | --------------------------- | -| 0 | Firmware | Azure | No | -| 1 | Firmware | Azure | No | -| 2 | Firmware | Azure | No | -| 3 | Firmware | Azure | No | -| 4 | Constellation Bootloader, Kernel, initramfs, Kernel command line | Azure, Constellation Bootloader | Yes | -| 5 | Reserved | Azure | No | -| 6 | VM Unique ID | Azure | No | -| 7 | Secure Boot State | Azure, Constellation Bootloader | No | -| 8 | - | - | - | -| 9 | initramfs, Kernel command line | Linux Kernel | Yes | -| 10 | User space | Linux IMA | No[^1] | -| 11 | Unified Kernel Image components | Constellation Bootloader | Yes | -| 12 | Reserved | (User space, Constellation Bootloader) | Yes | -| 13 | Reserved | (Constellation Bootloader) | Yes | -| 14 | Secure Boot State | Constellation Bootloader | No | -| 15 | ClusterID | Constellation Bootstrapper | Yes | -| 16–23 | Unused | - | - | - - - - -Constellation uses the [vTPM](https://cloud.google.com/compute/confidential-vm/docs/about-cvm) feature of CVMs on GCP for runtime measurements. -Note that this vTPM doesn't run inside the hardware-protected CVM context, but is emulated by the hypervisor. - -The vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. -It provides a [launch attestation report](https://cloud.google.com/compute/confidential-vm/docs/monitoring#about_launch_attestation_report_events) that's based on the measured boot feature of [Shielded VMs](https://cloud.google.com/compute/shielded-vm/docs/shielded-vm#measured-boot). - -The following table lists all PCR values of the vTPM and the measured components. -It also lists what components of the boot chain did the measurements and if the value is reproducible and verifiable. -The latter means that the value can be generated offline and compared to the one in the vTPM. - -| PCR | Components | Measured by | Reproducible and verifiable | -| ----------- | ---------------------------------------------------------------- | -------------------------------------- | --------------------------- | -| 0 | CVM version and technology | GCP | No | -| 1 | Firmware | GCP | No | -| 2 | Firmware | GCP | No | -| 3 | Firmware | GCP | No | -| 4 | Constellation Bootloader, Kernel, initramfs, Kernel command line | GCP, Constellation Bootloader | Yes | -| 5 | Disk GUID partition table | GCP | No | -| 6 | Disk GUID partition table | GCP | No | -| 7 | GCP Secure Boot Policy | GCP, Constellation Bootloader | No | -| 8 | - | - | - | -| 9 | initramfs, Kernel command line | Linux Kernel | Yes | -| 10 | User space | Linux IMA | No[^1] | -| 11 | Unified Kernel Image components | Constellation Bootloader | Yes | -| 12 | Reserved | (User space, Constellation Bootloader) | Yes | -| 13 | Reserved | (Constellation Bootloader) | Yes | -| 14 | Secure Boot State | Constellation Bootloader | No | -| 15 | ClusterID | Constellation Bootstrapper | Yes | -| 16–23 | Unused | - | - | - - - - -Constellation uses a hypervisor-based vTPM for runtime measurements. - -The vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. 
-The VMs are attested by obtaining signed PCR values over the VM's boot configuration from the TPM and comparing them to a known, good state (measured boot). - -The following table lists all PCR values of the vTPM and the measured components. -It also lists what components of the boot chain did the measurements and if the value is reproducible and verifiable. -The latter means that the value can be generated offline and compared to the one in the vTPM. - -| PCR | Components | Measured by | Reproducible and verifiable | -| ----------- | ---------------------------------------------------------------- | -------------------------------------- | --------------------------- | -| 0 | Firmware | STACKIT | No | -| 1 | Firmware | STACKIT | No | -| 2 | Firmware | STACKIT | No | -| 3 | Firmware | STACKIT | No | -| 4 | Constellation Bootloader, Kernel, initramfs, Kernel command line | STACKIT, Constellation Bootloader | Yes | -| 5 | Firmware | STACKIT | No | -| 6 | Firmware | STACKIT | No | -| 7 | Secure Boot Policy | STACKIT, Constellation Bootloader | No | -| 8 | - | - | - | -| 9 | initramfs, Kernel command line | Linux Kernel | Yes | -| 10 | User space | Linux IMA | No[^1] | -| 11 | Unified Kernel Image components | Constellation Bootloader | Yes | -| 12 | Reserved | (User space, Constellation Bootloader) | Yes | -| 13 | Reserved | (Constellation Bootloader) | Yes | -| 14 | Secure Boot State | Constellation Bootloader | No | -| 15 | ClusterID | Constellation Bootstrapper | Yes | -| 16–23 | Unused | - | - | - - - - -### CVM verification - -To verify the integrity of the received attestation statement, a chain of trust from the CVM technology to the interface providing the statement has to be established. -For verification of the CVM technology, Constellation may expose additional options in its config file. - - - - -On AWS, AMD SEV-SNP is used to provide runtime encryption to the VMs. -An SEV-SNP attestation report is used to establish trust in the VM. -You may customize certain parameters for verification of the attestation statement using the Constellation config file. - -* TCB versions - - You can set the minimum version numbers of components in the SEV-SNP TCB. - Use the latest versions to enforce that only machines with the most recent firmware updates are allowed to join the cluster. - Alternatively, you can set a lower minimum version to allow slightly out-of-date machines to still be able to join the cluster. - -* AMD Root Key Certificate - - This certificate is the root of trust for verifying the SEV-SNP certificate chain. - -* AMD Signing Key Certificate - - This is the intermediate certificate for verifying the SEV-SNP report's signature. - If it's not specified, the CLI fetches it from the AMD key distribution server. - - - - -On Azure, AMD SEV-SNP is used to provide runtime encryption to the VMs. -An SEV-SNP attestation report is used to establish trust in the vTPM running inside the VM. -You may customize certain parameters for verification of the attestation statement using the Constellation config file. - -* TCB versions - - You can set the minimum version numbers of components in the SEV-SNP TCB. - Use the latest versions to enforce that only machines with the most recent firmware updates are allowed to join the cluster. - Alternatively, you can set a lower minimum version to allow slightly out-of-date machines to still be able to join the cluster. - -* AMD Root Key Certificate - - This certificate is the root of trust for verifying the SEV-SNP certificate chain. 
- -* Firmware Signer - - This config option allows you to specify how the firmware signer should be verified. - More explicitly, it controls the verification of the `IDKeyDigest` value in the SEV-SNP attestation report. - You can provide a list of accepted key digests and specify a policy on how this list is compared against the reported `IDKeyDigest`. - - - - -On GCP, AMD SEV-SNP is used to provide runtime encryption to the VMs. -An SEV-SNP attestation report is used to establish trust in the VM. -You may customize certain parameters for verification of the attestation statement using the Constellation config file. - -* TCB versions - - You can set the minimum version numbers of components in the SEV-SNP TCB. - Use the latest versions to enforce that only machines with the most recent firmware updates are allowed to join the cluster. - Alternatively, you can set a lower minimum version to allow slightly out-of-date machines to still be able to join the cluster. - -* AMD Root Key Certificate - - This certificate is the root of trust for verifying the SEV-SNP certificate chain. - -* AMD Signing Key Certificate - - This is the intermediate certificate for verifying the SEV-SNP report's signature. - If it's not specified, the CLI fetches it from the AMD key distribution server. - - - - -On STACKIT, AMD SEV-ES is used to provide runtime encryption to the VMs. -The hypervisor-based vTPM is used to establish trust in the VM via [runtime measurements](#runtime-measurements). -There is no additional configuration available for STACKIT. - - - - -## Cluster attestation - -Cluster-facing, Constellation's [*JoinService*](microservices.md#joinservice) verifies each node joining the cluster given the configured ground truth runtime measurements. -User-facing, the [*VerificationService*](microservices.md#verificationservice) provides an interface to verify a node using remote attestation. -By verifying the first node during the [initialization](microservices.md#bootstrapper) and configuring the ground truth measurements that are subsequently enforced by the *JoinService*, the whole cluster is verified in a transitive way. - -### Cluster-facing attestation - -The *JoinService* is provided with the runtime measurements of the whitelisted Constellation image version as the ground truth. -During the initialization and the cluster bootstrapping, each node connects to the *JoinService* using [aTLS](#attested-tls-atls). -During the handshake, the node transmits an attestation statement including its runtime measurements. -The *JoinService* verifies that statement and compares the measurements against the ground truth. -For details of the initialization process check the [microservice descriptions](microservices.md). - -After the initialization, every node updates its runtime measurements with the *clusterID* value, marking it irreversibly as initialized. -When an initialized node tries to join another cluster, its measurements inevitably mismatch the measurements of an uninitialized node and it will be declined. - -### User-facing attestation - -The [*VerificationService*](microservices.md#verificationservice) provides an endpoint for obtaining its hardware-based remote attestation statement, which includes the runtime measurements. -A user can [verify](../workflows/verify-cluster.md) this statement and compare the measurements against the configured ground truth and, thus, verify the identity and integrity of all Constellation components and the cluster configuration. 
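As an illustration of this comparison step, the following Go sketch checks reported PCR values against configured reference values. The data structures and the warn-only handling of non-enforced measurements are simplifications for illustration only, not Constellation's actual configuration format or verification code.

```go
package main

import (
	"bytes"
	"fmt"
)

// Measurement is an illustrative reference value for a single PCR.
// Enforced measurements must match exactly; others are assumed to be warn-only.
type Measurement struct {
	Expected []byte
	Enforced bool
}

// compare checks reported PCR values against the configured ground truth.
func compare(reference map[uint32]Measurement, reported map[uint32][]byte) error {
	for idx, ref := range reference {
		got, ok := reported[idx]
		if !ok {
			if ref.Enforced {
				return fmt.Errorf("PCR %d missing from attestation statement", idx)
			}
			continue
		}
		if !bytes.Equal(ref.Expected, got) {
			if ref.Enforced {
				return fmt.Errorf("PCR %d does not match reference value", idx)
			}
			fmt.Printf("warning: PCR %d differs but is not enforced\n", idx)
		}
	}
	return nil
}

func main() {
	reference := map[uint32]Measurement{
		4:  {Expected: []byte{0xaa}, Enforced: true},  // boot chain, reproducible
		15: {Expected: []byte{0xbb}, Enforced: true},  // clusterID
		0:  {Expected: []byte{0xcc}, Enforced: false}, // firmware, provider-controlled
	}
	reported := map[uint32][]byte{4: {0xaa}, 15: {0xbb}, 0: {0xdd}}

	if err := compare(reference, reported); err != nil {
		fmt.Println("verification failed:", err)
		return
	}
	fmt.Println("measurements match the configured ground truth")
}
```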
Subsequently, the user knows that the entire cluster is in the expected state and is trustworthy. - -## Putting it all together - -This section puts the aforementioned concepts together and illustrate how trust into a Constellation cluster is established and maintained. - -### CLI and node images - -It all starts with the CLI executable. The CLI is signed by Edgeless Systems. To ensure non-repudiability for CLI releases, Edgeless Systems publishes corresponding signatures to the public ledger of the [sigstore project](https://www.sigstore.dev/). There's a [step-by-step guide](../workflows/verify-cli.md) on how to verify CLI signatures based on sigstore. - -The CLI contains the latest runtime measurements of the Constellation node image for all supported cloud platforms. In case a different version of the node image is to be used, the corresponding runtime measurements can be fetched using the CLI's [fetch-measurements command](../reference/cli.md#constellation-config-fetch-measurements). This command downloads the runtime measurements and the corresponding signature from cdn.confidential.cloud. See for example the following files corresponding to node image v2.16.3: - -* [Measurements](https://cdn.confidential.cloud/constellation/v2/ref/-/stream/stable/v2.16.3/image/measurements.json) -* [Signature](https://cdn.confidential.cloud/constellation/v2/ref/-/stream/stable/v2.16.3/image/measurements.json.sig) - -The CLI contains the long-term public key of Edgeless Systems to verify the signature of downloaded runtime measurements. - -### Cluster creation - -When a cluster is [created](../workflows/create.md), the CLI automatically verifies the runtime measurements of the *first node* using remote attestation. Based on this, the CLI and the first node set up a temporary TLS connection. This [aTLS](#attested-tls-atls) connection is used for two things: - -1. The CLI sends the [master secret](../architecture/keys.md#master-secret) of the to-be-created cluster to the CLI. The master secret is generated by the first node. -2. The first node sends a [kubeconfig file](https://www.redhat.com/sysadmin/kubeconfig) with Kubernetes credentials to the CLI. - -After this, the aTLS connection is closed and the first node bootstraps the Kubernetes cluster. All subsequent interactions between the CLI and the cluster go via the [Kubernetes API](https://kubernetes.io/docs/concepts/overview/kubernetes-api/) server running inside the cluster. The CLI (and other tools like kubectl) use the credentials referenced by the kubeconfig file to authenticate themselves towards the Kubernetes API server and to establish a mTLS connection. - -The CLI connects to the Kubernetes API to write the runtime measurements for the applicable node image to etcd. The JoinService uses these runtime measurements to verify all nodes that join the cluster subsequently. - -### Chain of trust - -In summary, there's a chain of trust based on cryptographic signatures that goes from the user to the cluster via the CLI. This is illustrated in the following diagram. 
- -```mermaid -flowchart LR - A[User]-- "verifies" -->B[CLI] - B[CLI]-- "verifies" -->C([Runtime measurements]) - D[Edgeless Systems]-- "signs" -->B[CLI] - D[Edgeless Systems]-- "signs" -->C([Runtime measurements]) - B[CLI]-- "verifies (remote attestation)" -->E[First node] - E[First node]-- "verifies (remote attestation)" -->F[Other nodes] - C([Runtime measurements]) -.-> E[First node] - C([Runtime measurements]) -.-> F[Other nodes] -``` - -### Upgrades - -Whenever a cluster is [upgraded](../workflows/upgrade.md) to a new version of the node image, the CLI sends the corresponding runtime measurements via the Kubernetes API server. The new runtime measurements are stored in etcd within the cluster and replace any previous runtime measurements. The new runtime measurements are then used automatically by the JoinService for the verification of new nodes. - -## References - -[^1]: Linux IMA produces runtime measurements of user-space binaries. -However, these measurements aren't deterministic and thus, PCR\[10] can't be compared to a constant value. -Instead, a policy engine must be used to verify the TPM event log against a policy. diff --git a/docs/versioned_docs/version-2.21/architecture/encrypted-storage.md b/docs/versioned_docs/version-2.21/architecture/encrypted-storage.md deleted file mode 100644 index f047fa4a9..000000000 --- a/docs/versioned_docs/version-2.21/architecture/encrypted-storage.md +++ /dev/null @@ -1,62 +0,0 @@ -# Encrypted persistent storage - -Confidential VMs provide runtime memory encryption to protect data in use. -In the context of Kubernetes, this is sufficient for the confidentiality and integrity of stateless services. -Consider a front-end web server, for example, that keeps all connection information cached in main memory. -No sensitive data is ever written to an insecure medium. -However, many real-world applications need some form of state or data-lake service that's connected to a persistent storage device and requires encryption at rest. -As described in [Use persistent storage](../workflows/storage.md), cloud service providers (CSPs) use the container storage interface (CSI) to make their storage solutions available to Kubernetes workloads. -These CSI storage solutions often support some sort of encryption. -For example, Google Cloud [encrypts data at rest by default](https://cloud.google.com/security/encryption/default-encryption), without any action required by the customer. - -## Cloud provider-managed encryption - -CSP-managed storage solutions encrypt the data in the cloud backend before writing it physically to disk. -In the context of confidential computing and Constellation, the CSP and its managed services aren't trusted. -Hence, cloud provider-managed encryption protects your data from offline hardware access to physical storage devices. -It doesn't protect it from anyone with infrastructure-level access to the storage backend or a malicious insider in the cloud platform. -Even with "bring your own key" or similar concepts, the CSP performs the encryption process with access to the keys and plaintext data. - -In the security model of Constellation, securing persistent storage and thereby data at rest requires that all cryptographic operations are performed inside a trusted execution environment. -Consequently, using CSP-managed encryption of persistent storage usually isn't an option. - -## Constellation-managed encryption - -Constellation provides CSI drivers for storage solutions in all major clouds with built-in encryption support. 
-Block storage provisioned by the CSP is [mapped](https://guix.gnu.org/manual/en/html_node/Mapped-Devices.html) using the [dm-crypt](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/dm-crypt.html), and optionally the [dm-integrity](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/dm-integrity.html), kernel modules, before it's formatted and accessed by the Kubernetes workloads. -All cryptographic operations happen inside the trusted environment of the confidential Constellation node. - -Note that for integrity-protected disks, [volume expansion](https://kubernetes.io/blog/2018/07/12/resizing-persistent-volumes-using-kubernetes/) isn't supported. - -By default the driver uses data encryption keys (DEKs) issued by the Constellation [*KeyService*](microservices.md#keyservice). -The DEKs are in turn derived from the Constellation's key encryption key (KEK), which is directly derived from the [master secret](keys.md#master-secret). -This is the recommended mode of operation, and also requires the least amount of setup by the cluster administrator. - -Alternatively, the driver can be configured to use a key management system to store and access KEKs and DEKs. - -Refer to [keys and cryptography](keys.md) for more details on key management in Constellation. - -Once deployed and configured, the CSI driver ensures transparent encryption and integrity of all persistent volumes provisioned via its storage class. -Data at rest is secured without any additional actions required by the developer. - -## Cryptographic algorithms - -This section gives an overview of the libraries, cryptographic algorithms, and their configurations, used in Constellation's CSI drivers. - -### dm-crypt - -To interact with the dm-crypt kernel module, Constellation uses [libcryptsetup](https://gitlab.com/cryptsetup/cryptsetup/). -New devices are formatted as [LUKS2](https://gitlab.com/cryptsetup/LUKS2-docs/-/tree/master) partitions with a sector size of 4096 bytes. -The used key derivation function is [Argon2id](https://datatracker.ietf.org/doc/html/rfc9106) with the [recommended parameters for memory-constrained environments](https://datatracker.ietf.org/doc/html/rfc9106#section-7.4) of 3 iterations and 64 MiB of memory, utilizing 4 parallel threads. -For encryption Constellation uses AES in XTS-Plain64. The key size is 512 bit. - -### dm-integrity - -To interact with the dm-integrity kernel module, Constellation uses [libcryptsetup](https://gitlab.com/cryptsetup/cryptsetup/). -When enabled, the used data integrity algorithm is [HMAC](https://datatracker.ietf.org/doc/html/rfc2104) with SHA256 as the hash function. -The tag size is 32 Bytes. - -## Encrypted S3 object storage - -Constellation comes with a service that you can use to transparently retrofit client-side encryption to existing applications that use S3 (AWS or compatible) for storage. -To learn more, check out the [s3proxy documentation](../workflows/s3proxy.md). diff --git a/docs/versioned_docs/version-2.21/architecture/images.md b/docs/versioned_docs/version-2.21/architecture/images.md deleted file mode 100644 index 8a9c51d36..000000000 --- a/docs/versioned_docs/version-2.21/architecture/images.md +++ /dev/null @@ -1,49 +0,0 @@ -# Constellation images - -Constellation uses a minimal version of Fedora as the operating system running inside confidential VMs. This Linux distribution is optimized for containers and designed to be stateless. -The Constellation images provide measured boot and an immutable filesystem. 
- -## Measured boot - -```mermaid -flowchart LR - Firmware --> Bootloader - Bootloader --> uki - subgraph uki[Unified Kernel Image] - Kernel[Kernel] - initramfs[Initramfs] - cmdline[Kernel Command Line] - end - uki --> rootfs[Root Filesystem] -``` - -Measured boot uses a Trusted Platform Module (TPM) to measure every part of the boot process. This allows for verification of the integrity of a running system at any point in time. To ensure correct measurements of every stage, each stage is responsible to measure the next stage before transitioning. - -### Firmware - -With confidential VMs, the firmware is the root of trust and is measured automatically at boot. After initialization, the firmware will load and measure the bootloader before executing it. - -### Bootloader - -The bootloader is the first modifiable part of the boot chain. The bootloader is tasked with loading the kernel, initramfs and setting the kernel command line. The Constellation bootloader measures these components before starting the kernel. - -### initramfs - -The initramfs is a small filesystem loaded to prepare the actual root filesystem. The Constellation initramfs maps the block device containing the root filesystem with [dm-verity](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/verity.html). The initramfs then mounts the root filesystem from the mapped block device. - -dm-verity provides integrity checking using a cryptographic hash tree. When a block is read, its integrity is checked by verifying the tree against a trusted root hash. The initramfs reads this root hash from the previously measured kernel command line. Thus, if any block of the root filesystem's device is modified on disk, trying to read the modified block will result in a kernel panic at runtime. - -After mounting the root filesystem, the initramfs will switch over and start the `init` process of the integrity-protected root filesystem. - -## State disk - -In addition to the read-only root filesystem, each Constellation node has a disk for storing state data. -This disk is mounted readable and writable by the initramfs and contains data that should persist across reboots. -Such data can contain sensitive information and, therefore, must be stored securely. -To that end, the state disk is protected by authenticated encryption. -See the section on [keys and encryption](keys.md#storage-encryption) for more information on the cryptographic primitives in use. - -## Kubernetes components - -During initialization, the [*Bootstrapper*](microservices.md#bootstrapper) downloads and verifies the [Kubernetes components](https://kubernetes.io/docs/concepts/overview/components/) as configured by the user. -They're stored on the state partition and can be updated once new releases need to be installed. diff --git a/docs/versioned_docs/version-2.21/architecture/keys.md b/docs/versioned_docs/version-2.21/architecture/keys.md deleted file mode 100644 index 49821cd0b..000000000 --- a/docs/versioned_docs/version-2.21/architecture/keys.md +++ /dev/null @@ -1,130 +0,0 @@ -# Key management and cryptographic primitives - -Constellation protects and isolates your cluster and workloads. -To that end, cryptography is the foundation that ensures the confidentiality and integrity of all components. -Evaluating the security and compliance of Constellation requires a precise understanding of the cryptographic primitives and keys used. -The following gives an overview of the architecture and explains the technical details. 
- -## Confidential VMs - -Confidential VM (CVM) technology comes with hardware and software components for memory encryption, isolation, and remote attestation. -For details on the implementations and cryptographic soundness, refer to the hardware vendors' documentation and advisories. - -## Master secret - -The master secret is the cryptographic material used for deriving the [*clusterID*](#cluster-identity) and the *key encryption key (KEK)* for [storage encryption](#storage-encryption). -It's generated during the bootstrapping of a Constellation cluster. -It can either be managed by [Constellation](#constellation-managed-key-management) or an [external key management system](#user-managed-key-management). -In case of [recovery](#recovery-and-migration), the master secret allows to decrypt the state and recover a Constellation cluster. - -## Cluster identity - -The identity of a Constellation cluster is represented by cryptographic [measurements](attestation.md#runtime-measurements): - -The **base measurements** represent the identity of a valid, uninitialized Constellation node. -They depend on the node image, but are otherwise the same for every Constellation cluster. -On node boot, they're determined using the CVM's attestation mechanism and [measured boot up to the read-only root filesystem](images.md). - -The **clusterID** represents the identity of a single initialized Constellation cluster. -It's derived from the master secret and a cryptographically random salt and unique for every Constellation cluster. -The [Bootstrapper](microservices.md#bootstrapper) measures the *clusterID* into its own PCR before executing any code not measured as part of the *base measurements*. -See [Node attestation](attestation.md#node-attestation) for details. - -The remote attestation statement of a Constellation cluster combines the *base measurements* and the *clusterID* for a verifiable, unspoofable, unique identity. - -## Network encryption - -Constellation encrypts all cluster network communication using the [container network interface (CNI)](https://github.com/containernetworking/cni). -See [network encryption](networking.md) for more details. - -The Cilium agent running on each node establishes a secure [WireGuard](https://www.wireguard.com/) tunnel between it and all other known nodes in the cluster. -Each node creates its own [Curve25519](http://cr.yp.to/ecdh.html) encryption key pair and distributes its public key via Kubernetes. -A node uses another node's public key to decrypt and encrypt traffic from and to Cilium-managed endpoints running on that node. -Connections are always encrypted peer-to-peer using [ChaCha20](http://cr.yp.to/chacha.html) with [Poly1305](http://cr.yp.to/mac.html). -WireGuard implements [forward secrecy with key rotation every 2 minutes](https://lists.zx2c4.com/pipermail/wireguard/2017-December/002141.html). - -## Storage encryption - -Constellation supports transparent encryption of persistent storage. -The Linux kernel's device mapper-based encryption features are used to encrypt the data on the block storage level. -Currently, the following primitives are used for block storage encryption: - -* [dm-crypt](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/dm-crypt.html) -* [dm-integrity](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/dm-integrity.html) - -Adding primitives for integrity protection in the CVM attacker model are under active development and will be available in a future version of Constellation. 
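How the keys for this storage encryption are managed is described in the following sections. As a rough preview of the Constellation-managed mode described below, where each data encryption key (DEK) is derived deterministically from the key encryption key (KEK) via HKDF, a derivation sketch in Go might look as follows. It uses the `golang.org/x/crypto/hkdf` package and an illustrative volume identifier; it isn't Constellation's actual derivation code.

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"io"

	"golang.org/x/crypto/hkdf"
)

// deriveDEK deterministically derives a data encryption key for the given
// identifier from the key encryption key. The same KEK and identifier always
// yield the same DEK, so DEKs never need to be stored.
func deriveDEK(kek []byte, identifier string, length int) ([]byte, error) {
	r := hkdf.New(sha256.New, kek, nil, []byte(identifier))
	dek := make([]byte, length)
	if _, err := io.ReadFull(r, dek); err != nil {
		return nil, err
	}
	return dek, nil
}

func main() {
	kek := []byte("example KEK, in reality derived from the master secret")

	dek, err := deriveDEK(kek, "pvc-1234", 32) // "pvc-1234" is a made-up identifier
	if err != nil {
		panic(err)
	}
	fmt.Printf("DEK for pvc-1234: %x\n", dek)
}
```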
-See [encrypted storage](encrypted-storage.md) for more details. - -As a cluster administrator, when creating a cluster, you can use the Constellation [installation program](orchestration.md) to select one of the following methods for key management: - -* Constellation-managed key management -* User-managed key management - -### Constellation-managed key management - -#### Key material and key derivation - -During the creation of a Constellation cluster, the cluster's master secret is used to derive a KEK. -This means creating two clusters with the same master secret will yield the same KEK. -Any data encryption key (DEK) is derived from the KEK via HKDF. -Note that the master secret is recommended to be unique for every cluster and shouldn't be reused (except in case of [recovering](../workflows/recovery.md) a cluster). - -#### State and storage - -The KEK is derived from the master secret during the initialization. -Subsequently, all other key material is derived from the KEK. -Given the same KEK, any DEK can be derived deterministically from a given identifier. -Hence, there is no need to store DEKs. They can be derived on demand. -After the KEK was derived, it's stored in memory only and never leaves the CVM context. - -#### Availability - -Constellation-managed key management has the same availability as the underlying Kubernetes cluster. -Therefore, the KEK is stored in the [distributed Kubernetes etcd storage](https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/) to allow for unexpected but non-fatal (control-plane) node failure. -The etcd storage is backed by the encrypted and integrity protected [state disk](images.md#state-disk) of the nodes. - -#### Recovery - -Constellation clusters can be recovered in the event of a disaster, even when all node machines have been stopped and need to be rebooted. -For details on the process see the [recovery workflow](../workflows/recovery.md). - -### User-managed key management - -User-managed key management is under active development and will be available soon. -In scenarios where constellation-managed key management isn't an option, this mode allows you to keep full control of your keys. -For example, compliance requirements may force you to keep your KEKs in an on-prem key management system (KMS). - -During the creation of a Constellation cluster, you specify a KEK present in a remote KMS. -This follows the common scheme of "bring your own key" (BYOK). -Constellation will support several KMSs for managing the storage and access of your KEK. -Initially, it will support the following KMSs: - -* [AWS KMS](https://aws.amazon.com/kms/) -* [GCP KMS](https://cloud.google.com/security-key-management) -* [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/#product-overview) -* [KMIP-compatible KMS](https://www.oasis-open.org/committees/tc_home.php?wg_abbrev=kmip) - -Storing the keys in Cloud KMS of AWS, Azure, or GCP binds the key usage to the particular cloud identity access management (IAM). -In the future, Constellation will support remote attestation-based access policies for Cloud KMS once available. -Note that using a Cloud KMS limits the isolation and protection to the guarantees of the particular offering. - -KMIP support allows you to use your KMIP-compatible on-prem KMS and keep full control over your keys. -This follows the common scheme of "hold your own key" (HYOK). - -The KEK is used to encrypt per-data "data encryption keys" (DEKs). 
-DEKs are generated to encrypt your data before storing it on persistent storage. -After being encrypted by the KEK, the DEKs are stored on dedicated cloud storage for persistence. -Currently, Constellation supports the following cloud storage options: - -* [AWS S3](https://aws.amazon.com/s3/) -* [GCP Cloud Storage](https://cloud.google.com/storage) -* [Azure Blob Storage](https://azure.microsoft.com/en-us/services/storage/blobs/#overview) - -The DEKs are only present in plaintext form in the encrypted main memory of the CVMs. -Similarly, the cryptographic operations for encrypting data before writing it to persistent storage are performed in the context of the CVMs. - -#### Recovery and migration - -In the case of a disaster, the KEK can be used to decrypt the DEKs locally and subsequently use them to decrypt and retrieve the data. -In case of migration, configuring the same KEK will provide seamless migration of data. -Thus, only the DEK storage needs to be transferred to the new cluster alongside the encrypted data for seamless migration. diff --git a/docs/versioned_docs/version-2.21/architecture/microservices.md b/docs/versioned_docs/version-2.21/architecture/microservices.md deleted file mode 100644 index 90bae783b..000000000 --- a/docs/versioned_docs/version-2.21/architecture/microservices.md +++ /dev/null @@ -1,73 +0,0 @@ -# Microservices - -Constellation takes care of bootstrapping and initializing a Confidential Kubernetes cluster. -During the lifetime of the cluster, it handles day 2 operations such as key management, remote attestation, and updates. -These features are provided by several microservices: - -* The [Bootstrapper](microservices.md#bootstrapper) initializes a Constellation node and bootstraps the cluster -* The [JoinService](microservices.md#joinservice) joins new nodes to an existing cluster -* The [VerificationService](microservices.md#verificationservice) provides remote attestation functionality -* The [KeyService](microservices.md#keyservice) manages Constellation-internal keys - -The relations between microservices are shown in the following diagram: - -```mermaid -flowchart LR - subgraph admin [Admin's machine] - A[Constellation CLI] - end - subgraph img [Constellation OS image] - B[Constellation OS] - C[Bootstrapper] - end - subgraph Kubernetes - D[JoinService] - E[KeyService] - F[VerificationService] - end - A -- deploys --> - B -- starts --> C - C -- deploys --> D - C -- deploys --> E - C -- deploys --> F -``` - -## Bootstrapper - -The *Bootstrapper* is the first microservice launched after booting a Constellation node image. -It sets up that machine as a Kubernetes node and integrates that node into the Kubernetes cluster. -To this end, the *Bootstrapper* first downloads and verifies the [Kubernetes components](https://kubernetes.io/docs/concepts/overview/components/) at the configured versions. -The *Bootstrapper* tries to find an existing cluster and if successful, communicates with the [JoinService](microservices.md#joinservice) to join the node. -Otherwise, it waits for an initialization request to create a new Kubernetes cluster. - -## JoinService - -The *JoinService* runs as [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) on each control-plane node. -New nodes (at cluster start, or later through autoscaling) send a request to the service over [attested TLS (aTLS)](attestation.md#attested-tls-atls). -The *JoinService* verifies the new node's certificate and attestation statement. 
-If attestation is successful, the new node is supplied with an encryption key from the [*KeyService*](microservices.md#keyservice) for its state disk, and a Kubernetes bootstrap token. - - -```mermaid -sequenceDiagram - participant New node - participant JoinService - New node->>JoinService: aTLS handshake (server side verification) - JoinService-->>New node: # - New node->>+JoinService: IssueJoinTicket(DiskUUID, NodeName, IsControlPlane) - JoinService->>+KeyService: GetDataKey(DiskUUID) - KeyService-->>-JoinService: DiskEncryptionKey - JoinService-->>-New node: DiskEncryptionKey, KubernetesJoinToken, ... -``` - -## VerificationService - -The *VerificationService* runs as DaemonSet on each node. -It provides user-facing functionality for remote attestation during the cluster's lifetime via an endpoint for [verifying the cluster](attestation.md#cluster-attestation). -Read more about the hardware-based [attestation feature](attestation.md) of Constellation and how to [verify](../workflows/verify-cluster.md) a cluster on the client side. - -## KeyService - -The *KeyService* runs as DaemonSet on each control-plane node. -It implements the key management for the [storage encryption keys](keys.md#storage-encryption) in Constellation. These keys are used for the [state disk](images.md#state-disk) of each node and the [transparently encrypted storage](encrypted-storage.md) for Kubernetes. -Depending on wether the [constellation-managed](keys.md#constellation-managed-key-management) or [user-managed](keys.md#user-managed-key-management) mode is used, the *KeyService* holds the key encryption key (KEK) directly or calls an external key management service (KMS) for key derivation respectively. diff --git a/docs/versioned_docs/version-2.21/architecture/networking.md b/docs/versioned_docs/version-2.21/architecture/networking.md deleted file mode 100644 index e9cbdf029..000000000 --- a/docs/versioned_docs/version-2.21/architecture/networking.md +++ /dev/null @@ -1,22 +0,0 @@ -# Network encryption - -Constellation encrypts all pod communication using the [container network interface (CNI)](https://github.com/containernetworking/cni). -To that end, Constellation deploys, configures, and operates the [Cilium](https://cilium.io/) CNI plugin. -Cilium provides [transparent encryption](https://docs.cilium.io/en/stable/security/network/encryption) for all cluster traffic using either IPSec or [WireGuard](https://www.wireguard.com/). -Currently, Constellation only supports WireGuard as the encryption engine. -You can read more about the cryptographic soundness of WireGuard [in their white paper](https://www.wireguard.com/papers/wireguard.pdf). - -Cilium is actively working on implementing a feature called [`host-to-host`](https://github.com/cilium/cilium/pull/19401) encryption mode for WireGuard. -With `host-to-host`, all traffic between nodes will be tunneled via WireGuard (host-to-host, host-to-pod, pod-to-host, pod-to-pod). -Until the `host-to-host` feature is released, Constellation enables `pod-to-pod` encryption. -This mode encrypts all traffic between Kubernetes pods using WireGuard tunnels. - -When using Cilium in the default setup but with encryption enabled, there is a [known issue](https://docs.cilium.io/en/v1.12/gettingstarted/encryption/#egress-traffic-to-not-yet-discovered-remote-endpoints-may-be-unencrypted) -that can cause pod-to-pod traffic to be unencrypted. -To mitigate this issue, Constellation adds a *strict* mode to Cilium's `pod-to-pod` encryption. 
-This mode changes the default behavior of traffic that's destined for an unknown endpoint to not be send out in plaintext, but instead being dropped. -The strict mode distinguishes between traffic that's send to a pod from traffic that's destined for a cluster-external endpoint by considering the pod's CIDR range. - -Traffic originating from hosts isn't encrypted yet. -This mainly includes health checks from Kubernetes API server. -Also, traffic proxied over the API server via e.g. `kubectl port-forward` isn't encrypted. diff --git a/docs/versioned_docs/version-2.21/architecture/observability.md b/docs/versioned_docs/version-2.21/architecture/observability.md deleted file mode 100644 index 0f4daffd4..000000000 --- a/docs/versioned_docs/version-2.21/architecture/observability.md +++ /dev/null @@ -1,74 +0,0 @@ -# Observability - -In Kubernetes, observability is the ability to gain insight into the behavior and performance of applications. -It helps identify and resolve issues more effectively, ensuring stability and performance of Kubernetes workloads, reducing downtime and outages, and improving efficiency. -The "three pillars of observability" are logs, metrics, and traces. - -In the context of Confidential Computing, observability is a delicate subject and needs to be applied such that it doesn't leak any sensitive information. -The following gives an overview of where and how you can apply standard observability tools in Constellation. - -## Cloud resource monitoring - -While inaccessible, Constellation's nodes are still visible as black box VMs to the hypervisor. -Resource consumption, such as memory and CPU utilization, can be monitored from the outside and observed via the cloud platforms directly. -Similarly, other resources, such as storage and network and their respective metrics, are visible via the cloud platform. - -## Metrics - -Metrics are numeric representations of data measured over intervals of time. They're essential for understanding system health and gaining insights using telemetry signals. - -By default, Constellation exposes the [metrics for Kubernetes system components](https://kubernetes.io/docs/concepts/cluster-administration/system-metrics/) inside the cluster. -Similarly, the [etcd metrics](https://etcd.io/docs/v3.5/metrics/) endpoints are exposed inside the cluster. -These [metrics endpoints can be disabled](https://kubernetes.io/docs/concepts/cluster-administration/system-metrics/#disabling-metrics). - -You can collect these cluster-internal metrics via tools such as [Prometheus](https://prometheus.io/) or the [Elastic Stack](https://www.elastic.co/de/elastic-stack/). - -Constellation's CNI Cilium also supports [metrics via Prometheus endpoints](https://docs.cilium.io/en/latest/observability/metrics/). -However, in Constellation, they're disabled by default and must be enabled first. - -## Logs - -Logs represent discrete events that usually describe what's happening with your service. -The payload is an actual message emitted from your system along with a metadata section containing a timestamp, labels, and tracking identifiers. - -### System logs - -Detailed system-level logs are accessible via `/var/log` and [journald](https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html) on the nodes directly. -They can be collected from there, for example, via [Filebeat and Logstash](https://www.elastic.co/guide/en/beats/filebeat/current/logstash-output.html), which are tools of the [Elastic Stack](https://www.elastic.co/de/elastic-stack/). 
- -In case of an error during the initialization, the CLI automatically collects the [Bootstrapper](./microservices.md#bootstrapper) logs and returns these as a file for [troubleshooting](../workflows/troubleshooting.md). Here is an example of such an event: - -```shell-session -Cluster initialization failed. This error is not recoverable. -Terminate your cluster and try again. -Fetched bootstrapper logs are stored in "constellation-cluster.log" -``` - -### Kubernetes logs - -Constellation supports the [Kubernetes logging architecture](https://kubernetes.io/docs/concepts/cluster-administration/logging/). -By default, logs are written to the nodes' encrypted state disks. -These include the Pod and container logs and the [system component logs](https://kubernetes.io/docs/concepts/cluster-administration/logging/#system-component-logs). - -[Constellation services](microservices.md) run as Pods inside the `kube-system` namespace and use the standard container logging mechanism. -The same applies for the [Cilium Pods](https://docs.cilium.io/en/latest/operations/troubleshooting/#logs). - -You can collect logs from within the cluster via tools such as [Fluentd](https://github.com/fluent/fluentd), [Loki](https://github.com/grafana/loki), or the [Elastic Stack](https://www.elastic.co/de/elastic-stack/). - -## Traces - -Modern systems are implemented as interconnected complex and distributed microservices. Understanding request flows and system communications is challenging, mainly because all systems in a chain need to be modified to propagate tracing information. Distributed tracing is a new approach to increasing observability and understanding performance bottlenecks. A trace represents consecutive events that reflect an end-to-end request path in a distributed system. - -Constellation supports [traces for Kubernetes system components](https://kubernetes.io/docs/concepts/cluster-administration/system-traces/). -By default, they're disabled and need to be enabled first. - -Similarly, Cilium can be enabled to [export traces](https://cilium.io/use-cases/metrics-export/). - -You can collect these traces via tools such as [Jaeger](https://www.jaegertracing.io/) or [Zipkin](https://zipkin.io/). - -## Integrations - -Platforms and SaaS solutions such as Datadog, logz.io, Dynatrace, or New Relic facilitate the observability challenge for Kubernetes and provide all-in-one SaaS solutions. -They install agents into the cluster that collect metrics, logs, and tracing information and upload them into the data lake of the platform. -Technically, the agent-based approach is compatible with Constellation, and attaching these platforms is straightforward. -However, you need to evaluate if the exported data might violate Constellation's compliance and privacy guarantees by uploading them to a third-party platform. diff --git a/docs/versioned_docs/version-2.21/architecture/orchestration.md b/docs/versioned_docs/version-2.21/architecture/orchestration.md deleted file mode 100644 index 3c8d529e7..000000000 --- a/docs/versioned_docs/version-2.21/architecture/orchestration.md +++ /dev/null @@ -1,83 +0,0 @@ -# Orchestrating Constellation clusters - -You can use the CLI to create a cluster on the supported cloud platforms. -The CLI provisions the resources in your cloud environment and initiates the initialization of your cluster. -It uses a set of parameters and an optional configuration file to manage your cluster installation. -The CLI is also used for updating your cluster. 
- -## Workspaces - -Each Constellation cluster has an associated *workspace*. -The workspace is where data such as the Constellation state and config files are stored. -Each workspace is associated with a single cluster and configuration. -The CLI stores state in the local filesystem making the current directory the active workspace. -Multiple clusters require multiple workspaces, hence, multiple directories. -Note that every operation on a cluster always has to be performed from the directory associated with its workspace. - -You may copy files from the workspace to other locations, -but you shouldn't move or delete them while the cluster is still being used. -The Constellation CLI takes care of managing the workspace. -Only when a cluster was terminated, and you are sure the files aren't needed anymore, should you remove a workspace. - -## Cluster creation process - -To allow for fine-grained configuration of your cluster and cloud environment, Constellation supports an extensive configuration file with strong defaults. [Generating the configuration file](../workflows/config.md) is typically the first thing you do in the workspace. - -Altogether, the following files are generated during the creation of a Constellation cluster and stored in the current workspace: - -* a configuration file -* a state file -* a Base64-encoded master secret -* [Terraform artifacts](../reference/terraform.md), stored in subdirectories -* a Kubernetes `kubeconfig` file. - -After the initialization of your cluster, the CLI will provide you with a Kubernetes `kubeconfig` file. -This file grants you access to your Kubernetes cluster and configures the [kubectl](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) tool. -In addition, the cluster's [identifier](orchestration.md#post-installation-configuration) is returned and stored in the state file. - -### Creation process details - -1. The CLI `apply` command first creates the confidential VM (CVM) resources in your cloud environment and configures the network -2. Each CVM boots the Constellation node image and measures every component in the boot chain -3. The first microservice launched in each node is the [*Bootstrapper*](microservices.md#bootstrapper) -4. The *Bootstrapper* waits until it either receives an initialization request or discovers an initialized cluster -5. The CLI then connects to the *Bootstrapper* of a selected node, sends the configuration, and initiates the initialization of the cluster -6. The *Bootstrapper* of **that** node [initializes the Kubernetes cluster](microservices.md#bootstrapper) and deploys the other Constellation [microservices](microservices.md) including the [*JoinService*](microservices.md#joinservice) -7. Subsequently, the *Bootstrappers* of the other nodes discover the initialized cluster and send join requests to the *JoinService* -8. As part of the join request each node includes an attestation statement of its boot measurements as authentication -9. The *JoinService* verifies the attestation statements and joins the nodes to the Kubernetes cluster -10. This process is repeated for every node joining the cluster later (e.g., through autoscaling) - -## Post-installation configuration - -Post-installation the CLI provides a configuration for [accessing the cluster using the Kubernetes API](https://kubernetes.io/docs/tasks/administer-cluster/access-cluster-api/). -The `kubeconfig` file provides the credentials and configuration for connecting and authenticating to the API server. 
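Besides `kubectl`, any standard Kubernetes client can consume this kubeconfig file. As a minimal sketch, assuming Go with the `client-go` library (neither is required by Constellation) and a kubeconfig named `constellation-admin.conf` in the workspace, programmatic access might look like this:

```go
package main

import (
	"context"
	"fmt"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Adjust the path to the kubeconfig file created in your workspace.
	cfg, err := clientcmd.BuildConfigFromFlags("", "./constellation-admin.conf")
	if err != nil {
		log.Fatal(err)
	}

	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}

	// List the cluster's nodes as a simple connectivity check.
	nodes, err := client.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
	if err != nil {
		log.Fatal(err)
	}
	for _, n := range nodes.Items {
		fmt.Println(n.Name)
	}
}
```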
-Once configured, orchestrate the Kubernetes cluster via `kubectl`. - -After the initialization, the CLI will present you with a couple of tokens: - -* The [*master secret*](keys.md#master-secret) (stored in the `constellation-mastersecret.json` file by default) -* The [*clusterID*](keys.md#cluster-identity) of your cluster in Base64 encoding - -You can read more about these values and their meaning in the guide on [cluster identity](keys.md#cluster-identity). - -The *master secret* must be kept secret and can be used to [recover your cluster](../workflows/recovery.md). -Instead of managing this secret manually, you can [use your key management solution of choice](keys.md#user-managed-key-management) with Constellation. - -The *clusterID* uniquely identifies a cluster and can be used to [verify your cluster](../workflows/verify-cluster.md). - -## Upgrades - -Constellation images and microservices may need to be upgraded to new versions during the lifetime of a cluster. -Constellation implements a rolling update mechanism ensuring no downtime of the control or data plane. -You can upgrade a Constellation cluster with a single operation by using the CLI. -For step-by-step instructions on how to do this, refer to [Upgrade your cluster](../workflows/upgrade.md). - -### Attestation of upgrades - -With every new image, corresponding measurements are released. -During an update procedure, the CLI provides new measurements to the [JoinService](microservices.md#joinservice) securely. -New measurements for an updated image are automatically pulled and verified by the CLI following the [supply chain security concept](attestation.md#chain-of-trust) of Constellation. -The [attestation section](attestation.md#cluster-facing-attestation) describes in detail how these measurements are then used by the JoinService for the attestation of nodes. - - diff --git a/docs/versioned_docs/version-2.21/architecture/overview.md b/docs/versioned_docs/version-2.21/architecture/overview.md deleted file mode 100644 index 386f93b2f..000000000 --- a/docs/versioned_docs/version-2.21/architecture/overview.md +++ /dev/null @@ -1,30 +0,0 @@ -# Overview - -Constellation is a cloud-based confidential orchestration platform. -The foundation of Constellation is Kubernetes and therefore shares the same technology stack and architecture principles. -To learn more about Constellation and Kubernetes, see [product overview](../overview/product.md). - -## About orchestration and updates - -As a cluster administrator, you can use the [Constellation CLI](orchestration.md) to install and deploy a cluster. -Updates are provided in accordance with the [support policy](versions.md). - -## About microservices and attestation - -Constellation manages the nodes and network in your cluster. All nodes are bootstrapped by the [*Bootstrapper*](microservices.md#bootstrapper). They're verified and authenticated by the [*JoinService*](microservices.md#joinservice) before being added to the cluster and the network. Finally, the entire cluster can be verified via the [*VerificationService*](microservices.md#verificationservice) using [remote attestation](attestation.md). - -## About node images and verified boot - -Constellation comes with operating system images for Kubernetes control-plane and worker nodes. -They're highly optimized for running containerized workloads and specifically prepared for running inside confidential VMs. -You can learn more about [the images](images.md) and how verified boot ensures their integrity during boot and beyond. 
- -## About key management and cryptographic primitives - -Encryption of data at-rest, in-transit, and in-use is the fundamental building block for confidential computing and Constellation. Learn more about the [keys and cryptographic primitives](keys.md) used in Constellation, [encrypted persistent storage](encrypted-storage.md), and [network encryption](networking.md). - -## About observability - -Observability in Kubernetes refers to the capability to troubleshoot issues using telemetry signals such as logs, metrics, and traces. -In the realm of Confidential Computing, it's crucial that observability aligns with confidentiality, necessitating careful implementation. -Learn more about the [observability capabilities in Constellation](./observability.md). diff --git a/docs/versioned_docs/version-2.21/architecture/versions.md b/docs/versioned_docs/version-2.21/architecture/versions.md deleted file mode 100644 index 9acc866ed..000000000 --- a/docs/versioned_docs/version-2.21/architecture/versions.md +++ /dev/null @@ -1,21 +0,0 @@ -# Versions and support policy - -All components of Constellation use a three-digit version number of the form `v<MAJOR>.<MINOR>.<PATCH>`. -The components are released in lock step, usually on the first Tuesday of every month. This release primarily introduces new features, but may also include security or performance improvements. The `MINOR` version will be incremented as part of this release. - -Additional `PATCH` releases may be created on demand, to fix security issues or bugs before the next `MINOR` release window. - -New releases are published on [GitHub](https://github.com/edgelesssys/constellation/releases). - -## Kubernetes support policy - -Constellation is aligned to the [version support policy of Kubernetes](https://kubernetes.io/releases/version-skew-policy/#supported-versions), and therefore usually supports the most recent three minor versions. -When a new minor version of Kubernetes is released, support is added to the next Constellation release, and that version then supports four Kubernetes versions. -Subsequent Constellation releases drop support for the oldest (and deprecated) Kubernetes version. - -The following Kubernetes versions are currently supported: - - -* v1.29.14 -* v1.30.10 -* v1.31.6 diff --git a/docs/versioned_docs/version-2.21/getting-started/examples.md b/docs/versioned_docs/version-2.21/getting-started/examples.md deleted file mode 100644 index fded84980..000000000 --- a/docs/versioned_docs/version-2.21/getting-started/examples.md +++ /dev/null @@ -1,6 +0,0 @@ -# Examples - -After you [installed the CLI](install.md) and [created your first cluster](first-steps.md), you're ready to deploy applications. Why not start with one of the following examples?
-* [Emojivoto](examples/emojivoto.md): a simple but fun web application -* [Online Boutique](examples/online-boutique.md): an e-commerce demo application by Google consisting of 11 separate microservices -* [Horizontal Pod Autoscaling](examples/horizontal-scaling.md): an example demonstrating Constellation's autoscaling capabilities diff --git a/docs/versioned_docs/version-2.21/getting-started/examples/emojivoto.md b/docs/versioned_docs/version-2.21/getting-started/examples/emojivoto.md deleted file mode 100644 index 2bbe27917..000000000 --- a/docs/versioned_docs/version-2.21/getting-started/examples/emojivoto.md +++ /dev/null @@ -1,22 +0,0 @@ -# Emojivoto -[Emojivoto](https://github.com/BuoyantIO/emojivoto) is a simple and fun application that's well suited to test the basic functionality of your cluster. - - - -emojivoto - Web UI - - - -1. Deploy the application: - ```bash - kubectl apply -k github.com/BuoyantIO/emojivoto/kustomize/deployment - ``` -2. Wait until it becomes available: - ```bash - kubectl wait --for=condition=available --timeout=60s -n emojivoto --all deployments - ``` -3. Forward the web service to your machine: - ```bash - kubectl -n emojivoto port-forward svc/web-svc 8080:80 - ``` -4. Visit [http://localhost:8080](http://localhost:8080) diff --git a/docs/versioned_docs/version-2.21/getting-started/examples/filestash-s3proxy.md b/docs/versioned_docs/version-2.21/getting-started/examples/filestash-s3proxy.md deleted file mode 100644 index b9a394256..000000000 --- a/docs/versioned_docs/version-2.21/getting-started/examples/filestash-s3proxy.md +++ /dev/null @@ -1,107 +0,0 @@ - -# Deploying Filestash - -Filestash is a web frontend for different storage backends, including S3. -It's a useful application to showcase s3proxy in action. - -1. Deploy s3proxy as described in [Deployment](../../workflows/s3proxy.md#deployment). -2. 
Create a deployment file for Filestash with one pod: - -```sh -cat << EOF > "deployment-filestash.yaml" -apiVersion: apps/v1 -kind: Deployment -metadata: - name: filestash -spec: - replicas: 1 - selector: - matchLabels: - app: filestash - template: - metadata: - labels: - app: filestash - spec: - hostAliases: - - ip: $(kubectl get svc s3proxy-service -o=jsonpath='{.spec.clusterIP}') - hostnames: - - "s3.us-east-1.amazonaws.com" - - "s3.us-east-2.amazonaws.com" - - "s3.us-west-1.amazonaws.com" - - "s3.us-west-2.amazonaws.com" - - "s3.eu-north-1.amazonaws.com" - - "s3.eu-south-1.amazonaws.com" - - "s3.eu-south-2.amazonaws.com" - - "s3.eu-west-1.amazonaws.com" - - "s3.eu-west-2.amazonaws.com" - - "s3.eu-west-3.amazonaws.com" - - "s3.eu-central-1.amazonaws.com" - - "s3.eu-central-2.amazonaws.com" - - "s3.ap-northeast-1.amazonaws.com" - - "s3.ap-northeast-2.amazonaws.com" - - "s3.ap-northeast-3.amazonaws.com" - - "s3.ap-east-1.amazonaws.com" - - "s3.ap-southeast-1.amazonaws.com" - - "s3.ap-southeast-2.amazonaws.com" - - "s3.ap-southeast-3.amazonaws.com" - - "s3.ap-southeast-4.amazonaws.com" - - "s3.ap-south-1.amazonaws.com" - - "s3.ap-south-2.amazonaws.com" - - "s3.me-south-1.amazonaws.com" - - "s3.me-central-1.amazonaws.com" - - "s3.il-central-1.amazonaws.com" - - "s3.af-south-1.amazonaws.com" - - "s3.ca-central-1.amazonaws.com" - - "s3.sa-east-1.amazonaws.com" - containers: - - name: filestash - image: machines/filestash:latest - ports: - - containerPort: 8334 - volumeMounts: - - name: ca-cert - mountPath: /etc/ssl/certs/kube-ca.crt - subPath: kube-ca.crt - volumes: - - name: ca-cert - secret: - secretName: s3proxy-tls - items: - - key: ca.crt - path: kube-ca.crt -EOF -``` - -The pod spec includes the `hostAliases` key, which adds an entry to the pod's `/etc/hosts`. -The entry forwards all requests for any of the currently defined AWS regions to the Kubernetes service `s3proxy-service`. -If you followed the s3proxy [Deployment](../../workflows/s3proxy.md#deployment) guide, this service points to a s3proxy pod. - -The deployment specifies all regions explicitly to prevent accidental data leaks. -If one of your buckets were located in a region that's not part of the `hostAliases` key, traffic towards those buckets would not be redirected to s3proxy. -Similarly, if you want to exclude data for specific regions from going through s3proxy you can remove those regions from the deployment. - -The spec also includes a volume mount for the TLS certificate and adds it to the pod's certificate trust store. -The volume is called `ca-cert`. -The key `ca.crt` of that volume is mounted to `/etc/ssl/certs/kube-ca.crt`, which is the default certificate trust store location for that container's OpenSSL library. -Not adding the CA certificate will result in TLS authentication errors. - -3. Apply the file: `kubectl apply -f deployment-filestash.yaml` - -Afterward, you can use a port forward to access the Filestash pod: -`kubectl port-forward pod/$(kubectl get pod --selector='app=filestash' -o=jsonpath='{.items[*].metadata.name}') 8334:8334` - -4. After browsing to `localhost:8443`, Filestash will ask you to set an administrator password. -After setting it, you can directly leave the admin area by clicking the blue cloud symbol in the top left corner. -Subsequently, you can select S3 as storage backend and enter your credentials. -This will bring you to an overview of your buckets. -If you want to deploy Filestash in production, take a look at its [documentation](https://www.filestash.app/docs/). - -5. 
To see the logs of s3proxy intercepting requests made to S3, run: `kubectl logs -f pod/$(kubectl get pod --selector='app=s3proxy' -o=jsonpath='{.items[*].metadata.name}')` -Look out for log messages labeled `intercepting`. -There is one such log message for each message that's encrypted, decrypted, or blocked. - -6. Once you have uploaded a file with Filestash, you should be able to view the file in Filestash. -However, if you go to the AWS S3 [Web UI](https://s3.console.aws.amazon.com/s3/home) and download the file you just uploaded in Filestash, you won't be able to read it. -Another way to spot encrypted files without downloading them is to click on a file, scroll to the Metadata section, and look for the header named `x-amz-meta-constellation-encryption`. -This header holds the encrypted data encryption key of the object and is only present on objects that are encrypted by s3proxy. diff --git a/docs/versioned_docs/version-2.21/getting-started/examples/horizontal-scaling.md b/docs/versioned_docs/version-2.21/getting-started/examples/horizontal-scaling.md deleted file mode 100644 index dfaf9e742..000000000 --- a/docs/versioned_docs/version-2.21/getting-started/examples/horizontal-scaling.md +++ /dev/null @@ -1,98 +0,0 @@ -# Horizontal Pod Autoscaling -This example demonstrates Constellation's autoscaling capabilities. It's based on the Kubernetes [HorizontalPodAutoscaler Walkthrough](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/). During the following steps, Constellation will spawn new VMs on demand, verify them, add them to the cluster, and delete them again when the load has settled down. - -## Requirements -The cluster needs to be initialized with Kubernetes 1.23 or later. In addition, [autoscaling must be enabled](../../workflows/scale.md) to enable Constellation to assign new nodes dynamically. - -Just for this example specifically, the cluster should have as few worker nodes in the beginning as possible. Start with a small cluster with only *one* low-powered node for the control-plane node and *one* low-powered worker node. - -:::info -We tested the example using instances of types `Standard_DC4as_v5` on Azure and `n2d-standard-4` on GCP. -::: - -## Setup - -1. Install the Kubernetes Metrics Server: - ```bash - kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml - ``` - -2. Deploy the HPA example server that's supposed to be scaled under load. - - This manifest is similar to the one from the Kubernetes HPA walkthrough, but with increased CPU limits and requests to facilitate the triggering of node scaling events. - ```bash - cat < - -Online Boutique - Web UI - - - -1. Create a namespace: - ```bash - kubectl create ns boutique - ``` -2. Deploy the application: - ```bash - kubectl apply -n boutique -f https://github.com/GoogleCloudPlatform/microservices-demo/raw/main/release/kubernetes-manifests.yaml - ``` -3. Wait for all services to become available: - ```bash - kubectl wait --for=condition=available --timeout=300s -n boutique --all deployments - ``` -4. Get the frontend's external IP address: - ```shell-session - $ kubectl get service frontend-external -n boutique | awk '{print $4}' - EXTERNAL-IP - - ``` - (`` is a placeholder for the IP assigned by your CSP.) -5. Enter the IP from the result in your browser to browse the online shop. 
diff --git a/docs/versioned_docs/version-2.21/getting-started/first-steps-local.md b/docs/versioned_docs/version-2.21/getting-started/first-steps-local.md deleted file mode 100644 index 98f0302de..000000000 --- a/docs/versioned_docs/version-2.21/getting-started/first-steps-local.md +++ /dev/null @@ -1,277 +0,0 @@ -# First steps with a local cluster - -A local cluster lets you deploy and test Constellation without a cloud subscription. -You have two options: - -* Use MiniConstellation to automatically deploy a two-node cluster. -* For more fine-grained control, create the cluster using the QEMU provider. - -Both options use virtualization to create a local cluster with control-plane nodes and worker nodes. They **don't** require hardware with Confidential VM (CVM) support. For attestation, they currently use a software-based vTPM provided by KVM/QEMU. - -You need an x64 machine with a Linux OS. -You can use a VM, but it needs nested virtualization. - -## Prerequisites - -* Machine requirements: - * An x86-64 CPU with at least 4 cores (6 cores are recommended) - * At least 4 GB RAM (6 GB are recommended) - * 20 GB of free disk space - * Hardware virtualization enabled in the BIOS/UEFI (often referred to as Intel VT-x or AMD-V/SVM) / nested-virtualization support when using a VM -* Software requirements: - * Linux OS with [KVM kernel module](https://www.linux-kvm.org/page/Main_Page) - * Recommended: Ubuntu 22.04 LTS - * [Docker](https://docs.docker.com/engine/install/) - * [xsltproc](https://gitlab.gnome.org/GNOME/libxslt/-/wikis/home) - * (Optional) [virsh](https://www.libvirt.org/manpages/virsh.html) to observe and access your nodes - -### Software installation on Ubuntu - -```bash -# install Docker -curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg -echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null -sudo apt update -sudo apt install docker-ce -# install other dependencies -sudo apt install xsltproc -sudo snap install kubectl --classic -# install Constellation CLI -curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-linux-amd64 -sudo install constellation-linux-amd64 /usr/local/bin/constellation -# do not drop forwarded packages -sudo iptables -P FORWARD ACCEPT -``` - -## Create a cluster - - - - - -With the `constellation mini` command, you can deploy and test Constellation locally. This mode is called MiniConstellation. Conceptually, MiniConstellation is similar to [MicroK8s](https://microk8s.io/), [K3s](https://k3s.io/), and [minikube](https://minikube.sigs.k8s.io/docs/). - - -:::caution - -MiniConstellation has specific soft- and hardware requirements such as a Linux OS running on an x86-64 CPU. Pay attention to all [prerequisites](#prerequisites) when setting up. - -::: - -:::note - -Since MiniConstellation runs on your local system, cloud features such as load balancing, -attaching persistent storage, or autoscaling aren't available. - -::: - -The following creates your MiniConstellation cluster (may take up to 10 minutes to complete): - -```bash -constellation mini up -``` - -This will configure your current directory as the [workspace](../architecture/orchestration.md#workspaces) for this cluster. -All `constellation` commands concerning this cluster need to be issued from this directory. 
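-
-As a quick check that the local cluster is up, you can, for example, point `kubectl` at the admin configuration in the workspace (assuming the default file name `constellation-admin.conf`):
-
-```bash
-# Use the kubeconfig generated in the current workspace
-export KUBECONFIG="$PWD/constellation-admin.conf"
-# The control-plane and worker node should show up here once they're ready
-kubectl get nodes
-```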
- - - - -With the QEMU provider, you can create a local Constellation cluster as if it were in the cloud. The provider uses [QEMU](https://www.qemu.org/) to create multiple VMs for the cluster nodes, which interact with each other. - -:::caution - -Constellation on QEMU has specific soft- and hardware requirements such as a Linux OS running on an x86-64 CPU. Pay attention to all [prerequisites](#prerequisites) when setting up. - -::: - -:::note - -Since Constellation on QEMU runs on your local system, cloud features such as load balancing, -attaching persistent storage, or autoscaling aren't available. - -::: - -1. To set up your local cluster, you need to create a configuration file for Constellation first. - - ```bash - constellation config generate qemu - ``` - - This creates a [configuration file](../workflows/config.md) for QEMU called `constellation-conf.yaml`. After that, your current folder also becomes your [workspace](../architecture/orchestration.md#workspaces). All `constellation` commands for your cluster need to be executed from this directory. - -2. Now you can create your cluster and its nodes. `constellation apply` uses the options set in `constellation-conf.yaml`. - - ```bash - constellation apply -y - ``` - - The Output should look like the following: - - ```shell-session - $ constellation apply -y - Checking for infrastructure changes - The following Constellation cluster will be created: - 3 control-plane nodes of type 2-vCPUs will be created. - 1 worker node of type 2-vCPUs will be created. - Creating - Cloud infrastructure created successfully. - Your Constellation master secret was successfully written to ./constellation-mastersecret.json - Connecting - Initializing cluster - Installing Kubernetes components - Your Constellation cluster was successfully initialized. - - Constellation cluster identifier g6iMP5wRU1b7mpOz2WEISlIYSfdAhB0oNaOg6XEwKFY= - Kubernetes configuration constellation-admin.conf - - You can now connect to your cluster by executing: - export KUBECONFIG="$PWD/constellation-admin.conf" - ``` - - The cluster's identifier will be different in your output. - Keep `constellation-mastersecret.json` somewhere safe. - This will allow you to [recover your cluster](../workflows/recovery.md) in case of a disaster. - - :::info - - Depending on your setup, `constellation apply` may take 10+ minutes to complete. - - ::: - -3. Configure kubectl - - ```bash - export KUBECONFIG="$PWD/constellation-admin.conf" - ``` - - - - -## Connect to the cluster - -Your cluster initially consists of a single control-plane node: - -```shell-session -$ kubectl get nodes -NAME STATUS ROLES AGE VERSION -control-plane-0 Ready control-plane 66s v1.24.6 -``` - -Additional nodes will request to join the cluster shortly. Before each additional node is allowed to join the cluster, its state is verified using remote attestation by the [JoinService](../architecture/microservices.md#joinservice). -If verification passes successfully, the new node receives keys and certificates to join the cluster. - -You can follow this process by viewing the logs of the JoinService: - -```shell-session -$ kubectl logs -n kube-system daemonsets/join-service -f -{"level":"INFO","ts":"2022-10-14T09:32:20Z","caller":"cmd/main.go:48","msg":"Constellation Node Join Service","version":"2.1.0","cloudProvider":"qemu"} -{"level":"INFO","ts":"2022-10-14T09:32:20Z","logger":"validator","caller":"watcher/validator.go:96","msg":"Updating expected measurements"} -... 
-``` - -Once all nodes have joined your cluster, it may take a couple of minutes for all resources to become available. -You can check on the state of your cluster by running the following: - -```shell-session -$ kubectl get nodes -NAME STATUS ROLES AGE VERSION -control-plane-0 Ready control-plane 2m59s v1.24.6 -worker-0 Ready 32s v1.24.6 -``` - -## Deploy a sample application - -1. Deploy the [emojivoto app](https://github.com/BuoyantIO/emojivoto) - - ```bash - kubectl apply -k github.com/BuoyantIO/emojivoto/kustomize/deployment - ``` - -2. Expose the frontend service locally - - ```bash - kubectl wait --for=condition=available --timeout=60s -n emojivoto --all deployments - kubectl -n emojivoto port-forward svc/web-svc 8080:80 & - curl http://localhost:8080 - kill %1 - ``` - -## Terminate your cluster - - - - -Once you are done, you can clean up the created resources using the following command: - -```bash -constellation mini down -``` - -This will destroy your cluster and clean up your workspace. -The VM image and cluster configuration file (`constellation-conf.yaml`) will be kept and may be reused to create new clusters. - - - - -Once you are done, you can clean up the created resources using the following command: - -```bash -constellation terminate -``` - -This should give the following output: - -```shell-session -$ constellation terminate -You are about to terminate a Constellation cluster. -All of its associated resources will be DESTROYED. -This action is irreversible and ALL DATA WILL BE LOST. -Do you want to continue? [y/n]: -``` - -Confirm with `y` to terminate the cluster: - -```shell-session -Terminating ... -Your Constellation cluster was terminated successfully. -``` - -This will destroy your cluster and clean up your workspace. -The VM image and cluster configuration file (`constellation-conf.yaml`) will be kept and may be reused to create new clusters. - - - - -## Troubleshooting - -Make sure to use the [latest release](https://github.com/edgelesssys/constellation/releases/latest) and check out the [known issues](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22). - -### VMs have no internet access / CLI remains in "Initializing cluster" state - -`iptables` rules may prevent your VMs from accessing the internet. -Make sure your rules aren't dropping forwarded packages. - -List your rules: - -```bash -sudo iptables -S -``` - -The output may look similar to the following: - -```shell-session --P INPUT ACCEPT --P FORWARD DROP --P OUTPUT ACCEPT --N DOCKER --N DOCKER-ISOLATION-STAGE-1 --N DOCKER-ISOLATION-STAGE-2 --N DOCKER-USER -``` - -If your `FORWARD` chain is set to `DROP`, you need to update your rules: - -```bash -sudo iptables -P FORWARD ACCEPT -``` diff --git a/docs/versioned_docs/version-2.21/getting-started/first-steps.md b/docs/versioned_docs/version-2.21/getting-started/first-steps.md deleted file mode 100644 index 2afe95635..000000000 --- a/docs/versioned_docs/version-2.21/getting-started/first-steps.md +++ /dev/null @@ -1,235 +0,0 @@ -# First steps with Constellation - -The following steps guide you through the process of creating a cluster and deploying a sample app. This example assumes that you have successfully [installed and set up Constellation](install.md), -and have access to a cloud subscription. - -:::tip -If you don't have a cloud subscription, you can also set up a [local Constellation cluster using virtualization](../getting-started/first-steps-local.md) for testing. 
-::: - -:::note -If you encounter any problem with the following steps, make sure to use the [latest release](https://github.com/edgelesssys/constellation/releases/latest) and check out the [known issues](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22). -::: - -## Create a cluster - -1. Create the [configuration file](../workflows/config.md) and state file for your cloud provider. If you are following the steps of this guide, there is no need to edit the file. - - - - - ```bash - constellation config generate aws - ``` - - - - - ```bash - constellation config generate azure - ``` - - - - - ```bash - constellation config generate gcp - ``` - - - - - ```bash - constellation config generate stackit - ``` - - - - -2. Create your [IAM configuration](../workflows/config.md#creating-an-iam-configuration). - - - - - ```bash - constellation iam create aws --zone=us-east-2a --prefix=constellTest --update-config - ``` - - This command creates IAM configuration for the AWS zone `us-east-2a` using the prefix `constellTest` for all named resources being created. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. - - Depending on the attestation variant selected on config generation, different regions are available. - AMD SEV-SNP machines (requires the default attestation variant `awsSEVSNP`) are currently available in the following regions: - * `eu-west-1` - * `us-east-2` - - You can find a list of regions that support AMD SEV-SNP in [AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/snp-requirements.html). - - NitroTPM machines (requires the attestation variant `awsNitroTPM`) are available in all regions. - Constellation OS images are currently replicated to the following regions: - * `eu-central-1` - * `eu-west-1` - * `eu-west-3` - * `us-east-2` - * `ap-south-1` - - If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+AWS+image+region:+xx-xxxx-x). - - You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). - - - - - ```bash - constellation iam create azure --subscriptionID 00000000-0000-0000-0000-000000000000 --region=westus --resourceGroup=constellTest --servicePrincipal=spTest --update-config - ``` - - This command creates IAM configuration on the Azure region `westus` creating a new resource group `constellTest` and a new service principal `spTest`. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. - - CVMs are available in several Azure regions. Constellation OS images are currently replicated to the following: - - * `germanywestcentral` - * `westus` - * `eastus` - * `northeurope` - * `westeurope` - * `southeastasia` - - If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+Azure+image+region:+xx-xxxx-x). - - You can find a list of all [regions in Azure's documentation](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=virtual-machines®ions=all). 
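-
-   If you want to check in which regions a specific CVM size is offered, one option is the Azure CLI, for example for `Standard_DC4as_v5`:
-
-   ```bash
-   # Lists the locations (and any restrictions) for the given VM size
-   az vm list-skus --size Standard_DC4as_v5 --all --output table
-   ```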
- - - - - ```bash - constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --serviceAccountID=constell-test --update-config - ``` - - This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. - - Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `C2D` or `N2D`. - - - - - To use Constellation on STACKIT, the cluster will use the User Access Token (UAT) that's generated [during the install step](./install.md). - After creating the accounts, fill in the STACKIT details in `constellation-conf.yaml` under `provider.openstack`: - - * `stackitProjectID`: STACKIT project id (can be found after login on the [STACKIT portal](https://portal.stackit.cloud)) - - :::caution - - `stackitProjectID` refers to the ID of your STACKIT project. The STACKIT portal also shows the OpenStack ID that's associated with your project in some places. Make sure you insert the STACKIT project ID in the `constellation-conf.yaml` file. It's of the format `XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX`. - - ::: - - - - - :::tip - To learn about all options you have for managing IAM resources and Constellation configuration, see the [Configuration workflow](../workflows/config.md). - ::: - - - -3. Create the cluster. `constellation apply` uses options set in `constellation-conf.yaml`. - If you want to manually manage your cloud resources, for example by using [Terraform](../reference/terraform.md), follow the corresponding instructions in the [Create workflow](../workflows/create.md). - - :::tip - - On Azure, you may need to wait 15+ minutes at this point for role assignments to propagate. - - ::: - - ```bash - constellation apply -y - ``` - - This should look similar to the following: - - ```shell-session - $ constellation apply -y - Checking for infrastructure changes - The following Constellation cluster will be created: - 3 control-plane nodes of type n2d-standard-4 will be created. - 1 worker node of type n2d-standard-4 will be created. - Creating - Cloud infrastructure created successfully - Your Constellation master secret was successfully written to ./constellation-mastersecret.json - Connecting - Initializing cluster - Installing Kubernetes components - Your Constellation cluster was successfully initialized. - - Constellation cluster identifier g6iMP5wRU1b7mpOz2WEISlIYSfdAhB0oNaOg6XEwKFY= - Kubernetes configuration constellation-admin.conf - - You can now connect to your cluster by executing: - export KUBECONFIG="$PWD/constellation-admin.conf" - ``` - - The cluster's identifier will be different in your output. - Keep `constellation-mastersecret.json` somewhere safe. - This will allow you to [recover your cluster](../workflows/recovery.md) in case of a disaster. - - :::info - - Depending on your CSP and region, `constellation apply` may take 10+ minutes to complete. - - ::: - -4. Configure kubectl. - - ```bash - export KUBECONFIG="$PWD/constellation-admin.conf" - ``` - -## Deploy a sample application - -1. Deploy the [emojivoto app](https://github.com/BuoyantIO/emojivoto) - - ```bash - kubectl apply -k github.com/BuoyantIO/emojivoto/kustomize/deployment - ``` - -2. 
Expose the frontend service locally - - ```bash - kubectl wait --for=condition=available --timeout=60s -n emojivoto --all deployments - kubectl -n emojivoto port-forward svc/web-svc 8080:80 & - curl http://localhost:8080 - kill %1 - ``` - -## Terminate your cluster - -Use the CLI to terminate your cluster. If you manually used [Terraform](../reference/terraform.md) to manage your cloud resources, follow the corresponding instructions in the [Terminate workflow](../workflows/terminate.md). - -```bash -constellation terminate -``` - -This should give the following output: - -```shell-session -$ constellation terminate -You are about to terminate a Constellation cluster. -All of its associated resources will be DESTROYED. -This action is irreversible and ALL DATA WILL BE LOST. -Do you want to continue? [y/n]: -``` - -Confirm with `y` to terminate the cluster: - -```shell-session -Terminating ... -Your Constellation cluster was terminated successfully. -``` - -Optionally, you can also [delete your IAM resources](../workflows/config.md#deleting-an-iam-configuration). diff --git a/docs/versioned_docs/version-2.21/getting-started/install.md b/docs/versioned_docs/version-2.21/getting-started/install.md deleted file mode 100644 index f7b36770a..000000000 --- a/docs/versioned_docs/version-2.21/getting-started/install.md +++ /dev/null @@ -1,439 +0,0 @@ -# Installation and setup - -Constellation runs entirely in your cloud environment and can be controlled via a dedicated [command-line interface (CLI)](../reference/cli.md) or a [Terraform provider](../workflows/terraform-provider.md). - -## Prerequisites - -Make sure the following requirements are met: - -* Your machine is running Linux, macOS, or Windows -* You have admin rights on your machine -* [kubectl](https://kubernetes.io/docs/tasks/tools/) is installed -* Your CSP is Amazon Web Services (AWS), Microsoft Azure, Google Cloud Platform (GCP), or STACKIT - -## Install the Constellation CLI - -:::tip - -If you prefer to use Terraform, you can alternatively use the [Terraform provider](../workflows/terraform-provider.md) to manage the cluster's lifecycle. - -::: - -The CLI executable is available at [GitHub](https://github.com/edgelesssys/constellation/releases). -Install it with the following commands: - - - - -1. Download the CLI: - -```bash -curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-linux-amd64 -``` - -2. [Verify the signature](../workflows/verify-cli.md) (optional) - -3. Install the CLI to your PATH: - -```bash -sudo install constellation-linux-amd64 /usr/local/bin/constellation -``` - - - - -1. Download the CLI: - -```bash -curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-linux-arm64 -``` - -2. [Verify the signature](../workflows/verify-cli.md) (optional) - -3. Install the CLI to your PATH: - -```bash -sudo install constellation-linux-arm64 /usr/local/bin/constellation -``` - - - - - -1. Download the CLI: - -```bash -curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-darwin-arm64 -``` - -2. [Verify the signature](../workflows/verify-cli.md) (optional) - -3. Install the CLI to your PATH: - -```bash -sudo install constellation-darwin-arm64 /usr/local/bin/constellation -``` - - - - - -1. Download the CLI: - -```bash -curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-darwin-amd64 -``` - -2. [Verify the signature](../workflows/verify-cli.md) (optional) - -3. 
Install the CLI to your PATH: - -```bash -sudo install constellation-darwin-amd64 /usr/local/bin/constellation -``` - - - - - -1. Download the CLI: - -```bash -Invoke-WebRequest -OutFile ./constellation.exe -Uri 'https://github.com/edgelesssys/constellation/releases/latest/download/constellation-windows-amd64.exe' -``` - -2. [Verify the signature](../workflows/verify-cli.md) (optional) - -3. Install the CLI under `C:\Program Files\Constellation\bin\constellation.exe` - -3. Add the CLI to your PATH: - - 1. Open `Advanced system settings` by searching for the App in the Windows search - 2. Go to the `Advanced` tab - 3. Click `Environment Variables…` - 4. Click variable called `Path` and click `Edit…` - 5. Click `New` - 6. Enter the path to the folder containing the binary you want on your PATH: `C:\Program Files\Constellation\bin` - - - - -:::tip -The CLI supports autocompletion for various shells. To set it up, run `constellation completion` and follow the given steps. -::: - -## Set up cloud credentials - -Constellation makes authenticated calls to the CSP API. Therefore, you need to set up Constellation with the credentials for your CSP. - -:::tip -If you don't have a cloud subscription, you can also set up a [local Constellation cluster using virtualization](../getting-started/first-steps-local.md) for testing. -::: - -### Required permissions - - - - -To set up a Constellation cluster, you need to perform two tasks that require permissions: create the infrastructure and create roles for cluster nodes. Both of these actions can be performed by different users, e.g., an administrator to create roles and a DevOps engineer to create the infrastructure. - -To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "ec2:DescribeAccountAttributes", - "iam:AddRoleToInstanceProfile", - "iam:AttachRolePolicy", - "iam:CreateInstanceProfile", - "iam:CreatePolicy", - "iam:CreateRole", - "iam:DeleteInstanceProfile", - "iam:DeletePolicy", - "iam:DeletePolicyVersion", - "iam:DeleteRole", - "iam:DetachRolePolicy", - "iam:GetInstanceProfile", - "iam:GetPolicy", - "iam:GetPolicyVersion", - "iam:GetRole", - "iam:ListAttachedRolePolicies", - "iam:ListInstanceProfilesForRole", - "iam:ListPolicyVersions", - "iam:ListRolePolicies", - "iam:PassRole", - "iam:RemoveRoleFromInstanceProfile", - "sts:GetCallerIdentity" - ], - "Resource": "*" - } - ] -} -``` - -The built-in `AdministratorAccess` policy is a superset of these permissions. - -To [create a Constellation cluster](../workflows/create.md), see the permissions of [main.tf](https://github.com/edgelesssys/constellation/blob/main/terraform/infrastructure/iam/aws/main.tf). - -The built-in `PowerUserAccess` policy is a superset of these permissions. - -Follow Amazon's guide on [understanding](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) and [managing policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html). 
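-
-One possible way to grant the IAM-creation permissions is to save the JSON document above as a customer managed policy and attach it to the IAM user that runs `constellation iam create`; the policy name, user name, and account ID in this sketch are placeholders:
-
-```bash
-# Create the policy from the JSON document above (saved as constellation-iam-policy.json)
-aws iam create-policy \
-  --policy-name constellation-iam-creation \
-  --policy-document file://constellation-iam-policy.json
-# Attach it to the IAM user that will run the CLI
-aws iam attach-user-policy \
-  --user-name <your-iam-user> \
-  --policy-arn "arn:aws:iam::<account-id>:policy/constellation-iam-creation"
-```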
- - - - -The following [resource providers need to be registered](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/resource-providers-and-types#register-resource-provider) in your subscription: - -* `Microsoft.Attestation` -* `Microsoft.Compute` -* `Microsoft.Insights` -* `Microsoft.ManagedIdentity` -* `Microsoft.Network` - -By default, Constellation tries to register these automatically if they haven't been registered before. - -To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: - -* `*/register/action` \[1] -* `Microsoft.Authorization/roleAssignments/*` -* `Microsoft.Authorization/roleDefinitions/*` -* `Microsoft.ManagedIdentity/userAssignedIdentities/*` -* `Microsoft.Resources/subscriptions/resourcegroups/*` - -The built-in `Owner` role is a superset of these permissions. - -To [create a Constellation cluster](../workflows/create.md), you need the following permissions: - -* `Microsoft.Attestation/attestationProviders/*` -* `Microsoft.Compute/virtualMachineScaleSets/*` -* `Microsoft.Insights/components/*` -* `Microsoft.ManagedIdentity/userAssignedIdentities/*` -* `Microsoft.Network/loadBalancers/*` -* `Microsoft.Network/loadBalancers/backendAddressPools/*` -* `Microsoft.Network/networkSecurityGroups/*` -* `Microsoft.Network/publicIPAddresses/*` -* `Microsoft.Network/virtualNetworks/*` -* `Microsoft.Network/virtualNetworks/subnets/*` -* `Microsoft.Network/natGateways/*` - -The built-in `Contributor` role is a superset of these permissions. - -Follow Microsoft's guide on [understanding](https://learn.microsoft.com/en-us/azure/role-based-access-control/role-definitions) and [assigning roles](https://learn.microsoft.com/en-us/azure/role-based-access-control/role-assignments). - -1: You can omit `*/register/Action` if the resource providers mentioned above are already registered and the `ARM_SKIP_PROVIDER_REGISTRATION` environment variable is set to `true` when creating the IAM configuration. - - - - -Create a new project for Constellation or use an existing one. -Enable the [Compute Engine API](https://console.cloud.google.com/apis/library/compute.googleapis.com) on it. - -To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: - -* `iam.serviceAccountKeys.create` -* `iam.serviceAccountKeys.delete` -* `iam.serviceAccountKeys.get` -* `iam.serviceAccounts.create` -* `iam.serviceAccounts.delete` -* `iam.serviceAccounts.get` -* `resourcemanager.projects.getIamPolicy` -* `resourcemanager.projects.setIamPolicy` - -Together, the built-in roles `roles/editor` and `roles/resourcemanager.projectIamAdmin` form a superset of these permissions. 
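-
-As a sketch, granting these two roles to the user who runs `constellation iam create` could look like this (the project ID and member below are placeholders):
-
-```bash
-# Grant the editor role on the project
-gcloud projects add-iam-policy-binding yourproject-12345 \
-  --member="user:devops@example.com" \
-  --role="roles/editor"
-# Grant the project IAM admin role on the project
-gcloud projects add-iam-policy-binding yourproject-12345 \
-  --member="user:devops@example.com" \
-  --role="roles/resourcemanager.projectIamAdmin"
-```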
- -To [create a Constellation cluster](../workflows/create.md), you need the following permissions: - -* `compute.addresses.createInternal` -* `compute.addresses.deleteInternal` -* `compute.addresses.get` -* `compute.addresses.useInternal` -* `compute.backendServices.create` -* `compute.backendServices.delete` -* `compute.backendServices.get` -* `compute.backendServices.use` -* `compute.disks.create` -* `compute.firewalls.create` -* `compute.firewalls.delete` -* `compute.firewalls.get` -* `compute.firewalls.update` -* `compute.globalAddresses.create` -* `compute.globalAddresses.delete` -* `compute.globalAddresses.get` -* `compute.globalAddresses.use` -* `compute.globalForwardingRules.create` -* `compute.globalForwardingRules.delete` -* `compute.globalForwardingRules.get` -* `compute.globalForwardingRules.setLabels` -* `compute.globalOperations.get` -* `compute.healthChecks.create` -* `compute.healthChecks.delete` -* `compute.healthChecks.get` -* `compute.healthChecks.useReadOnly` -* `compute.instanceGroupManagers.create` -* `compute.instanceGroupManagers.delete` -* `compute.instanceGroupManagers.get` -* `compute.instanceGroupManagers.update` -* `compute.instanceGroups.create` -* `compute.instanceGroups.delete` -* `compute.instanceGroups.get` -* `compute.instanceGroups.update` -* `compute.instanceGroups.use` -* `compute.instances.create` -* `compute.instances.setLabels` -* `compute.instances.setMetadata` -* `compute.instances.setTags` -* `compute.instanceTemplates.create` -* `compute.instanceTemplates.delete` -* `compute.instanceTemplates.get` -* `compute.instanceTemplates.useReadOnly` -* `compute.networks.create` -* `compute.networks.delete` -* `compute.networks.get` -* `compute.networks.updatePolicy` -* `compute.routers.create` -* `compute.routers.delete` -* `compute.routers.get` -* `compute.routers.update` -* `compute.subnetworks.create` -* `compute.subnetworks.delete` -* `compute.subnetworks.get` -* `compute.subnetworks.use` -* `compute.targetTcpProxies.create` -* `compute.targetTcpProxies.delete` -* `compute.targetTcpProxies.get` -* `compute.targetTcpProxies.use` -* `iam.serviceAccounts.actAs` - -Together, the built-in roles `roles/editor`, `roles/compute.instanceAdmin` and `roles/resourcemanager.projectIamAdmin` form a superset of these permissions. - -Follow Google's guide on [understanding](https://cloud.google.com/iam/docs/understanding-roles) and [assigning roles](https://cloud.google.com/iam/docs/granting-changing-revoking-access). - - - - -Constellation on STACKIT requires a User Access Token (UAT) for the OpenStack API and a STACKIT service account. -The UAT already has all required permissions by default. -The STACKIT service account needs the `editor` role to create STACKIT LoadBalancers. -Look at the [STACKIT documentation](https://docs.stackit.cloud/stackit/en/getting-started-in-service-accounts-134415831.html) on how to create the service account and assign the role. - - - - -### Authentication - -You need to authenticate with your CSP. The following lists the required steps for *testing* and *production* environments. - -:::note -The steps for a *testing* environment are simpler. However, they may expose secrets to the CSP. If in doubt, follow the *production* steps. -::: - - - - -**Testing** - -You can use the [AWS CloudShell](https://console.aws.amazon.com/cloudshell/home). Make sure you are [authorized to use it](https://docs.aws.amazon.com/cloudshell/latest/userguide/sec-auth-with-identities.html). 
- -**Production** - -Use the latest version of the [AWS CLI](https://aws.amazon.com/cli/) on a trusted machine: - -```bash -aws configure -``` - -Options and first steps are described in the [AWS CLI documentation](https://docs.aws.amazon.com/cli/index.html). - - - - -**Testing** - -Simply open the [Azure Cloud Shell](https://docs.microsoft.com/en-us/azure/cloud-shell/overview). - -**Production** - -Use the latest version of the [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/) on a trusted machine: - -```bash -az login -``` - -Other options are described in Azure's [authentication guide](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli). - - - - -**Testing** - -You can use the [Google Cloud Shell](https://cloud.google.com/shell). Make sure your [session is authorized](https://cloud.google.com/shell/docs/auth). For example, execute `gsutil` and accept the authorization prompt. - -**Production** - -Use one of the following options on a trusted machine: - -* Use the [`gcloud` CLI](https://cloud.google.com/sdk/gcloud) - - ```bash - gcloud auth application-default login - ``` - - This will ask you to log-in to your Google account and create your credentials. - The Constellation CLI will automatically load these credentials when needed. - -* Set up a service account and pass the credentials manually - - Follow [Google's guide](https://cloud.google.com/docs/authentication/production#manually) for setting up your credentials. - - - - -You need to authenticate with the infrastructure API (OpenStack) and create a service account (STACKIT API). - -1. [Follow the STACKIT documentation](https://docs.stackit.cloud/stackit/en/step-1-generating-of-user-access-token-11763726.html) for obtaining a User Access Token (UAT) to use the infrastructure API -2. Create a configuration file with the credentials from the User Access Token under: - * Linux: `~/.config/openstack/clouds.yaml` - * macOS: `/Users//Library/Application Support/openstack/clouds.yaml` or `/etc/openstack/clouds.yaml` - * Windows: `%AppData%\openstack\clouds.yaml` - - - ```yaml - clouds: - stackit: - auth: - auth_url: https://keystone.api.iaas.eu01.stackit.cloud/v3 - username: REPLACE_WITH_UAT_USERNAME - password: REPLACE_WITH_UAT_PASSWORD - project_id: REPLACE_WITH_OPENSTACK_PROJECT_ID - project_name: REPLACE_WITH_STACKIT_PROJECT_NAME - user_domain_name: portal_mvp - project_domain_name: portal_mvp - region_name: RegionOne - identity_api_version: 3 - ``` - -:::caution - -`project_id` refers to the ID of your OpenStack project. The STACKIT portal also shows the STACKIT ID that's associated with your project in some places. Make sure you insert the OpenStack project ID in the `clouds.yaml` file. - -::: - -3. [Follow the STACKIT documentation](https://docs.stackit.cloud/stackit/en/getting-started-in-service-accounts-134415831.html) for creating a service account and an access token -4. Assign the `editor` role to the service account by [following the documentation](https://docs.stackit.cloud/stackit/en/getting-started-in-service-accounts-134415831.html) -5. Create a configuration file under `~/.stackit/credentials.json` (`%USERPROFILE%\.stackit\credentials.json` on Windows) - - ```json - {"STACKIT_SERVICE_ACCOUNT_TOKEN":"REPLACE_WITH_TOKEN"} - ``` - - - - - -## Next steps - -You are now ready to [deploy your first confidential Kubernetes cluster and application](first-steps.md). 
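-
-Before you continue, you can confirm that the CLI is installed correctly, for example by printing its version:
-
-```bash
-constellation version
-```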
diff --git a/docs/versioned_docs/version-2.21/getting-started/marketplaces.md b/docs/versioned_docs/version-2.21/getting-started/marketplaces.md deleted file mode 100644 index a6763a42a..000000000 --- a/docs/versioned_docs/version-2.21/getting-started/marketplaces.md +++ /dev/null @@ -1,56 +0,0 @@ -# Using Constellation via Cloud Marketplaces - -Constellation is available through the Marketplaces of AWS, Azure, GCP, and STACKIT. This allows you to create self-managed Constellation clusters that are billed on a pay-per-use basis (hourly, per vCPU) with your CSP account. You can still get direct support by Edgeless Systems. For more information, please [contact us](https://www.edgeless.systems/enterprise-support/). - -This document explains how to run Constellation with the dynamically billed cloud marketplace images. - - - - -To use Constellation's marketplace images, ensure that you are subscribed to the [marketplace offering](https://aws.amazon.com/marketplace/pp/prodview-2mbn65nv57oys) through the web portal. - -Then, enable the use of marketplace images in your Constellation `constellation-conf.yaml` [config file](../workflows/config.md): - -```bash -yq eval -i ".provider.aws.useMarketplaceImage = true" constellation-conf.yaml -``` - - - - -Constellation has a private marketplace plan. Please [contact us](https://www.edgeless.systems/enterprise-support/) to gain access. - -To use a marketplace image, you need to accept the marketplace image's terms once for your subscription with the [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/vm/image/terms?view=azure-cli-latest): - -```bash -az vm image terms accept --publisher edgelesssystems --offer constellation --plan constellation -``` - -Then, enable the use of marketplace images in your Constellation `constellation-conf.yaml` [config file](../workflows/config.md): - -```bash -yq eval -i ".provider.azure.useMarketplaceImage = true" constellation-conf.yaml -``` - - - - -To use a marketplace image, ensure that the account is entitled to use marketplace images by Edgeless Systems by accepting the terms through the [web portal](https://console.cloud.google.com/marketplace/vm/config/edgeless-systems-public/constellation). - -Then, enable the use of marketplace images in your Constellation `constellation-conf.yaml` [config file](../workflows/config.md): - -```bash -yq eval -i ".provider.gcp.useMarketplaceImage = true" constellation-conf.yaml -``` - - - - -On STACKIT, the selected Constellation image is always a marketplace image. You can find more information on the STACKIT portal. - - - - -Ensure that the cluster uses an official release image version (i.e., `.image=vX.Y.Z` in the `constellation-conf.yaml` file). - -From there, you can proceed with the [cluster creation](../workflows/create.md) as usual. diff --git a/docs/versioned_docs/version-2.21/intro.md b/docs/versioned_docs/version-2.21/intro.md deleted file mode 100644 index 0bfe86da9..000000000 --- a/docs/versioned_docs/version-2.21/intro.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -slug: / -id: intro ---- -# Introduction - -Welcome to the documentation of Constellation! Constellation is a Kubernetes engine that aims to provide the best possible data security. - -![Constellation concept](/img/concept.svg) - - Constellation shields your entire Kubernetes cluster from the underlying cloud infrastructure. Everything inside is always encrypted, including at runtime in memory. 
For this, Constellation leverages a technology called *confidential computing* and more specifically Confidential VMs. - -:::tip -See the 📄[whitepaper](https://content.edgeless.systems/hubfs/Confidential%20Computing%20Whitepaper.pdf) for more information on confidential computing. -::: - -## Goals - -From a security perspective, Constellation is designed to keep all data always encrypted and to prevent any access from the underlying (cloud) infrastructure. This includes access from datacenter employees, privileged cloud admins, and attackers coming through the infrastructure. Such attackers could be malicious co-tenants escalating their privileges or hackers who managed to compromise a cloud server. - -From a DevOps perspective, Constellation is designed to work just like what you would expect from a modern Kubernetes engine. - -## Use cases - -Constellation provides unique security [features](overview/confidential-kubernetes.md) and [benefits](overview/security-benefits.md). The core use cases are: - -* Increasing the overall security of your clusters -* Increasing the trustworthiness of your SaaS offerings -* Moving sensitive workloads from on-prem to the cloud -* Meeting regulatory requirements - -## Next steps - -You can learn more about the concept of Confidential Kubernetes, features, security benefits, and performance of Constellation in the *Basics* section. To jump right into the action head to *Getting started*. diff --git a/docs/versioned_docs/version-2.21/overview/clouds.md b/docs/versioned_docs/version-2.21/overview/clouds.md deleted file mode 100644 index b2695d28e..000000000 --- a/docs/versioned_docs/version-2.21/overview/clouds.md +++ /dev/null @@ -1,66 +0,0 @@ -# Feature status of clouds - -What works on which cloud? Currently, Confidential VMs (CVMs) are available in varying quality on the different clouds and software stacks. - -For Constellation, the ideal environment provides the following: - -1. Ability to run arbitrary software and images inside CVMs -2. CVMs based on AMD SEV-SNP (available in EPYC CPUs since the Milan generation) or Intel TDX (available in Xeon CPUs since the Sapphire Rapids generation) -3. Ability for CVM guests to obtain raw hardware attestation statements -4. Reviewable, open-source firmware inside CVMs -5. Capability of the firmware to attest the integrity of the code it passes control to, e.g., with an embedded virtual TPM (vTPM) - -(1) is a functional must-have. (2)--(5) are required for remote attestation that fully keeps the infrastructure/cloud out. Constellation can work without them or with approximations, but won't protect against certain privileged attackers anymore. - -The following table summarizes the state of features for different infrastructures. - -| **Feature** | **AWS** | **Azure** | **GCP** | **STACKIT** | **OpenStack (Yoga)** | -|-----------------------------------|---------|-----------|---------|--------------|----------------------| -| **1. Custom images** | Yes | Yes | Yes | Yes | Yes | -| **2. SEV-SNP or TDX** | Yes | Yes | Yes | No | Depends on kernel/HV | -| **3. Raw guest attestation** | Yes | Yes | Yes | No | Depends on kernel/HV | -| **4. Reviewable firmware** | Yes | No* | No | No | Depends on kernel/HV | -| **5. Confidential measured boot** | No | Yes | No | No | Depends on kernel/HV | - -## Amazon Web Services (AWS) - -Amazon EC2 [supports AMD SEV-SNP](https://aws.amazon.com/de/about-aws/whats-new/2023/04/amazon-ec2-amd-sev-snp/). -Regarding (3), AWS provides direct access to attestation statements. 
-However, regarding (5), attestation is partially based on the [NitroTPM](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html) for [measured boot](../architecture/attestation.md#measured-boot), which is a vTPM managed by the Nitro hypervisor. -Hence, the hypervisor is currently part of Constellation's TCB. -Regarding (4), the [firmware is open source](https://github.com/aws/uefi) and can be reproducibly built. - -## Microsoft Azure - -With its [CVM offering](https://docs.microsoft.com/en-us/azure/confidential-computing/confidential-vm-overview), Azure provides the best foundations for Constellation. -Regarding (3), Azure provides direct access to attestation statements. -The firmware runs in an isolated domain inside the CVM and exposes a vTPM (5), but it's closed source (4). -On SEV-SNP, Azure uses VM Privilege Level (VMPL) isolation for the separation of firmware and the rest of the VM; on TDX, they use TD partitioning. -This firmware is signed by Azure. -The signature is reflected in the attestation statements of CVMs. -Thus, the Azure closed-source firmware becomes part of Constellation's trusted computing base (TCB). - -\* Recently, [Azure announced the open source paravisor OpenHCL](https://techcommunity.microsoft.com/blog/windowsosplatform/openhcl-the-new-open-source-paravisor/4273172). It's the foundation for fully open source and verifiable CVM firmware. Once Azure provides their CVM firmware with reproducible builds based on OpenHCL, (4) switches from *No* to *Yes*. Constellation will support OpenHCL based firmware on Azure in the future. - -## Google Cloud Platform (GCP) - -The [CVMs Generally Available in GCP](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview#technologies) are based on AMD SEV-ES or SEV-SNP. -Regarding (3), with their SEV-SNP offering Google provides direct access to attestation statements. -However, regarding (5), attestation is partially based on the [Shielded VM vTPM](https://cloud.google.com/compute/shielded-vm/docs/shielded-vm#vtpm) for [measured boot](../architecture/attestation.md#measured-boot), which is a vTPM managed by Google's hypervisor. -Hence, the hypervisor is currently part of Constellation's TCB. -Regarding (4), the CVMs still include closed-source firmware. - -[TDX on Google](https://cloud.google.com/blog/products/identity-security/confidential-vms-on-intel-cpus-your-datas-new-intelligent-defense) is in public preview. -With it, Constellation would have a similar TCB and attestation flow as with the current SEV-SNP offering. - -## STACKIT - -[STACKIT Compute Engine](https://www.stackit.de/en/product/stackit-compute-engine/) supports AMD SEV-ES. A vTPM is used for measured boot, which is a vTPM managed by STACKIT's hypervisor. Hence, the hypervisor is currently part of Constellation's TCB. - -## OpenStack - -OpenStack is an open-source cloud and infrastructure management software. It's used by many smaller CSPs and datacenters. In the latest *Yoga* version, OpenStack has basic support for CVMs. However, much depends on the employed kernel and hypervisor. Features (2)--(4) are likely to be a *Yes* with Linux kernel version 6.2. Thus, going forward, OpenStack on corresponding AMD or Intel hardware will be a viable underpinning for Constellation. - -## Conclusion - -The different clouds and software like the Linux kernel and OpenStack are in the process of building out their support for state-of-the-art CVMs. Azure has already most features in place. 
For Constellation, the status quo means that the TCB has different shapes on different infrastructures. With broad SEV-SNP support coming to the Linux kernel, we soon expect a normalization of features across infrastructures. diff --git a/docs/versioned_docs/version-2.21/overview/confidential-kubernetes.md b/docs/versioned_docs/version-2.21/overview/confidential-kubernetes.md deleted file mode 100644 index bff8c3322..000000000 --- a/docs/versioned_docs/version-2.21/overview/confidential-kubernetes.md +++ /dev/null @@ -1,42 +0,0 @@ -# Confidential Kubernetes - -We use the term *Confidential Kubernetes* to refer to the concept of using confidential-computing technology to shield entire Kubernetes clusters from the infrastructure. The three defining properties of this concept are: - -1. **Workload shielding**: the confidentiality and integrity of all workload-related data and code are enforced. -2. **Control plane shielding**: the confidentiality and integrity of the cluster's control plane, state, and workload configuration are enforced. -3. **Attestation and verifiability**: the two properties above can be verified remotely based on hardware-rooted cryptographic certificates. - -Each of the above properties is equally important. Only with all three in conjunction, an entire cluster can be shielded without gaps. - -## Constellation security features - -Constellation implements the Confidential Kubernetes concept with the following security features. - -* **Runtime encryption**: Constellation runs all Kubernetes nodes inside Confidential VMs (CVMs). This gives runtime encryption for the entire cluster. -* **Network and storage encryption**: Constellation augments this with transparent encryption of the [network](../architecture/networking.md), [persistent storage](../architecture/encrypted-storage.md), and other managed storage like [AWS S3](../architecture/encrypted-storage.md#encrypted-s3-object-storage). Thus, workloads and control plane are truly end-to-end encrypted: at rest, in transit, and at runtime. -* **Transparent key management**: Constellation manages the corresponding [cryptographic keys](../architecture/keys.md) inside CVMs. -* **Node attestation and verification**: Constellation verifies the integrity of each new CVM-based node using [remote attestation](../architecture/attestation.md). Only "good" nodes receive the cryptographic keys required to access the network and storage of a cluster. -* **Confidential computing-optimized images**: A node is "good" if it's running a signed Constellation [node image](../architecture/images.md) inside a CVM and is in the expected state. (Node images are hardware-measured during boot. The measurements are reflected in the attestation statements that are produced by nodes and verified by Constellation.) -* **"Whole cluster" attestation**: Towards the DevOps engineer, Constellation provides a single hardware-rooted certificate from which all of the above can be verified. - -With the above, Constellation wraps an entire cluster into one coherent and verifiable *confidential context*. The concept is depicted in the following. - -![Confidential Kubernetes](../_media/concept-constellation.svg) - -## Comparison: Managed Kubernetes with CVMs - -In comparison, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. 
Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. Consequently, this approach leaves a large attack surface, as is depicted in the following. - -![Concept: Managed Kubernetes plus CVMs](../_media/concept-managed.svg) - -The following table highlights the key differences in terms of features. - -| | Managed Kubernetes with CVMs | Confidential Kubernetes (Constellation✨) | -|-------------------------------------|------------------------------|--------------------------------------------| -| Runtime encryption | Partial (data plane only)| **Yes** | -| Node image verification | No | **Yes** | -| Full cluster attestation | No | **Yes** | -| Transparent network encryption | No | **Yes** | -| Transparent storage encryption | No | **Yes** | -| Confidential key management | No | **Yes** | -| Cloud agnostic / multi-cloud | No | **Yes** | diff --git a/docs/versioned_docs/version-2.21/overview/license.md b/docs/versioned_docs/version-2.21/overview/license.md deleted file mode 100644 index 34122c025..000000000 --- a/docs/versioned_docs/version-2.21/overview/license.md +++ /dev/null @@ -1,33 +0,0 @@ -# License - -## Source code - -Constellation's source code is available on [GitHub](https://github.com/edgelesssys/constellation) under the [GNU Affero General Public License v3.0](https://github.com/edgelesssys/constellation/blob/main/LICENSE). - -## Binaries - -Edgeless Systems provides ready-to-use and [signed](../architecture/attestation.md#chain-of-trust) binaries of Constellation. This includes the CLI and the [node images](../architecture/images.md). - -These binaries may be used free of charge within the bounds of Constellation's [**Community License**](#community-license). An [**Enterprise License**](#enterprise-license) can be purchased from Edgeless Systems. - -The Constellation CLI displays relevant license information when you initialize your cluster. You are responsible for staying within the bounds of your respective license. Constellation doesn't enforce any limits so as not to endanger your cluster's availability. - -## Terraform provider - -Edgeless Systems provides a [Terraform provider](https://github.com/edgelesssys/terraform-provider-constellation/releases), which may be used free of charge within the bounds of Constellation's [**Community License**](#community-license). An [**Enterprise License**](#enterprise-license) can be purchased from Edgeless Systems. - -You are responsible for staying within the bounds of your respective license. Constellation doesn't enforce any limits so as not to endanger your cluster's availability. - -## Community License - -You are free to use the Constellation binaries provided by Edgeless Systems to create services for internal consumption, evaluation purposes, or non-commercial use. You must not use the Constellation binaries to provide commercial hosted services to third parties. Edgeless Systems gives no warranties and offers no support. - -## Enterprise License - -Enterprise Licenses don't have the above limitations and come with support and additional features. 
Find out more at the [product website](https://www.edgeless.systems/products/constellation/).
-
-Once you have received your Enterprise License file, place it in your [Constellation workspace](../architecture/orchestration.md#workspaces) in a file named `constellation.license`.
-
-## CSP Marketplaces
-
-Constellation is available through the Marketplaces of AWS, Azure, GCP, and STACKIT. This allows you to create self-managed Constellation clusters that are billed on a pay-per-use basis (hourly, per vCPU) with your CSP account. You can still get direct support from Edgeless Systems. For more information, please [contact us](https://www.edgeless.systems/enterprise-support/).
diff --git a/docs/versioned_docs/version-2.21/overview/performance/application.md b/docs/versioned_docs/version-2.21/overview/performance/application.md
deleted file mode 100644
index c67d59644..000000000
--- a/docs/versioned_docs/version-2.21/overview/performance/application.md
+++ /dev/null
@@ -1,102 +0,0 @@
-# Application benchmarks
-
-## HashiCorp Vault
-
-[HashiCorp Vault](https://www.vaultproject.io/) is distributed secrets management software that can be deployed to Kubernetes.
-HashiCorp maintains a benchmarking tool for Vault, [vault-benchmark](https://github.com/hashicorp/vault-benchmark/).
-Vault-benchmark generates load on a Vault deployment and measures response times.
-
-This article describes the results from running vault-benchmark on Constellation, AKS, and GKE.
-You can find the setup for producing the data discussed in this article in the [vault-benchmarks](https://github.com/edgelesssys/vault-benchmarks) repository.
-
-The Vault API used during benchmarking is the [transit secrets engine](https://developer.hashicorp.com/vault/docs/secrets/transit).
-This allows services to send data to Vault for encryption, decryption, signing, and verification.
-
-## Results
-
-On each run, vault-benchmark sends requests and measures the latencies.
-The measured latencies are aggregated through various statistical features.
-After running the benchmark n times, the arithmetic mean over a subset of the reported statistics is calculated.
-The selected features are arithmetic mean, 99th percentile, minimum, and maximum.
-
-Arithmetic mean gives a general sense of the latency on each target.
-The 99th percentile shows performance in (most likely) erroneous states.
-Minimum and maximum mark the range within which latency varies each run.
-
-The benchmark was configured with 1300 workers and 10 seconds per run.
-Those numbers were chosen empirically.
-Latency stabilized at 10 seconds of runtime and didn't change with longer runs.
-Increasing the number of workers beyond 1300 led to request failures, marking the limit of what Vault was able to handle in this setup.
-All results are based on 100 runs.
-
-The following data was generated while running five replicas: one primary and four standby nodes.
-All numbers are in seconds if not indicated otherwise.
-``` -========== Results AKS ========== -Mean: mean: 1.632200, variance: 0.002057 -P99: mean: 5.480679, variance: 2.263700 -Max: mean: 6.651001, variance: 2.808401 -Min: mean: 0.011415, variance: 0.000133 -========== Results GKE ========== -Mean: mean: 1.656435, variance: 0.003615 -P99: mean: 6.030807, variance: 3.955051 -Max: mean: 7.164843, variance: 3.300004 -Min: mean: 0.010233, variance: 0.000111 -========== Results C11n ========== -Mean: mean: 1.651549, variance: 0.001610 -P99: mean: 5.780422, variance: 3.016106 -Max: mean: 6.942997, variance: 3.075796 -Min: mean: 0.013774, variance: 0.000228 -========== AKS vs C11n ========== -Mean: +1.171577 % (AKS is faster) -P99: +5.185495 % (AKS is faster) -Max: +4.205618 % (AKS is faster) -Min: +17.128781 % (AKS is faster) -========== GKE vs C11n ========== -Mean: -0.295851 % (GKE is slower) -P99: -4.331603 % (GKE is slower) -Max: -3.195248 % (GKE is slower) -Min: +25.710886 % (GKE is faster) -``` - -**Interpretation**: Latencies are all within ~5% of each other. -AKS performs slightly better than GKE and Constellation (C11n) in all cases except minimum latency. -Minimum latency is the lowest for GKE. -Compared to GKE, Constellation had slightly lower peak latencies (99th percentile and maximum), indicating that Constellation could have handled slightly more concurrent accesses than GKE. -Overall, performance is at comparable levels across all three distributions. -Based on these numbers, you can use a similarly sized Constellation cluster to run your existing Vault deployment. - -### Visualization - -The following plots visualize the data presented above as [box plots](https://en.wikipedia.org/wiki/Box_plot). -The whiskers denote the minimum and maximum. -The box stretches from the 25th to the 75th percentile, with the dividing bar marking the 50th percentile. -The circles outside the whiskers denote outliers. - -
-Mean Latency - -![Mean Latency](../../_media/benchmark_vault/5replicas/mean_latency.png) - -
- -
-99th Percentile Latency - -![99th Percentile Latency](../../_media/benchmark_vault/5replicas/p99_latency.png) - -
- -
-Maximum Latency - -![Maximum Latency](../../_media/benchmark_vault/5replicas/max_latency.png) - -
- -
-Minimum Latency - -![Minimum Latency](../../_media/benchmark_vault/5replicas/min_latency.png) - -
diff --git a/docs/versioned_docs/version-2.21/overview/performance/compute.md b/docs/versioned_docs/version-2.21/overview/performance/compute.md deleted file mode 100644 index 88dd4b1b2..000000000 --- a/docs/versioned_docs/version-2.21/overview/performance/compute.md +++ /dev/null @@ -1,11 +0,0 @@ -# Impact of runtime encryption on compute performance - -All nodes in a Constellation cluster are executed inside Confidential VMs (CVMs). Consequently, the performance of Constellation is inherently linked to the performance of these CVMs. - -## AMD and Azure benchmarking - -AMD and Azure have collectively released a [performance benchmark](https://community.amd.com/t5/business/microsoft-azure-confidential-computing-powered-by-3rd-gen-epyc/ba-p/497796) for CVMs that utilize 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. This benchmark, which included a variety of mostly compute-intensive tests such as SPEC CPU 2017 and CoreMark, demonstrated that CVMs experience only minor performance degradation (ranging from 2% to 8%) when compared to standard VMs. Such results are indicative of the performance that can be expected from compute-intensive workloads running with Constellation on Azure. - -## AMD and Google benchmarking - -Similarly, AMD and Google have jointly released a [performance benchmark](https://www.amd.com/system/files/documents/3rd-gen-epyc-gcp-c2d-conf-compute-perf-brief.pdf) for CVMs employing 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. With high-performance computing workloads such as WRF, NAMD, Ansys CFS, and Ansys LS_DYNA, they observed analogous findings, with only minor performance degradation (between 2% and 4%) compared to standard VMs. These outcomes are reflective of the performance that can be expected for compute-intensive workloads running with Constellation on GCP. diff --git a/docs/versioned_docs/version-2.21/overview/performance/io.md b/docs/versioned_docs/version-2.21/overview/performance/io.md deleted file mode 100644 index 3ae796f8a..000000000 --- a/docs/versioned_docs/version-2.21/overview/performance/io.md +++ /dev/null @@ -1,204 +0,0 @@ -# I/O performance benchmarks - -To assess the overall performance of Constellation, this benchmark evaluates Constellation v2.6.0 in terms of storage I/O using [`fio`](https://fio.readthedocs.io/en/latest/fio_doc.html) and network performance using the [Kubernetes Network Benchmark](https://github.com/InfraBuilder/k8s-bench-suite#knb--kubernetes-network-be). - -This benchmark tested Constellation on Azure and GCP and compared the results against the managed Kubernetes offerings AKS and GKE. - -## Configurations - -### Constellation - -The benchmark was conducted with Constellation v2.6.0, Kubernetes v1.25.7, and Cilium v1.12. -It ran on the following infrastructure configurations. - -Constellation on Azure: - -- Nodes: 3 (1 Control-plane, 2 Worker) -- Machines: `DC4as_v5`: 3rd Generation AMD EPYC 7763v (Milan) processor with 4 Cores, 16 GiB memory -- CVM: `true` -- Region: `West US` -- Zone: `2` - -Constellation on GCP: - -- Nodes: 3 (1 Control-plane, 2 Worker) -- Machines: `n2d-standard-4`: 2nd Generation AMD EPYC (Rome) processor with 4 Cores, 16 GiB of memory -- CVM: `true` -- Zone: `europe-west3-b` - -### AKS - -On AKS, the benchmark used Kubernetes `v1.24.9` and nodes with version `AKSUbuntu-1804gen2containerd-2023.02.15`. 
-AKS ran with the [`kubenet`](https://learn.microsoft.com/en-us/azure/aks/concepts-network#kubenet-basic-networking) CNI and the [default CSI driver](https://learn.microsoft.com/en-us/azure/aks/azure-disk-csi) for Azure Disk.
-
-The following infrastructure configuration was used:
-
-- Nodes: 2 (2 Worker)
-- Machines: `D4as_v5`: 3rd Generation AMD EPYC 7763v (Milan) processor with 4 Cores, 16 GiB memory
-- CVM: `false`
-- Region: `West US`
-- Zone: `2`
-
-### GKE
-
-On GKE, the benchmark used Kubernetes `v1.24.9` and nodes with version `1.24.9-gke.3200`.
-GKE ran with the [`kubenet`](https://cloud.google.com/kubernetes-engine/docs/concepts/network-overview) CNI and the [default CSI driver](https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/gce-pd-csi-driver) for Compute Engine persistent disk.
-
-The following infrastructure configuration was used:
-
-- Nodes: 2 (2 Worker)
-- Machines: `n2d-standard-4`: 2nd Generation AMD EPYC (Rome) processor with 4 Cores, 16 GiB of memory
-- CVM: `false`
-- Zone: `europe-west3-b`
-
-## Results
-
-### Network
-
-This section gives a thorough analysis of the network performance of Constellation, specifically focusing on measuring TCP and UDP bandwidth.
-The benchmark measured the bandwidth of pod-to-pod and pod-to-service connections between two different nodes using [`iperf`](https://iperf.fr/).
-
-GKE and Constellation on GCP had a maximum network bandwidth of [10 Gbps](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines).
-AKS with `Standard_D4as_v5` machines has a maximum network bandwidth of [12.5 Gbps](https://learn.microsoft.com/en-us/azure/virtual-machines/dasv5-dadsv5-series#dasv5-series).
-The Confidential VM equivalent `Standard_DC4as_v5` currently has a network bandwidth of [1.25 Gbps](https://learn.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series#dcasv5-series-products).
-Therefore, to make the test comparable, both AKS and Constellation on Azure were running with `Standard_DC4as_v5` machines and 1.25 Gbps bandwidth.
-
-Constellation on Azure and AKS used an MTU of 1500.
-Constellation on GCP used an MTU of 8896. GKE used an MTU of 1450.
-
-The difference in network bandwidth can largely be attributed to two factors.
-
-- Constellation's [network encryption](../../architecture/networking.md) via Cilium and WireGuard, which protects data in transit.
-- [AMD SEV using SWIOTLB bounce buffers](https://lore.kernel.org/all/20200204193500.GA15564@ashkalra_ubuntu_server/T/) for all DMA including network I/O.
-
-#### Pod-to-Pod
-
-In this scenario, the client Pod connects directly to the server Pod via its IP address.
-
-```mermaid
-flowchart LR
-    subgraph Node A
-    Client[Client]
-    end
-    subgraph Node B
-    Server[Server]
-    end
-    Client ==>|traffic| Server
-```
-
-The results for "Pod-to-Pod" on Azure are as follows:
-
-![Network Pod2Pod Azure benchmark graph](../../_media/benchmark_net_p2p_azure.png)
-
-The results for "Pod-to-Pod" on GCP are as follows:
-
-![Network Pod2Pod GCP benchmark graph](../../_media/benchmark_net_p2p_gcp.png)
-
-#### Pod-to-Service
-
-In this scenario, the client Pod connects to the server Pod via a ClusterIP service. This is more relevant to real-world use cases.
-
-```mermaid
-flowchart LR
-    subgraph Node A
-    Client[Client] ==>|traffic| Service[Service]
-    end
-    subgraph Node B
-    Server[Server]
-    end
-    Service ==>|traffic| Server
-```
-
-The results for "Pod-to-Service" on Azure are as follows:
-
-![Network Pod2SVC Azure benchmark graph](../../_media/benchmark_net_p2svc_azure.png)
-
-The results for "Pod-to-Service" on GCP are as follows:
-
-![Network Pod2SVC GCP benchmark graph](../../_media/benchmark_net_p2svc_gcp.png)
-
-In our recent comparison of Constellation on GCP with GKE, Constellation had 58% less TCP bandwidth. However, UDP bandwidth was slightly better with Constellation, thanks to its higher MTU.
-
-Similarly, when comparing Constellation on Azure with AKS using CVMs, Constellation achieved approximately 10% less TCP and 40% less UDP bandwidth.
-
-### Storage I/O
-
-Azure and GCP offer persistent storage for their Kubernetes services AKS and GKE via the Container Storage Interface (CSI). CSI storage in Kubernetes is available via `PersistentVolumes` (PV) and consumed via `PersistentVolumeClaims` (PVC).
-Upon requesting persistent storage through a PVC, GKE and AKS will provision a PV as defined by a default [storage class](https://kubernetes.io/docs/concepts/storage/storage-classes/).
-Constellation provides persistent storage on Azure and GCP [that's encrypted on the CSI layer](../../architecture/encrypted-storage.md).
-Similarly, upon a PVC request, Constellation will provision a PV via a default storage class.
-
-For Constellation on Azure and AKS, the benchmark ran with Azure Disk storage [Standard SSD](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#standard-ssds) of 400 GiB size.
-The [DC4as machine type](https://learn.microsoft.com/en-us/azure/virtual-machines/dasv5-dadsv5-series#dasv5-series) with four cores provides the following maximum performance:
-
-- 6400 (20000 burst) IOPS
-- 144 MB/s (600 MB/s burst) throughput
-
-However, the performance is bound by the capabilities of the [512 GiB Standard SSD size](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#standard-ssds) (the size class of 400 GiB volumes):
-
-- 500 (600 burst) IOPS
-- 60 MB/s (150 MB/s burst) throughput
-
-For Constellation on GCP and GKE, the benchmark ran with Compute Engine Persistent Disk Storage [pd-balanced](https://cloud.google.com/compute/docs/disks) of 400 GiB size.
-The N2D machine type with four cores and pd-balanced provides the following [maximum performance](https://cloud.google.com/compute/docs/disks/performance#n2d_vms):
-
-- 3,000 read IOPS
-- 15,000 write IOPS
-- 240 MB/s read throughput
-- 240 MB/s write throughput
-
-However, the performance is bound by the capabilities of a [`Zonal balanced PD`](https://cloud.google.com/compute/docs/disks/performance#zonal-persistent-disks) with 400 GiB size:
-
-- 2400 read IOPS
-- 2400 write IOPS
-- 112 MB/s read throughput
-- 112 MB/s write throughput
-
-The [`fio`](https://fio.readthedocs.io/en/latest/fio_doc.html) benchmark consists of several tests.
-The benchmark used [`Kubestr`](https://github.com/kastenhq/kubestr) to run `fio` in Kubernetes.
-The default test performs randomized access patterns that accurately depict worst-case I/O scenarios for most applications.
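As a rough, illustrative sketch of such a run (not the exact pipeline used for these measurements), Kubestr can execute an `fio` job file against a given storage class. The storage-class placeholder and the job-file name below are assumptions, and flag names may differ between Kubestr versions, so check `kubestr fio --help`.

```bash
# Illustrative only: run an fio job file against a storage class with Kubestr.
# <storage-class> and fio.ini are placeholders; the benchmark's actual job file is linked below.
kubestr fio --storageclass <storage-class> --size 400Gi --fiofile fio.ini
```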
-
-The following `fio` settings were used:
-
-- No cloud caching
-- No OS caching
-- Single CPU
-- 60 seconds runtime
-- 10 seconds ramp-up time
-- 10 GiB file
-- IOPS: 4 KB blocks and 128 iodepth
-- Bandwidth: 1024 KB blocks and 128 iodepth
-
-For more details, see the [`fio` test configuration](https://github.com/edgelesssys/constellation/blob/main/.github/actions/e2e_benchmark/fio.ini).
-
-The results for IOPS on Azure are as follows:
-
-![I/O IOPS Azure benchmark graph](../../_media/benchmark_fio_azure_iops.png)
-
-The results for IOPS on GCP are as follows:
-
-![I/O IOPS GCP benchmark graph](../../_media/benchmark_fio_gcp_iops.png)
-
-The results for bandwidth on Azure are as follows:
-
-![I/O bandwidth Azure benchmark graph](../../_media/benchmark_fio_azure_bw.png)
-
-The results for bandwidth on GCP are as follows:
-
-![I/O bandwidth GCP benchmark graph](../../_media/benchmark_fio_gcp_bw.png)
-
-On GCP, the results exceed the maximum performance guarantees of the chosen disk type. There are two possible explanations for this. The first is that there may be cloud caching in place that isn't configurable. Alternatively, the underlying provisioned disk size may be larger than what was requested, resulting in higher performance boundaries.
-
-When comparing Constellation on GCP with GKE, Constellation has similar bandwidth but about 10% less IOPS performance. On Azure, Constellation has similar IOPS performance compared to AKS, where both likely hit the maximum storage performance. However, Constellation has approximately 15% less read and write bandwidth.
-
-## Conclusion
-
-Despite the added [security benefits](../security-benefits.md) that Constellation provides, it only incurs a slight performance overhead when compared to managed Kubernetes offerings such as AKS and GKE. In most compute benchmarks, Constellation is on par with its alternatives.
-While it may be slightly slower in certain I/O scenarios due to network and storage encryption, there is ongoing work to reduce this overhead to single digits.
-
-For instance, storage encryption only adds between 10% and 15% overhead in terms of bandwidth and IOPS.
-Meanwhile, the biggest performance impact that Constellation currently faces is network encryption, which can incur up to 58% overhead on a 10 Gbps network.
-However, the Cilium team has conducted [benchmarks with Cilium using WireGuard encryption](https://docs.cilium.io/en/latest/operations/performance/benchmark/#encryption-wireguard-ipsec) on a 100 Gbps network that yielded over 15 Gbps.
-We're confident that Constellation will provide a similar level of performance with an upcoming release.
-
-Overall, Constellation strikes a great balance between security and performance, and we're continuously working to improve its performance capabilities while maintaining its high level of security.
diff --git a/docs/versioned_docs/version-2.21/overview/performance/performance.md b/docs/versioned_docs/version-2.21/overview/performance/performance.md
deleted file mode 100644
index 59bf86602..000000000
--- a/docs/versioned_docs/version-2.21/overview/performance/performance.md
+++ /dev/null
@@ -1,17 +0,0 @@
-# Performance analysis of Constellation
-
-This section provides a comprehensive examination of the performance characteristics of Constellation.
-
-## Runtime encryption
-
-Runtime encryption affects compute performance.
[Benchmarks by Azure and Google](compute.md) show that the performance degradation of Confidential VMs (CVMs) is small, ranging from 2% to 8% for compute-intensive workloads. - -## I/O performance benchmarks - -We evaluated the [I/O performance](io.md) of Constellation, utilizing a collection of synthetic benchmarks targeting networking and storage. -We further compared this performance to native managed Kubernetes offerings from various cloud providers, to better understand how Constellation stands in relation to standard practices. - -## Application benchmarking - -To gauge Constellation's applicability to well-known applications, we performed a [benchmark of HashiCorp Vault](application.md) running on Constellation. -The results were then compared to deployments on the managed Kubernetes offerings from different cloud providers, providing a tangible perspective on Constellation's performance in actual deployment scenarios. diff --git a/docs/versioned_docs/version-2.21/overview/product.md b/docs/versioned_docs/version-2.21/overview/product.md deleted file mode 100644 index 4b5d90706..000000000 --- a/docs/versioned_docs/version-2.21/overview/product.md +++ /dev/null @@ -1,12 +0,0 @@ -# Product features - -Constellation is a Kubernetes engine that aims to provide the best possible data security in combination with enterprise-grade scalability and reliability features---and a smooth user experience. - -From a security perspective, Constellation implements the [Confidential Kubernetes](confidential-kubernetes.md) concept and corresponding security features, which shield your entire cluster from the underlying infrastructure. - -From an operational perspective, Constellation provides the following key features: - -* **Native support for different clouds**: Constellation works on Amazon Web Services (AWS), Microsoft Azure, Google Cloud Platform (GCP), and STACKIT. Support for OpenStack-based environments is coming with a future release. Constellation securely interfaces with the cloud infrastructure to provide [cluster autoscaling](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), [dynamic persistent volumes](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/), and [service load balancing](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). -* **High availability**: Constellation uses a [multi-master architecture](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/) with a [stacked etcd topology](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/ha-topology/#stacked-etcd-topology) to ensure high availability. -* **Integrated Day-2 operations**: Constellation lets you securely [upgrade](../workflows/upgrade.md) your cluster to a new release. It also lets you securely [recover](../workflows/recovery.md) a failed cluster. Both with a single command. -* **Support for Terraform**: Constellation includes a [Terraform provider](../workflows/terraform-provider.md) that lets you manage the full lifecycle of your cluster via Terraform. 
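The Day-2 operations mentioned above map to single CLI commands. As a brief, non-exhaustive sketch (the commands are documented in the CLI reference later in this document; endpoints and other cluster-specific values are placeholders):

```bash
# Check for available Constellation upgrades and apply them to the running cluster.
constellation upgrade check
constellation upgrade apply

# Recover a completely stopped cluster by sending the recovery key to a node.
constellation recover --endpoint <node-ip>
```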
diff --git a/docs/versioned_docs/version-2.21/overview/security-benefits.md b/docs/versioned_docs/version-2.21/overview/security-benefits.md
deleted file mode 100644
index 51a8b64f5..000000000
--- a/docs/versioned_docs/version-2.21/overview/security-benefits.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# Security benefits and threat model
-
-Constellation implements the [Confidential Kubernetes](confidential-kubernetes.md) concept and shields entire Kubernetes deployments from the infrastructure. More concretely, Constellation decreases the size of the trusted computing base (TCB) of a Kubernetes deployment. The TCB is the totality of elements in a computing environment that must be trusted not to be compromised. A smaller TCB results in a smaller attack surface. The following diagram shows how Constellation removes the *cloud & datacenter infrastructure* and the *physical hosts*, including the hypervisor, the host OS, and other components, from the TCB (red). Inside the confidential context (green), Kubernetes remains part of the TCB, but its integrity is attested and can be [verified](../workflows/verify-cluster.md).
-
-![TCB comparison](../_media/tcb.svg)
-
-Given this background, the following describes the concrete threat classes that Constellation addresses.
-
-## Insider access
-
-Employees and third-party contractors of cloud service providers (CSPs) have access to different layers of the cloud infrastructure.
-This opens up a large attack surface where workloads and data can be read, copied, or manipulated. With Constellation, Kubernetes deployments are shielded from the infrastructure and thus such access is prevented.
-
-## Infrastructure-based attacks
-
-Malicious cloud users ("hackers") may break out of their tenancy and access other tenants' data. Advanced attackers may even be able to establish a permanent foothold within the infrastructure and access data over a longer period. Analogously to the *insider access* scenario, Constellation also prevents access to a deployment's data in this scenario.
-
-## Supply chain attacks
-
-Supply chain security has received a lot of attention recently due to an [increasing number of recorded attacks](https://www.enisa.europa.eu/news/enisa-news/understanding-the-increase-in-supply-chain-security-attacks). For instance, a malicious actor could attempt to tamper with Constellation node images (including Kubernetes and other software) before they're loaded in the confidential VMs of a cluster. Constellation uses [remote attestation](../architecture/attestation.md) in conjunction with public [transparency logs](../workflows/verify-cli.md) to prevent this.
-
-In the future, Constellation will extend this feature to customer workloads. This will enable cluster owners to create auditable policies that precisely define which containers can run in a given deployment.
diff --git a/docs/versioned_docs/version-2.21/reference/cli.md b/docs/versioned_docs/version-2.21/reference/cli.md
deleted file mode 100644
index 7bb4d5b40..000000000
--- a/docs/versioned_docs/version-2.21/reference/cli.md
+++ /dev/null
@@ -1,873 +0,0 @@
-# CLI reference
-
-
-
-Use the Constellation CLI to create and manage your clusters.
- -Usage: - -``` -constellation [command] -``` -Commands: - -* [config](#constellation-config): Work with the Constellation configuration file - * [generate](#constellation-config-generate): Generate a default configuration and state file - * [fetch-measurements](#constellation-config-fetch-measurements): Fetch measurements for configured cloud provider and image - * [instance-types](#constellation-config-instance-types): Print the supported instance types for all cloud providers - * [kubernetes-versions](#constellation-config-kubernetes-versions): Print the Kubernetes versions supported by this CLI - * [migrate](#constellation-config-migrate): Migrate a configuration file to a new version -* [create](#constellation-create): Create instances on a cloud platform for your Constellation cluster -* [apply](#constellation-apply): Apply a configuration to a Constellation cluster -* [mini](#constellation-mini): Manage MiniConstellation clusters - * [up](#constellation-mini-up): Create and initialize a new MiniConstellation cluster - * [down](#constellation-mini-down): Destroy a MiniConstellation cluster -* [status](#constellation-status): Show status of a Constellation cluster -* [verify](#constellation-verify): Verify the confidential properties of a Constellation cluster -* [upgrade](#constellation-upgrade): Find and apply upgrades to your Constellation cluster - * [check](#constellation-upgrade-check): Check for possible upgrades - * [apply](#constellation-upgrade-apply): Apply an upgrade to a Constellation cluster -* [recover](#constellation-recover): Recover a completely stopped Constellation cluster -* [terminate](#constellation-terminate): Terminate a Constellation cluster -* [iam](#constellation-iam): Work with the IAM configuration on your cloud provider - * [create](#constellation-iam-create): Create IAM configuration on a cloud platform for your Constellation cluster - * [aws](#constellation-iam-create-aws): Create IAM configuration on AWS for your Constellation cluster - * [azure](#constellation-iam-create-azure): Create IAM configuration on Microsoft Azure for your Constellation cluster - * [gcp](#constellation-iam-create-gcp): Create IAM configuration on GCP for your Constellation cluster - * [destroy](#constellation-iam-destroy): Destroy an IAM configuration and delete local Terraform files - * [upgrade](#constellation-iam-upgrade): Find and apply upgrades to your IAM profile - * [apply](#constellation-iam-upgrade-apply): Apply an upgrade to an IAM profile -* [version](#constellation-version): Display version of this CLI -* [init](#constellation-init): Initialize the Constellation cluster -* [ssh](#constellation-ssh): Prepare your cluster for emergency ssh access - -## constellation config - -Work with the Constellation configuration file - -### Synopsis - -Work with the Constellation configuration file. - -### Options - -``` - -h, --help help for config -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation config generate - -Generate a default configuration and state file - -### Synopsis - -Generate a default configuration and state file for your selected cloud provider. 
- -``` -constellation config generate {aws|azure|gcp|openstack|qemu|stackit} [flags] -``` - -### Options - -``` - -a, --attestation string attestation variant to use {aws-sev-snp|aws-nitro-tpm|azure-sev-snp|azure-tdx|azure-trustedlaunch|gcp-sev-snp|gcp-sev-es|qemu-vtpm}. If not specified, the default for the cloud provider is used - -h, --help help for generate - -k, --kubernetes string Kubernetes version to use in format MAJOR.MINOR (default "v1.30") - -t, --tags strings additional tags for created resources given a list of key=value -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation config fetch-measurements - -Fetch measurements for configured cloud provider and image - -### Synopsis - -Fetch measurements for configured cloud provider and image. - -A config needs to be generated first. - -``` -constellation config fetch-measurements [flags] -``` - -### Options - -``` - -h, --help help for fetch-measurements - -s, --signature-url string alternative URL to fetch measurements' signature from - -u, --url string alternative URL to fetch measurements from -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation config instance-types - -Print the supported instance types for all cloud providers - -### Synopsis - -Print the supported instance types for all cloud providers. - -``` -constellation config instance-types [flags] -``` - -### Options - -``` - -h, --help help for instance-types -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation config kubernetes-versions - -Print the Kubernetes versions supported by this CLI - -### Synopsis - -Print the Kubernetes versions supported by this CLI. - -``` -constellation config kubernetes-versions [flags] -``` - -### Options - -``` - -h, --help help for kubernetes-versions -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation config migrate - -Migrate a configuration file to a new version - -### Synopsis - -Migrate a configuration file to a new version. - -``` -constellation config migrate [flags] -``` - -### Options - -``` - -h, --help help for migrate -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation create - -Create instances on a cloud platform for your Constellation cluster - -### Synopsis - -Create instances on a cloud platform for your Constellation cluster. 
- -``` -constellation create [flags] -``` - -### Options - -``` - -h, --help help for create - -y, --yes create the cluster without further confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation apply - -Apply a configuration to a Constellation cluster - -### Synopsis - -Apply a configuration to a Constellation cluster to initialize or upgrade the cluster. - -``` -constellation apply [flags] -``` - -### Options - -``` - --conformance enable conformance mode - -h, --help help for apply - --merge-kubeconfig merge Constellation kubeconfig file with default kubeconfig file in $HOME/.kube/config - --skip-helm-wait install helm charts without waiting for deployments to be ready - --skip-phases strings comma-separated list of upgrade phases to skip - one or multiple of { infrastructure | init | attestationconfig | certsans | helm | image | k8s } - -y, --yes run command without further confirmation - WARNING: the command might delete or update existing resources without additional checks. Please read the docs. - -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation mini - -Manage MiniConstellation clusters - -### Synopsis - -Manage MiniConstellation clusters. - -### Options - -``` - -h, --help help for mini -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation mini up - -Create and initialize a new MiniConstellation cluster - -### Synopsis - -Create and initialize a new MiniConstellation cluster. - -A mini cluster consists of a single control-plane and worker node, hosted using QEMU/KVM. - -``` -constellation mini up [flags] -``` - -### Options - -``` - -h, --help help for up - --merge-kubeconfig merge Constellation kubeconfig file with default kubeconfig file in $HOME/.kube/config (default true) -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation mini down - -Destroy a MiniConstellation cluster - -### Synopsis - -Destroy a MiniConstellation cluster. - -``` -constellation mini down [flags] -``` - -### Options - -``` - -h, --help help for down - -y, --yes terminate the cluster without further confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation status - -Show status of a Constellation cluster - -### Synopsis - -Show the status of a constellation cluster. 
- -Shows microservice, image, and Kubernetes versions installed in the cluster. Also shows status of current version upgrades. - -``` -constellation status [flags] -``` - -### Options - -``` - -h, --help help for status -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation verify - -Verify the confidential properties of a Constellation cluster - -### Synopsis - -Verify the confidential properties of a Constellation cluster. -If arguments aren't specified, values are read from `constellation-state.yaml`. - -``` -constellation verify [flags] -``` - -### Options - -``` - --cluster-id string expected cluster identifier - -h, --help help for verify - -e, --node-endpoint string endpoint of the node to verify, passed as HOST[:PORT] - -o, --output string print the attestation document in the output format {json|raw} -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation upgrade - -Find and apply upgrades to your Constellation cluster - -### Synopsis - -Find and apply upgrades to your Constellation cluster. - -### Options - -``` - -h, --help help for upgrade -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation upgrade check - -Check for possible upgrades - -### Synopsis - -Check which upgrades can be applied to your Constellation Cluster. - -``` -constellation upgrade check [flags] -``` - -### Options - -``` - -h, --help help for check - --ref string the reference to use for querying new versions (default "-") - --stream string the stream to use for querying new versions (default "stable") - -u, --update-config update the specified config file with the suggested versions -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation upgrade apply - -Apply an upgrade to a Constellation cluster - -### Synopsis - -Apply an upgrade to a Constellation cluster by applying the chosen configuration. - -``` -constellation upgrade apply [flags] -``` - -### Options - -``` - --conformance enable conformance mode - -h, --help help for apply - --skip-helm-wait install helm charts without waiting for deployments to be ready - --skip-phases strings comma-separated list of upgrade phases to skip - one or multiple of { infrastructure | helm | image | k8s } - -y, --yes run upgrades without further confirmation - WARNING: might delete your resources in case you are using cert-manager in your cluster. Please read the docs. - WARNING: might unintentionally overwrite measurements in the running cluster. 
-``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation recover - -Recover a completely stopped Constellation cluster - -### Synopsis - -Recover a Constellation cluster by sending a recovery key to an instance in the boot stage. - -This is only required if instances restart without other instances available for bootstrapping. - -``` -constellation recover [flags] -``` - -### Options - -``` - -e, --endpoint string endpoint of the instance, passed as HOST[:PORT] - -h, --help help for recover -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation terminate - -Terminate a Constellation cluster - -### Synopsis - -Terminate a Constellation cluster. - -The cluster can't be started again, and all persistent storage will be lost. - -``` -constellation terminate [flags] -``` - -### Options - -``` - -h, --help help for terminate - -y, --yes terminate the cluster without further confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation iam - -Work with the IAM configuration on your cloud provider - -### Synopsis - -Work with the IAM configuration on your cloud provider. - -### Options - -``` - -h, --help help for iam -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation iam create - -Create IAM configuration on a cloud platform for your Constellation cluster - -### Synopsis - -Create IAM configuration on a cloud platform for your Constellation cluster. - -### Options - -``` - -h, --help help for create - --update-config update the config file with the specific IAM information - -y, --yes create the IAM configuration without further confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation iam create aws - -Create IAM configuration on AWS for your Constellation cluster - -### Synopsis - -Create IAM configuration on AWS for your Constellation cluster. - -``` -constellation iam create aws [flags] -``` - -### Options - -``` - -h, --help help for aws - --prefix string name prefix for all resources (required) - --zone string AWS availability zone the resources will be created in, e.g., us-east-2a (required) - See the Constellation docs for a list of currently supported regions. 
-``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - --update-config update the config file with the specific IAM information - -C, --workspace string path to the Constellation workspace - -y, --yes create the IAM configuration without further confirmation -``` - -## constellation iam create azure - -Create IAM configuration on Microsoft Azure for your Constellation cluster - -### Synopsis - -Create IAM configuration on Microsoft Azure for your Constellation cluster. - -``` -constellation iam create azure [flags] -``` - -### Options - -``` - -h, --help help for azure - --region string region the resources will be created in, e.g., westus (required) - --resourceGroup string name prefix of the two resource groups your cluster / IAM resources will be created in (required) - --servicePrincipal string name of the service principal that will be created (required) - --subscriptionID string subscription ID of the Azure account. Required if the 'ARM_SUBSCRIPTION_ID' environment variable is not set -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - --update-config update the config file with the specific IAM information - -C, --workspace string path to the Constellation workspace - -y, --yes create the IAM configuration without further confirmation -``` - -## constellation iam create gcp - -Create IAM configuration on GCP for your Constellation cluster - -### Synopsis - -Create IAM configuration on GCP for your Constellation cluster. - -``` -constellation iam create gcp [flags] -``` - -### Options - -``` - -h, --help help for gcp - --projectID string ID of the GCP project the configuration will be created in (required) - Find it on the welcome screen of your project: https://console.cloud.google.com/welcome - --serviceAccountID string ID for the service account that will be created (required) - Must be 6 to 30 lowercase letters, digits, or hyphens. - --zone string GCP zone the cluster will be deployed in (required) - Find a list of available zones here: https://cloud.google.com/compute/docs/regions-zones#available -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - --update-config update the config file with the specific IAM information - -C, --workspace string path to the Constellation workspace - -y, --yes create the IAM configuration without further confirmation -``` - -## constellation iam destroy - -Destroy an IAM configuration and delete local Terraform files - -### Synopsis - -Destroy an IAM configuration and delete local Terraform files. 
- -``` -constellation iam destroy [flags] -``` - -### Options - -``` - -h, --help help for destroy - -y, --yes destroy the IAM configuration without asking for confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation iam upgrade - -Find and apply upgrades to your IAM profile - -### Synopsis - -Find and apply upgrades to your IAM profile. - -### Options - -``` - -h, --help help for upgrade -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation iam upgrade apply - -Apply an upgrade to an IAM profile - -### Synopsis - -Apply an upgrade to an IAM profile. - -``` -constellation iam upgrade apply [flags] -``` - -### Options - -``` - -h, --help help for apply - -y, --yes run upgrades without further confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation version - -Display version of this CLI - -### Synopsis - -Display version of this CLI. - -``` -constellation version [flags] -``` - -### Options - -``` - -h, --help help for version -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation init - -Initialize the Constellation cluster - -### Synopsis - -Initialize the Constellation cluster. - -Start your confidential Kubernetes. - -``` -constellation init [flags] -``` - -### Options - -``` - --conformance enable conformance mode - -h, --help help for init - --merge-kubeconfig merge Constellation kubeconfig file with default kubeconfig file in $HOME/.kube/config - --skip-helm-wait install helm charts without waiting for deployments to be ready -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation ssh - -Prepare your cluster for emergency ssh access - -### Synopsis - -Prepare your cluster for emergency ssh access and sign a given key pair for authorization. 
- -``` -constellation ssh [flags] -``` - -### Options - -``` - -h, --help help for ssh - --key string the path to an existing ssh public key -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - diff --git a/docs/versioned_docs/version-2.21/reference/migration.md b/docs/versioned_docs/version-2.21/reference/migration.md deleted file mode 100644 index 0252c409f..000000000 --- a/docs/versioned_docs/version-2.21/reference/migration.md +++ /dev/null @@ -1,128 +0,0 @@ -# Migrations - -This document describes breaking changes and migrations between Constellation releases. -Use [`constellation config migrate`](./cli.md#constellation-config-migrate) to automatically update an old config file to a new format. - -## Migrations to v2.19.1 - -### Azure - -* During the upgrade, security rules are migrated and the old ones need to be cleaned up manually by the user. The below script shows how to delete them through the Azure CLI: - -```bash -#!/usr/bin/env bash -name="" # the name provided in the config -uid="" # the cluster id can be retrieved via `yq '.infrastructure.uid' constellation-state.yaml` -resource_group="" # the RG can be retrieved via `yq '.provider.azure.resourceGroup' constellation-conf.yaml` - -rules=( - "kubernetes" - "bootstrapper" - "verify" - "recovery" - "join" - "debugd" - "konnectivity" -) - -for rule in "${rules[@]}"; do - echo "Deleting rule: ${rule}" - az network nsg rule delete \ - --resource-group "${resource_group}" \ - --nsg-name "${name}-${uid}" \ - --name "${rule}" -done - -echo "All specified rules have been deleted." -``` - -## Migrations to v2.19.0 - -### Azure - -* To allow seamless upgrades on Azure when Kubernetes services of type `LoadBalancer` are deployed, the target - load balancer in which the `cloud-controller-manager` creates load balancing rules was changed. Instead of using the load balancer - created and maintained by the CLI's Terraform code, the `cloud-controller-manager` now creates its own load balancer in Azure. - If your Constellation has services of type `LoadBalancer`, please remove them before the upgrade and re-apply them - afterward. - -## Migrating from Azure's service principal authentication to managed identity authentication (during the upgrade to Constellation v2.8.0) - -* The `provider.azure.appClientID` and `provider.azure.appClientSecret` fields are no longer supported and should be removed. -* To keep using an existing UAMI, add the `Owner` permission with the scope of your `resourceGroup`. -* Otherwise, simply [create new Constellation IAM credentials](../workflows/config.md#creating-an-iam-configuration) and use the created UAMI. -* To migrate the authentication for an existing cluster on Azure to an UAMI with the necessary permissions: - 1. Remove the `aadClientId` and `aadClientSecret` from the azureconfig secret. - 2. Set `useManagedIdentityExtension` to `true` and use the `userAssignedIdentity` from the Constellation config for the value of `userAssignedIdentityID`. - 3. Restart the CSI driver, cloud controller manager, cluster autoscaler, and Constellation operator pods. - -## Migrating from CLI versions before 2.10 - -* AWS cluster upgrades require additional IAM permissions for the newly introduced `aws-load-balancer-controller`. Please upgrade your IAM roles using `iam upgrade apply`. 
This will show necessary changes and apply them, if desired. -* The global `nodeGroups` field was added. -* The fields `instanceType`, `stateDiskSizeGB`, and `stateDiskType` for each cloud provider are now part of the configuration of individual node groups. -* The `constellation create` command no longer uses the flags `--control-plane-count` and `--worker-count`. Instead, the initial node count is configured per node group in the `nodeGroups` field. - -## Migrating from CLI versions before 2.9 - -* The `provider.azure.appClientID` and `provider.azure.clientSecretValue` fields were removed to enforce migration to managed identity authentication - -## Migrating from CLI versions before 2.8 - -* The `measurements` field for each cloud service provider was replaced with a global `attestation` field. -* The `confidentialVM`, `idKeyDigest`, and `enforceIdKeyDigest` fields for the Azure cloud service provider were removed in favor of using the global `attestation` field. -* The optional global field `attestationVariant` was replaced by the now required `attestation` field. - -## Migrating from CLI versions before 2.3 - -* The `sshUsers` field was deprecated in v2.2 and has been removed from the configuration in v2.3. - As an alternative for SSH, check the workflow section [Connect to nodes](../workflows/troubleshooting.md#node-shell-access). -* The `image` field for each cloud service provider has been replaced with a global `image` field. Use the following mapping to migrate your configuration: -
- Show all - - | CSP | old image | new image | - | ----- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- | - | AWS | `ami-06b8cbf4837a0a57c` | `v2.2.2` | - | AWS | `ami-02e96dc04a9e438cd` | `v2.2.2` | - | AWS | `ami-028ead928a9034b2f` | `v2.2.2` | - | AWS | `ami-032ac10dd8d8266e3` | `v2.2.1` | - | AWS | `ami-032e0d57cc4395088` | `v2.2.1` | - | AWS | `ami-053c3e49e19b96bdd` | `v2.2.1` | - | AWS | `ami-0e27ebcefc38f648b` | `v2.2.0` | - | AWS | `ami-098cd37f66523b7c3` | `v2.2.0` | - | AWS | `ami-04a87d302e2509aad` | `v2.2.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.2.2` | `v2.2.2` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.2.2` | `v2.2.2` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.2.1` | `v2.2.1` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.2.1` | `v2.2.1` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.2.0` | `v2.2.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.2.0` | `v2.2.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.1.0` | `v2.1.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.1.0` | `v2.1.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.0.0` | `v2.0.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.0.0` | `v2.0.0` | - | GCP | `projects/constellation-images/global/images/constellation-v2-2-2` | `v2.2.2` | - | GCP | `projects/constellation-images/global/images/constellation-v2-2-1` | `v2.2.1` | - | GCP | `projects/constellation-images/global/images/constellation-v2-2-0` | `v2.2.0` | - | GCP | `projects/constellation-images/global/images/constellation-v2-1-0` | `v2.1.0` | - | GCP | `projects/constellation-images/global/images/constellation-v2-0-0` | `v2.0.0` | - -
-* The `enforcedMeasurements` field has been removed and merged with the `measurements` field. - * To migrate your config containing a new image (`v2.3` or greater), remove the old `measurements` and `enforcedMeasurements` entries from your config and run `constellation fetch-measurements` - * To migrate your config containing an image older than `v2.3`, remove the `enforcedMeasurements` entry and replace the entries in `measurements` as shown in the example below: - - ```diff - measurements: - - 0: DzXCFGCNk8em5ornNZtKi+Wg6Z7qkQfs5CfE3qTkOc8= - + 0: - + expected: DzXCFGCNk8em5ornNZtKi+Wg6Z7qkQfs5CfE3qTkOc8= - + warnOnly: true - - 8: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= - + 8: - + expected: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= - + warnOnly: false - -enforcedMeasurements: - - - 8 - ``` diff --git a/docs/versioned_docs/version-2.21/reference/slsa.md b/docs/versioned_docs/version-2.21/reference/slsa.md deleted file mode 100644 index 21f4e713c..000000000 --- a/docs/versioned_docs/version-2.21/reference/slsa.md +++ /dev/null @@ -1,73 +0,0 @@ -# Supply chain levels for software artifacts (SLSA) adoption - -[Supply chain Levels for Software Artifacts, or SLSA (salsa)](https://slsa.dev/) is a framework for improving and grading a project's build system and engineering processes. SLSA focuses on security improvements for source code storage as well as build system definition, execution, and observation. SLSA is structured in [four levels](https://slsa.dev/spec/v0.1/levels). This page describes the adoption of SLSA for Constellation. - -:::info -SLSA is still in alpha status. The presented levels and their requirements might change in the future. We will adopt any changes into our engineering processes, as they get defined. -::: - -## Level 1 - Adopted - -**[Build - Scripted](https://slsa.dev/spec/v0.1/requirements#scripted-build)** - -All build steps are automated via [Bazel](https://github.com/edgelesssys/constellation/tree/main/bazel/ci) and [GitHub Actions](https://github.com/edgelesssys/constellation/tree/main/.github). - -**[Provenance - Available](https://slsa.dev/spec/v0.1/requirements#available)** - -Provenance for the CLI is generated using the [slsa-github-generator](https://github.com/slsa-framework/slsa-github-generator). - -## Level 2 - Adopted - -**[Source - Version Controlled](https://slsa.dev/spec/v0.1/requirements#version-controlled)** - -Constellation is hosted on GitHub using git. - -**[Build - Build Service](https://slsa.dev/spec/v0.1/requirements#build-service)** - -All builds are carried out by [GitHub Actions](https://github.com/edgelesssys/constellation/tree/main/.github). - -**[Provenance - Authenticated](https://slsa.dev/spec/v0.1/requirements#authenticated)** - -Provenance for the CLI is signed using the [slsa-github-generator](https://github.com/slsa-framework/slsa-github-generator). Learn [how to verify the CLI](../workflows/verify-cli.md) using the signed provenance, before using it for the first time. - -**[Provenance - Service Generated](https://slsa.dev/spec/v0.1/requirements#service-generated)** - -Provenance for the CLI is generated using the [slsa-github-generator](https://github.com/slsa-framework/slsa-github-generator) in GitHub Actions. 
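For a concrete picture of what verifying this provenance looks like, the following is a minimal sketch using the generic [slsa-verifier](https://github.com/slsa-framework/slsa-verifier) tool. The artifact and provenance file names are assumptions for illustration; the CLI verification guide linked above remains the authoritative reference.

```bash
# Download a CLI binary and its provenance from the GitHub release page (file names assumed).
curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-linux-amd64
curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation.intoto.jsonl

# Check that the provenance was generated from the expected source repository.
slsa-verifier verify-artifact constellation-linux-amd64 \
  --provenance-path constellation.intoto.jsonl \
  --source-uri github.com/edgelesssys/constellation
```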
-
-## Level 3 - Adopted
-
-**[Source - Verified History](https://slsa.dev/spec/v0.1/requirements#verified-history)**
-
-The [Edgeless Systems](https://github.com/edgelesssys) GitHub organization [requires two-factor authentication](https://docs.github.com/en/organizations/keeping-your-organization-secure/managing-two-factor-authentication-for-your-organization/requiring-two-factor-authentication-in-your-organization) for all members.
-
-**[Source - Retained Indefinitely](https://slsa.dev/spec/v0.1/requirements#retained-indefinitely)**
-
-Since we use GitHub to host the repository, an external person can't modify or delete the history. Before a pull request can be merged, an explicit approval from an [Edgeless Systems](https://github.com/edgelesssys) team member is required.
-
-The same holds true for changes proposed by team members. Each change to `main` needs to be proposed via a pull request and requires at least one approval.
-
-The [Edgeless Systems](https://github.com/edgelesssys) GitHub organization admins control these settings and are able to make changes to the repository's history should legal requirements necessitate it. These changes require two-party approval following the obliterate policy.
-
-**[Build - Build as Code](https://slsa.dev/spec/v0.1/requirements#build-as-code)**
-
-All build files for Constellation are stored in [the same repository](https://github.com/edgelesssys/constellation/tree/main/.github).
-
-**[Build - Ephemeral Environment](https://slsa.dev/spec/v0.1/requirements#ephemeral-environment)**
-
-All GitHub Action workflows are executed on [GitHub-hosted runners](https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners). These runners are only available for the duration of a workflow run.
-
-We currently don't use [self-hosted runners](https://docs.github.com/en/actions/hosting-your-own-runners/about-self-hosted-runners).
-
-**[Build - Isolated](https://slsa.dev/spec/v0.1/requirements#isolated)**
-
-As outlined in the previous section, we use GitHub-hosted runners, which provide a new, isolated and ephemeral environment for each build.
-
-Additionally, the [SLSA GitHub generator](https://github.com/slsa-framework/slsa-github-generator#generation-of-provenance) itself is run in an isolated workflow with the artifact hash as a defined input.
-
-**[Provenance - Non-falsifiable](https://slsa.dev/spec/v0.1/requirements#non-falsifiable)**
-
-As outlined in the [SLSA GitHub generator](https://github.com/slsa-framework/slsa-github-generator) documentation, it already fulfills the non-falsifiable requirements for SLSA Level 3. The generated provenance is signed using [sigstore](https://sigstore.dev/) with an OIDC-based proof of identity.
-
-## Level 4 - In Progress
-
-We strive to adopt certain aspects of SLSA Level 4 that support our engineering process. At the same time, SLSA is still in alpha status and the biggest changes to SLSA are expected to be around Level 4.
diff --git a/docs/versioned_docs/version-2.21/reference/terraform.md b/docs/versioned_docs/version-2.21/reference/terraform.md deleted file mode 100644 index 9825a8bb8..000000000 --- a/docs/versioned_docs/version-2.21/reference/terraform.md +++ /dev/null @@ -1,37 +0,0 @@
-# Terraform usage
-
-[Terraform](https://www.terraform.io/) is an Infrastructure as Code (IaC) framework to manage cloud resources. This page explains how Constellation uses it internally and how advanced users may manually use it to have more control over resource creation.
- -:::info -Information on this page is intended for users who are familiar with Terraform. -It's not required for common usage of Constellation. -See the [Terraform documentation](https://developer.hashicorp.com/terraform/docs) if you want to learn more about it. -::: - -## Terraform state files - -Constellation keeps Terraform state files in subdirectories of the workspace together with the corresponding Terraform configuration files and metadata. -The subdirectories are created on the first Constellation CLI action that uses Terraform internally. - -Currently, these subdirectories are: - -* `constellation-terraform` - Terraform state files for the resources of the Constellation cluster -* `constellation-iam-terraform` - Terraform state files for IAM configuration - -As with all commands, commands that work with these files (e.g., `apply`, `terminate`, `iam`) have to be executed from the root of the cluster's [workspace directory](../architecture/orchestration.md#workspaces). You usually don't need and shouldn't manipulate or delete the subdirectories manually. - -## Interacting with Terraform manually - -Manual interaction with Terraform state created by Constellation (i.e., via the Terraform CLI) should only be performed by experienced users. It may lead to unrecoverable loss of cloud resources. For the majority of users and use cases, the interaction done by the [Constellation CLI](cli.md) is sufficient. - -## Terraform debugging - -To debug Terraform issues, the Constellation CLI offers the `tf-log` flag. You can set it to any of [Terraform's log levels](https://developer.hashicorp.com/terraform/internals/debugging): -* `JSON` (JSON-formatted logs at `TRACE` level) -* `TRACE` -* `DEBUG` -* `INFO` -* `WARN` -* `ERROR` - -The log output is written to the `terraform.log` file in the workspace directory. The output is appended to the file on each run. diff --git a/docs/versioned_docs/version-2.21/workflows/cert-manager.md b/docs/versioned_docs/version-2.21/workflows/cert-manager.md deleted file mode 100644 index 1d847e8bf..000000000 --- a/docs/versioned_docs/version-2.21/workflows/cert-manager.md +++ /dev/null @@ -1,13 +0,0 @@ -# Install cert-manager - -:::caution -If you want to use cert-manager with Constellation, pay attention to the following to avoid potential pitfalls. -::: - -Constellation ships with cert-manager preinstalled. -The default installation is part of the `kube-system` namespace, as all other Constellation-managed microservices. -You are free to install more instances of cert-manager into other namespaces. -However, be aware that any new installation needs to use the same version as the one installed with Constellation or rely on the same CRD versions. -Also remember to set the `installCRDs` value to `false` when installing new cert-manager instances. -It will create problems if you have two installations of cert-manager depending on different versions of the installed CRDs. -CRDs are cluster-wide resources and cert-manager depends on specific versions of those CRDs for each release. diff --git a/docs/versioned_docs/version-2.21/workflows/config.md b/docs/versioned_docs/version-2.21/workflows/config.md deleted file mode 100644 index a8a52980e..000000000 --- a/docs/versioned_docs/version-2.21/workflows/config.md +++ /dev/null @@ -1,353 +0,0 @@ -# Configure your cluster - -:::info -This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. 
-::: - - - ---- - -Before you can create your cluster, you need to configure the identity and access management (IAM) for your cloud service provider (CSP) and choose machine types for the nodes. - -## Creating the configuration file - -You can generate a configuration file for your CSP by using the following CLI command: - - - - -```bash -constellation config generate aws -``` - - - - -```bash -constellation config generate azure -``` - - - - -```bash -constellation config generate gcp -``` - - - - -```bash -constellation config generate stackit -``` - - - - -This creates the file `constellation-conf.yaml` in the current directory. - -## Choosing a VM type - -Constellation supports the following VM types: - - - -By default, Constellation uses `m6a.xlarge` VMs (4 vCPUs, 16 GB RAM) to create your cluster. -Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file. -If you are using the default attestation variant `awsSEVSNP`, you can use the instance types described in [AWS's AMD SEV-SNP docs](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/snp-requirements.html). -Please mind the region restrictions mentioned in the [Getting started](../getting-started/first-steps.md#create-a-cluster) section. - -If you are using the attestation variant `awsNitroTPM`, you can choose any of the [nitroTPM-enabled instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enable-nitrotpm-prerequisites.html). - -The Constellation CLI can also print the supported instance types with: `constellation config instance-types`. - - - - -By default, Constellation uses `Standard_DC4as_v5` CVMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file. For CVMs, any VM type with a minimum of 4 vCPUs from the [DCasv5 & DCadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series) or [ECasv5 & ECadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/ecasv5-ecadsv5-series) families is supported. - -You can also run `constellation config instance-types` to get the list of all supported options. - - - - -By default, Constellation uses `n2d-standard-4` VMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file. Supported are all machines with a minimum of 4 vCPUs from the [C2D](https://cloud.google.com/compute/docs/compute-optimized-machines#c2d_machine_types) or [N2D](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines) family. You can run `constellation config instance-types` to get the list of all supported options. - - - - -By default, Constellation uses `m1a.4cd` VMs (4 vCPUs, 30 GB RAM) to create your cluster. -Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file. - -The following instance types are known to be supported: - -| name | vCPUs | GB RAM | -|----------|-------|--------| -| m1a.4cd | 4 | 30 | -| m1a.8cd | 8 | 60 | -| m1a.16cd | 16 | 120 | -| m1a.30cd | 30 | 230 | - -You can choose any of the SEV-enabled instance types. You can find a list of all supported instance types in the [STACKIT documentation](https://docs.stackit.cloud/stackit/en/virtual-machine-flavors-75137231.html). - -The Constellation CLI can also print the supported instance types with: `constellation config instance-types`. 
- - - - -Fill the desired VM type into the `instanceType` fields in the `constellation-conf.yml` file. - -## Creating additional node groups - -By default, Constellation creates the node groups `control_plane_default` and `worker_default` for control-plane nodes and workers, respectively. -If you require additional control-plane or worker groups with different instance types, zone placements, or disk sizes, you can add additional node groups to the `constellation-conf.yml` file. -Each node group can be scaled individually. - -Consider the following example for AWS: - -```yaml -nodeGroups: - control_plane_default: - role: control-plane - instanceType: c6a.xlarge - stateDiskSizeGB: 30 - stateDiskType: gp3 - zone: eu-west-1c - initialCount: 3 - worker_default: - role: worker - instanceType: c6a.xlarge - stateDiskSizeGB: 30 - stateDiskType: gp3 - zone: eu-west-1c - initialCount: 2 - high_cpu: - role: worker - instanceType: c6a.24xlarge - stateDiskSizeGB: 128 - stateDiskType: gp3 - zone: eu-west-1c - initialCount: 1 -``` - -This configuration creates an additional node group `high_cpu` with a larger instance type and disk. - -You can use the field `zone` to specify what availability zone nodes of the group are placed in. -On Azure, this field is empty by default and nodes are automatically spread across availability zones. -STACKIT currently offers SEV-enabled CPUs in the `eu01-1`, `eu01-2`, and `eu01-3` zones. -Consult the documentation of your cloud provider for more information: - -* [AWS](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/) -* [Azure](https://azure.microsoft.com/en-us/explore/global-infrastructure/availability-zones) -* [GCP](https://cloud.google.com/compute/docs/regions-zones) -* [STACKIT](https://docs.stackit.cloud/stackit/en/regions-and-availability-zones-75137212.html) - -## Choosing a Kubernetes version - -To learn which Kubernetes versions can be installed with your current CLI, you can run `constellation config kubernetes-versions`. -See also Constellation's [Kubernetes support policy](../architecture/versions.md#kubernetes-support-policy). - -## Creating an IAM configuration - -You can create an IAM configuration for your cluster automatically using the `constellation iam create` command. -If you already have a Constellation configuration file, you can add the `--update-config` flag to the command. This writes the needed IAM fields into your configuration. Furthermore, the flag updates the zone/region of the configuration if it hasn't been set yet. - - - - -You must be authenticated with the [AWS CLI](https://aws.amazon.com/en/cli/) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). - -```bash -constellation iam create aws --zone=us-east-2a --prefix=constellTest -``` - -This command creates IAM configuration for the AWS zone `us-east-2a` using the prefix `constellTest` for all named resources being created. - -Constellation OS images are currently replicated to the following regions: - -* `eu-central-1` -* `eu-west-1` -* `eu-west-3` -* `us-east-2` -* `ap-south-1` - -If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+AWS+image+region:+xx-xxxx-x). 
- -You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). - -Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - - - -You must be authenticated with the [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). - -```bash -constellation iam create azure --subscriptionID 00000000-0000-0000-0000-000000000000 --region=westus --resourceGroup=constellTest --servicePrincipal=spTest -``` - -This command creates IAM configuration on the Azure region `westus` creating a new resource group `constellTest` and a new service principal `spTest`. - -CVMs are available in several Azure regions. Constellation OS images are currently replicated to the following: - -* `germanywestcentral` -* `westus` -* `eastus` -* `northeurope` -* `westeurope` -* `southeastasia` - -If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+Azure+image+region:+xx-xxxx-x). - -You can find a list of all [regions in Azure's documentation](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=virtual-machines®ions=all). - -Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - - - -You must be authenticated with the [GCP CLI](https://cloud.google.com/sdk/gcloud) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). - -```bash -constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --serviceAccountID=constell-test -``` - -This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. - -Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `N2D`. - -Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - - - -STACKIT requires manual creation and configuration of service accounts. Look at the [first steps](../getting-started/first-steps.md) for more information. - - - - -
-Alternatively, you can manually create the IAM configuration on your CSP. - -The following describes the configuration fields and how you obtain the required information or create the required resources. - - - - -* **region**: The name of your chosen AWS data center region, e.g., `us-east-2`. - - Constellation OS images are currently replicated to the following regions: - * `eu-central-1` - * `eu-west-1` - * `eu-west-3` - * `us-east-2` - * `ap-south-1` - - If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+AWS+image+region:+xx-xxxx-x). - - You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). - -* **zone**: The name of your chosen AWS data center availability zone, e.g., `us-east-2a`. - - Learn more about [availability zones in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-availability-zones). - -* **iamProfileControlPlane**: The name of an IAM instance profile attached to all control-plane nodes. - - You can create the resource with [Terraform](https://www.terraform.io/). For that, use the [provided Terraform script](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam) to generate the necessary profile. The profile name will be provided as Terraform output value: `control_plane_instance_profile_name`. - - Alternatively, you can create the AWS profile with a tool of your choice. Use the JSON policy in [main.tf](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam/main.tf) in the resource `aws_iam_policy.control_plane_policy`. - -* **iamProfileWorkerNodes**: The name of an IAM instance profile attached to all worker nodes. - - You can create the resource with [Terraform](https://www.terraform.io/). For that, use the [provided Terraform script](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam) to generate the necessary profile. The profile name will be provided as Terraform output value: `worker_nodes_instance_profile_name`. - - Alternatively, you can create the AWS profile with a tool of your choice. Use the JSON policy in [main.tf](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam/main.tf) in the resource `aws_iam_policy.worker_node_policy`. - - - - -* **subscription**: The UUID of your Azure subscription, e.g., `8b8bd01f-efd9-4113-9bd1-c82137c32da7`. - - You can view your subscription UUID via `az account show` and read the `id` field. For more information refer to [Azure's documentation](https://docs.microsoft.com/en-us/azure/azure-portal/get-subscription-tenant-id#find-your-azure-subscription). - -* **tenant**: The UUID of your Azure tenant, e.g., `3400e5a2-8fe2-492a-886c-38cb66170f25`. - - You can view your tenant UUID via `az account show` and read the `tenant` field. For more information refer to [Azure's documentation](https://docs.microsoft.com/en-us/azure/azure-portal/get-subscription-tenant-id#find-your-azure-ad-tenant). - -* **location**: The Azure datacenter location you want to deploy your cluster in, e.g., `westus`. - - CVMs are available in several Azure regions. 
Constellation OS images are currently replicated to the following: - - * `germanywestcentral` - * `westus` - * `eastus` - * `northeurope` - * `westeurope` - * `southeastasia` - - If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+Azure+image+region:+xx-xxxx-x). - - You can find a list of all [regions in Azure's documentation](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=virtual-machines®ions=all). - -* **resourceGroup**: [Create a new resource group in Azure](https://learn.microsoft.com/azure/azure-resource-manager/management/manage-resource-groups-portal) for your Constellation cluster. Set this configuration field to the name of the created resource group. - -* **userAssignedIdentity**: [Create a new managed identity in Azure](https://learn.microsoft.com/azure/active-directory/managed-identities-azure-resources/how-manage-user-assigned-managed-identities). You should create the identity in a different resource group as all resources within the cluster resource group will be deleted on cluster termination. - - Add three role assignments to the identity: `Owner`, `Virtual Machine Contributor`, and `Application Insights Component Contributor`. The `scope` of all three should refer to the previously created cluster resource group. - - Set the configuration value to the full ID of the created identity, e.g., `/subscriptions/8b8bd01f-efd9-4113-9bd1-c82137c32da7/resourcegroups/constellation-identity/providers/Microsoft.ManagedIdentity/userAssignedIdentities/constellation-identity`. You can get it by opening the `JSON View` from the `Overview` section of the identity. - - The user-assigned identity is used by instances of the cluster to access other cloud resources. - For more information about managed identities refer to [Azure's documentation](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/how-manage-user-assigned-managed-identities). - - - - -* **project**: The ID of your GCP project, e.g., `constellation-129857`. - - You can find it on the [welcome screen of your GCP project](https://console.cloud.google.com/welcome). For more information refer to [Google's documentation](https://support.google.com/googleapi/answer/7014113). - -* **region**: The GCP region you want to deploy your cluster in, e.g., `us-central1`. - - You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). - -* **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-central1-a`. - - You can find a [list of all zones in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). - -* **serviceAccountKeyPath**: To configure this, you need to create a GCP [service account](https://cloud.google.com/iam/docs/service-accounts) with the following permissions: - - * `Compute Instance Admin (v1) (roles/compute.instanceAdmin.v1)` - * `Compute Network Admin (roles/compute.networkAdmin)` - * `Compute Security Admin (roles/compute.securityAdmin)` - * `Compute Storage Admin (roles/compute.storageAdmin)` - * `Service Account User (roles/iam.serviceAccountUser)` - - Afterward, create and download a new JSON key for this service account. Place the downloaded file in your Constellation workspace, and set the config parameter to the filename, e.g., `constellation-129857-15343dba46cb.json`. 
- - - - -STACKIT requires manual creation and configuration of service accounts. Look at the [first steps](../getting-started/first-steps.md) for more information. - - - -
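Whichever route you take, the result is that the IAM-related fields in `constellation-conf.yaml` are populated. As a rough orientation, the relevant AWS section might look like the following sketch; the exact nesting can differ between Constellation versions, and the values are placeholders based on the examples above.

```yaml
provider:
  aws:
    region: us-east-2        # region the cluster is deployed in
    zone: us-east-2a         # availability zone used for the node groups
    iamProfileControlPlane: constellTest_control_plane_instance_profile # placeholder profile name
    iamProfileWorkerNodes: constellTest_worker_nodes_instance_profile   # placeholder profile name
```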
- -Now that you've configured your CSP, you can [create your cluster](./create.md). - -## Deleting an IAM configuration - -You can keep a created IAM configuration and reuse it for new clusters. Alternatively, you can also delete it if you don't want to use it anymore. - -Delete the IAM configuration by executing the following command in the same directory where you executed `constellation iam create` (the directory that contains [`constellation-iam-terraform`](../reference/terraform.md) as a subdirectory): - -```bash -constellation iam destroy -``` - -:::caution -For Azure, deleting the IAM configuration by executing `constellation iam destroy` will delete the whole resource group created by `constellation iam create`. -This also includes any additional resources in the resource group that weren't created by Constellation. -::: diff --git a/docs/versioned_docs/version-2.21/workflows/create.md b/docs/versioned_docs/version-2.21/workflows/create.md deleted file mode 100644 index 6074ebb16..000000000 --- a/docs/versioned_docs/version-2.21/workflows/create.md +++ /dev/null @@ -1,93 +0,0 @@ -# Create your cluster - -:::info -This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. -::: - - - ---- - -Creating your cluster happens through multiple phases. -The most significant ones are: - -1. Creating the necessary resources in your cloud environment -2. Bootstrapping the Constellation cluster and setting up a connection -3. Installing the necessary Kubernetes components - -`constellation apply` handles all this in a single command. -You can use the `--skip-phases` flag to skip specific phases of the process. -For example, if you created the infrastructure manually, you can skip the cloud resource creation phase. - -See the [architecture](../architecture/orchestration.md) section for details on the inner workings of this process. - -:::tip -If you don't have a cloud subscription, you can also set up a [local Constellation cluster using virtualization](../getting-started/first-steps-local.md) for testing. -::: - -Before you create the cluster, make sure to have a [valid configuration file](./config.md). - - - - -```bash -constellation apply -``` - -`apply` stores the state of your cluster's cloud resources in a [`constellation-terraform`](../architecture/orchestration.md#cluster-creation-process) directory in your workspace. - - - - -Self-managed infrastructure allows for more flexibility in the setup, by separating the infrastructure setup from the Constellation cluster management. -This provides flexibility in DevOps and can meet potential regulatory requirements. -It's recommended to use Terraform for infrastructure management, but you can use any tool of your choice. - -:::info - - When using Terraform, you can use the [Constellation Terraform provider](./terraform-provider.md) to manage the entire Constellation cluster lifecycle. - -::: - -You can refer to the Terraform files for the selected CSP from the [Constellation GitHub repository](https://github.com/edgelesssys/constellation/tree/main/terraform/infrastructure) for a minimum Constellation cluster configuration. From this base, you can now add, edit, or substitute resources per your own requirements with the infrastructure -management tooling of your choice. You need to keep the essential functionality of the base configuration in order for your cluster to function correctly. - - - -:::info - - On Azure, a manual update to the MAA provider's policy is necessary. 
- You can apply the update with the following command after creating the infrastructure, with `` being the URL of the MAA provider (i.e., `$(terraform output attestation_url | jq -r)`, when using the minimal Terraform configuration). - - ```bash - constellation maa-patch - ``` - -::: - - - -Make sure all necessary resources are created, e.g., through checking your CSP's portal and retrieve the necessary values, aligned with the outputs (specified in `outputs.tf`) of the base configuration. - -Fill these outputs into the corresponding fields of the `Infrastructure` block inside the `constellation-state.yaml` file. For example, fill the IP or DNS name your cluster can be reached at into the `.Infrastructure.ClusterEndpoint` field. - -With the required cloud resources set up, continue with initializing your cluster. - -```bash -constellation apply --skip-phases=infrastructure -``` - - - - -Finally, configure `kubectl` for your cluster: - -```bash -export KUBECONFIG="$PWD/constellation-admin.conf" -``` - -🏁 That's it. You've successfully created a Constellation cluster. - -### Troubleshooting - -In case `apply` fails, the CLI collects logs from the bootstrapping instance and stores them inside `constellation-cluster.log`. diff --git a/docs/versioned_docs/version-2.21/workflows/lb.md b/docs/versioned_docs/version-2.21/workflows/lb.md deleted file mode 100644 index 868e61076..000000000 --- a/docs/versioned_docs/version-2.21/workflows/lb.md +++ /dev/null @@ -1,28 +0,0 @@ -# Expose a service - -Constellation integrates the native load balancers of each CSP. Therefore, to expose a service simply [create a service of type `LoadBalancer`](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). - -## Internet-facing LB service on AWS - -To expose your application service externally you might want to use a Kubernetes Service of type `LoadBalancer`. On AWS, load-balancing is achieved through the [AWS Load Balancer Controller](https://kubernetes-sigs.github.io/aws-load-balancer-controller) as in the managed EKS. - -Since recent versions, the controller deploy an internal LB by default requiring to set an annotation `service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing` to have an internet-facing LB. For more details, see the [official docs](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.7/guide/service/nlb/). - -For general information on LB with AWS see [Network load balancing on Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/network-load-balancing.html). - -:::caution -Before terminating the cluster, all LB backed services should be deleted, so that the controller can cleanup the related resources. -::: - -## Ingress on AWS - -The AWS Load Balancer Controller also provisions `Ingress` resources of class `alb`. -AWS Application Load Balancers (ALBs) can be configured with a [`target-type`](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.7/guide/ingress/annotations/#target-type). -The target type `ip` requires using the EKS container network solution, which makes it incompatible with Constellation. -If a service can be exposed on a `NodePort`, the target type `instance` can be used. - -See [Application load balancing on Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html) for more information. - -:::caution -Ingress handlers backed by AWS ALBs reside outside the Constellation cluster, so they shouldn't be handling sensitive traffic! 
-::: diff --git a/docs/versioned_docs/version-2.21/workflows/recovery.md b/docs/versioned_docs/version-2.21/workflows/recovery.md deleted file mode 100644 index 592ae247b..000000000 --- a/docs/versioned_docs/version-2.21/workflows/recovery.md +++ /dev/null @@ -1,179 +0,0 @@ -# Recover your cluster - -Recovery of a Constellation cluster means getting it back into a healthy state after too many concurrent node failures in the control plane. -Reasons for an unhealthy cluster can vary from a power outage, or planned reboot, to migration of nodes and regions. -Recovery events are rare, because Constellation is built for high availability and automatically and securely replaces failed nodes. When a node is replaced, Constellation's control plane first verifies the new node before it sends the node the cryptographic keys required to decrypt its [state disk](../architecture/images.md#state-disk). - -Constellation provides a recovery mechanism for cases where the control plane has failed and is unable to replace nodes. -The `constellation recover` command securely connects to all nodes in need of recovery using [attested TLS](../architecture/attestation.md#attested-tls-atls) and provides them with the keys to decrypt their state disks and continue booting. - -## Identify unhealthy clusters - -The first step to recovery is identifying when a cluster becomes unhealthy. -Usually, this can be first observed when the Kubernetes API server becomes unresponsive. - -You can check the health status of the nodes via the cloud service provider (CSP). -Constellation provides logging information on the boot process and status via serial console output. -In the following, you'll find detailed descriptions for identifying clusters stuck in recovery for each CSP. - - - - -First, open the AWS console to view all Auto Scaling Groups (ASGs) in the region of your cluster. Select the ASG of the control plane `--control-plane` and check that enough members are in a *Running* state. - -Second, check the boot logs of these *Instances*. In the ASG's *Instance management* view, select each desired instance. In the upper right corner, select **Action > Monitor and troubleshoot > Get system log**. - -In the serial console output, search for `Waiting for decryption key`. -Similar output to the following means your node was restarted and needs to decrypt the [state disk](../architecture/images.md#state-disk): - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","caller":"cmd/main.go:55","msg":"Starting disk-mapper","version":"2.0.0","cloudProvider":"gcp"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"setupManager","caller":"setup/setup.go:72","msg":"Preparing existing state disk"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:65","msg":"Starting RejoinClient"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"recoveryServer","caller":"recoveryserver/server.go:59","msg":"Starting RecoveryServer"} -``` - -The node will then try to connect to the [*JoinService*](../architecture/microservices.md#joinservice) and obtain the decryption key. 
-If this fails due to an unhealthy control plane, you will see log messages similar to the following: - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:77","msg":"Received list with JoinService endpoints","endpoints":["192.168.178.4:30090","192.168.178.2:30090"]} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.4:30090"} -{"level":"WARN","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.4:30090: connect: connection refused\"","endpoint":"192.168.178.4:30090"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.2:30090"} -{"level":"WARN","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.2:30090: i/o timeout\"","endpoint":"192.168.178.2:30090"} -{"level":"ERROR","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:110","msg":"Failed to rejoin on all endpoints"} -``` - -This means that you have to recover the node manually. - - - - -In the Azure portal, find the cluster's resource group. -Inside the resource group, open the control plane *Virtual machine scale set* `constellation-scale-set-controlplanes-`. -On the left, go to **Settings** > **Instances** and check that enough members are in a *Running* state. - -Second, check the boot logs of these *Instances*. -In the scale set's *Instances* view, open the details page of the desired instance. -On the left, go to **Support + troubleshooting** > **Serial console**. - -In the serial console output, search for `Waiting for decryption key`. -Similar output to the following means your node was restarted and needs to decrypt the [state disk](../architecture/images.md#state-disk): - -```json -{"level":"INFO","ts":"2022-09-08T09:56:41Z","caller":"cmd/main.go:55","msg":"Starting disk-mapper","version":"2.0.0","cloudProvider":"azure"} -{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"setupManager","caller":"setup/setup.go:72","msg":"Preparing existing state disk"} -{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"recoveryServer","caller":"recoveryserver/server.go:59","msg":"Starting RecoveryServer"} -{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"rejoinClient","caller":"rejoinclient/client.go:65","msg":"Starting RejoinClient"} -``` - -The node will then try to connect to the [*JoinService*](../architecture/microservices.md#joinservice) and obtain the decryption key. 
-If this fails due to an unhealthy control plane, you will see log messages similar to the following: - -```json -{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"rejoinClient","caller":"rejoinclient/client.go:77","msg":"Received list with JoinService endpoints","endpoints":["10.9.0.5:30090","10.9.0.6:30090"]} -{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"10.9.0.5:30090"} -{"level":"WARN","ts":"2022-09-08T09:57:03Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 10.9.0.5:30090: i/o timeout\"","endpoint":"10.9.0.5:30090"} -{"level":"INFO","ts":"2022-09-08T09:57:03Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"10.9.0.6:30090"} -{"level":"WARN","ts":"2022-09-08T09:57:23Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 10.9.0.6:30090: i/o timeout\"","endpoint":"10.9.0.6:30090"} -{"level":"ERROR","ts":"2022-09-08T09:57:23Z","logger":"rejoinClient","caller":"rejoinclient/client.go:110","msg":"Failed to rejoin on all endpoints"} -``` - -This means that you have to recover the node manually. - - - - -First, check that the control plane *Instance Group* has enough members in a *Ready* state. -In the GCP Console, go to **Instance Groups** and check the group for the cluster's control plane `-control-plane-`. - -Second, check the status of the *VM Instances*. -Go to **VM Instances** and open the details of the desired instance. -Check the serial console output of that instance by opening the **Logs** > **Serial port 1 (console)** page: - -![GCP portal serial console link](../_media/recovery-gcp-serial-console-link.png) - -In the serial console output, search for `Waiting for decryption key`. -Similar output to the following means your node was restarted and needs to decrypt the [state disk](../architecture/images.md#state-disk): - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","caller":"cmd/main.go:55","msg":"Starting disk-mapper","version":"2.0.0","cloudProvider":"gcp"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"setupManager","caller":"setup/setup.go:72","msg":"Preparing existing state disk"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:65","msg":"Starting RejoinClient"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"recoveryServer","caller":"recoveryserver/server.go:59","msg":"Starting RecoveryServer"} -``` - -The node will then try to connect to the [*JoinService*](../architecture/microservices.md#joinservice) and obtain the decryption key. 
-If this fails due to an unhealthy control plane, you will see log messages similar to the following: - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:77","msg":"Received list with JoinService endpoints","endpoints":["192.168.178.4:30090","192.168.178.2:30090"]} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.4:30090"} -{"level":"WARN","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.4:30090: connect: connection refused\"","endpoint":"192.168.178.4:30090"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.2:30090"} -{"level":"WARN","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.2:30090: i/o timeout\"","endpoint":"192.168.178.2:30090"} -{"level":"ERROR","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:110","msg":"Failed to rejoin on all endpoints"} -``` - -This means that you have to recover the node manually. - - - - -First, open the STACKIT portal to view all servers in your project. Select individual control plane nodes `--control-plane--` and check that enough members are in a *Running* state. - -Second, check the boot logs of these servers. Click on a server name and select **Overview**. Find the **Machine Setup** section and click on **Web console** > **Open console**. - -In the serial console output, search for `Waiting for decryption key`. -Similar output to the following means your node was restarted and needs to decrypt the [state disk](../architecture/images.md#state-disk): - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","caller":"cmd/main.go:55","msg":"Starting disk-mapper","version":"2.0.0","cloudProvider":"gcp"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"setupManager","caller":"setup/setup.go:72","msg":"Preparing existing state disk"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:65","msg":"Starting RejoinClient"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"recoveryServer","caller":"recoveryserver/server.go:59","msg":"Starting RecoveryServer"} -``` - -The node will then try to connect to the [*JoinService*](../architecture/microservices.md#joinservice) and obtain the decryption key. 
-If this fails due to an unhealthy control plane, you will see log messages similar to the following: - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:77","msg":"Received list with JoinService endpoints","endpoints":["192.168.178.4:30090","192.168.178.2:30090"]} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.4:30090"} -{"level":"WARN","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.4:30090: connect: connection refused\"","endpoint":"192.168.178.4:30090"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.2:30090"} -{"level":"WARN","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.2:30090: i/o timeout\"","endpoint":"192.168.178.2:30090"} -{"level":"ERROR","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:110","msg":"Failed to rejoin on all endpoints"} -``` - -This means that you have to recover the node manually. - - - - -## Recover a cluster - -Recovering a cluster requires the following parameters: - -* The `constellation-state.yaml` file in your working directory or the cluster's endpoint -* The master secret of the cluster - -A cluster can be recovered like this: - -```bash -$ constellation recover -Pushed recovery key. -Pushed recovery key. -Pushed recovery key. -Recovered 3 control-plane nodes. -``` - -In the serial console output of the node you'll see a similar output to the following: - -```json -{"level":"INFO","ts":"2022-09-08T10:26:59Z","logger":"recoveryServer","caller":"recoveryserver/server.go:93","msg":"Received recover call"} -{"level":"INFO","ts":"2022-09-08T10:26:59Z","logger":"recoveryServer","caller":"recoveryserver/server.go:125","msg":"Received state disk key and measurement secret, shutting down server"} -{"level":"INFO","ts":"2022-09-08T10:26:59Z","logger":"recoveryServer.gRPC","caller":"zap/server_interceptors.go:61","msg":"finished streaming call with code OK","grpc.start_time":"2022-09-08T10:26:59Z","system":"grpc","span.kind":"server","grpc.service":"recoverproto.API","grpc.method":"Recover","peer.address":"192.0.2.3:41752","grpc.code":"OK","grpc.time_ms":15.701} -{"level":"INFO","ts":"2022-09-08T10:27:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:87","msg":"RejoinClient stopped"} -``` diff --git a/docs/versioned_docs/version-2.21/workflows/reproducible-builds.md b/docs/versioned_docs/version-2.21/workflows/reproducible-builds.md deleted file mode 100644 index e3bc46095..000000000 --- a/docs/versioned_docs/version-2.21/workflows/reproducible-builds.md +++ /dev/null @@ -1,63 +0,0 @@ -# Reproduce released artifacts - -Constellation has first-class support for [reproducible builds](https://reproducible-builds.org). -Reproducing the released artifacts is an alternative to [signature verification](verify-cli.md) that doesn't require trusting Edgeless Systems' release process. 
-The following sections describe how to rebuild an artifact and how Constellation ensures that the rebuild reproduces the artifacts bit-by-bit. - -## Build environment prerequisites - -The build systems used by Constellation - [Bazel](https://bazel.build/) and [Nix](https://nixos.org) - are designed for deterministic, reproducible builds. -These two dependencies should be the only prerequisites for a successful build. -However, it can't be ruled out completely that peculiarities of the host affect the build result. -Thus, we recommend the following host setup for best results: - -1. A Linux operating system not older than v5.4. -2. The GNU C library not older than v2.31 (avoid `musl`). -3. GNU `coreutils` not older than v8.30 (avoid `busybox`). -4. An `ext4` filesystem for building. -5. AppArmor turned off. - -This is given, for example, on an Ubuntu 22.04 system, which is also used for reproducibility tests. - -:::note - -To avoid any backwards-compatibility issues, the host software versions should also not be much newer than the Constellation release. - -::: - -## Run the build - -The following instructions outline qualitatively how to reproduce a build. -Constellation implements these instructions in the [Reproducible Builds workflow](https://github.com/edgelesssys/constellation/actions/workflows/reproducible-builds.yml), which continuously tests for reproducibility. -The workflow is a good place to look up specific version numbers and build steps. - -1. Check out the Constellation repository at the tag corresponding to the release. - - ```bash - git clone https://github.com/edgelesssys/constellation.git - cd constellation - git checkout v2.20.0 - ``` - -2. [Install the Bazel release](https://bazel.build/install) specified in `.bazelversion`. -3. [Install Nix](https://nixos.org/download/) (any recent version should do). -4. Run the build with `bazel build $target` for one of the following targets of interest: - - ```data - //cli:cli_enterprise_darwin_amd64 - //cli:cli_enterprise_darwin_arm64 - //cli:cli_enterprise_linux_amd64 - //cli:cli_enterprise_linux_arm64 - //cli:cli_enterprise_windows_amd64 - ``` - -5. Compare the build result with the downloaded release artifact. - - - -## Feedback - -Reproduction failures often indicate a bug in the build system or in the build definitions. -Therefore, we're interested in any reproducibility issues you might encounter. -[Start a bug report](https://github.com/edgelesssys/constellation/issues/new/choose) and describe the details of your build environment. -Make sure to include your result binary or a [`diffoscope`](https://diffoscope.org/) report, if possible. diff --git a/docs/versioned_docs/version-2.21/workflows/s3proxy.md b/docs/versioned_docs/version-2.21/workflows/s3proxy.md deleted file mode 100644 index 121e8a461..000000000 --- a/docs/versioned_docs/version-2.21/workflows/s3proxy.md +++ /dev/null @@ -1,58 +0,0 @@ -# Install s3proxy - -Constellation includes a transparent client-side encryption proxy for [AWS S3](https://aws.amazon.com/de/s3/) and compatible stores. -s3proxy encrypts objects before sending them to S3 and automatically decrypts them on retrieval, without requiring changes to your application. -With s3proxy, you can use S3 for storage in a confidential way without having to trust the storage provider. - -## Limitations - -Currently, s3proxy has the following limitations: -- Only `PutObject` and `GetObject` requests are encrypted/decrypted by s3proxy. 
-By default, s3proxy will block requests that may expose unencrypted data to S3 (e.g. UploadPart). -The `allow-multipart` flag disables request blocking for evaluation purposes. -- Using the [Range](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html#API_GetObject_RequestSyntax) header on `GetObject` is currently not supported and will result in an error. - -These limitations will be removed with future iterations of s3proxy. -If you want to use s3proxy but these limitations stop you from doing so, consider [opening an issue](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&projects=&template=feature_request.yml). - -## Deployment - -You can add the s3proxy to your Constellation cluster as follows: -1. Add the Edgeless Systems chart repository: - ```bash - helm repo add edgeless https://helm.edgeless.systems/stable - helm repo update - ``` -2. Set ACCESS_KEY and ACCESS_SECRET to valid credentials you want s3proxy to use to interact with S3. -3. Deploy s3proxy: - ```bash - helm install s3proxy edgeless/s3proxy --set awsAccessKeyID="$ACCESS_KEY" --set awsSecretAccessKey="$ACCESS_SECRET" - ``` - -If you want to run a demo application, check out the [Filestash with s3proxy](../getting-started/examples/filestash-s3proxy.md) example. - - -## Technical details - -### Encryption - -s3proxy relies on Google's [Tink Cryptographic Library](https://developers.google.com/tink) to implement cryptographic operations securely. -The used cryptographic primitives are [NIST SP 800 38f](https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-38F.pdf) for key wrapping and [AES](https://en.wikipedia.org/wiki/Advanced_Encryption_Standard)-[GCM](https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Galois/counter_(GCM)) with 256 bit keys for data encryption. - -s3proxy uses [envelope encryption](https://cloud.google.com/kms/docs/envelope-encryption) to encrypt objects. -This means s3proxy uses a key encryption key (KEK) issued by the [KeyService](../architecture/microservices.md#keyservice) to encrypt data encryption keys (DEKs). -Each S3 object is encrypted with its own DEK. -The encrypted DEK is then saved as metadata of the encrypted object. -This enables key rotation of the KEK without re-encrypting the data in S3. -The approach also allows access to objects from different locations, as long as each location has access to the KEK. - -### Traffic interception - -To use s3proxy, you have to redirect your outbound S3 traffic to s3proxy. -This can either be done by modifying your client application or by changing the deployment of your application. - -The necessary deployment modifications are to add DNS redirection and a trusted TLS certificate to the client's trust store. -DNS redirection can be defined for each pod, allowing you to use s3proxy for one application without changing other applications in the same cluster. -Adding a trusted TLS certificate is necessary as clients communicate with s3proxy via HTTPS. -To have your client application trust s3proxy's TLS certificate, the certificate has to be added to the client's certificate trust store. -The [Filestash with s3proxy](../getting-started/examples/filestash-s3proxy.md) example shows how to do this. 
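To make the DNS redirection part concrete, the following is a minimal sketch of how a client pod could resolve its S3 endpoint to s3proxy using the standard Kubernetes `hostAliases` field. The endpoint hostname, service IP, and image are placeholders; in a real deployment you'd use the ClusterIP of the deployed s3proxy service and additionally add the proxy's TLS certificate to the client's trust store.

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: s3-client
spec:
  hostAliases:
    # Resolve the S3 endpoint to the s3proxy service instead of the real S3 endpoint (placeholder IP).
    - ip: "10.96.133.7"
      hostnames:
        - "s3.eu-central-1.amazonaws.com"
  containers:
    - name: app
      image: my-app:latest # placeholder image; must trust s3proxy's TLS certificate
```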
diff --git a/docs/versioned_docs/version-2.21/workflows/sbom.md b/docs/versioned_docs/version-2.21/workflows/sbom.md deleted file mode 100644 index 6c1702dee..000000000 --- a/docs/versioned_docs/version-2.21/workflows/sbom.md +++ /dev/null @@ -1,93 +0,0 @@ -# Consume software bill of materials (SBOMs) - - - ---- - -Constellation builds produce a [software bill of materials (SBOM)](https://www.ntia.gov/SBOM) for each generated [artifact](../architecture/microservices.md). -You can use SBOMs to make informed decisions about dependencies and vulnerabilities in a given application. Enterprises rely on SBOMs to maintain an inventory of used applications, which allows them to take data-driven approaches to managing risks related to vulnerabilities. - -SBOMs for Constellation are generated using [Syft](https://github.com/anchore/syft), signed using [Cosign](https://github.com/sigstore/cosign), and stored with the produced artifact. - -:::note -The public key for Edgeless Systems' long-term code-signing key is: - -``` ------BEGIN PUBLIC KEY----- -MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEf8F1hpmwE+YCFXzjGtaQcrL6XZVT -JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== ------END PUBLIC KEY----- -``` - -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). - -Make sure the key is available in a file named `cosign.pub` to execute the following examples. -::: - -## Verify and download SBOMs - -The following sections detail how to work with each type of artifact to verify and extract the SBOM. - -### Constellation CLI - -The SBOM for Constellation CLI is made available on the [GitHub release page](https://github.com/edgelesssys/constellation/releases). The SBOM (`constellation.spdx.sbom`) and corresponding signature (`constellation.spdx.sbom.sig`) are valid for each Constellation CLI for a given version, regardless of architecture and operating system. - -```bash -curl -LO https://github.com/edgelesssys/constellation/releases/download/v2.2.0/constellation.spdx.sbom -curl -LO https://github.com/edgelesssys/constellation/releases/download/v2.2.0/constellation.spdx.sbom.sig -cosign verify-blob --key cosign.pub --signature constellation.spdx.sbom.sig constellation.spdx.sbom -``` - -### Container Images - -SBOMs for container images are [attached to the image using Cosign](https://docs.sigstore.dev/cosign/signing/other_types/#sboms-software-bill-of-materials) and uploaded to the same registry. 
- -As a consumer, use cosign to download and verify the SBOM: - -```bash -# Verify and download the attestation statement -cosign verify-attestation ghcr.io/edgelesssys/constellation/verification-service@v2.2.0 --type 'https://cyclonedx.org/bom' --key cosign.pub --output-file verification-service.att.json -# Extract SBOM from attestation statement -jq -r .payload verification-service.att.json | base64 -d > verification-service.cyclonedx.sbom -``` - -A successful verification should result in similar output: - -```shell-session -$ cosign verify-attestation ghcr.io/edgelesssys/constellation/verification-service@v2.2.0 --type 'https://cyclonedx.org/bom' --key cosign.pub --output-file verification-service.sbom - -Verification for ghcr.io/edgelesssys/constellation/verification-service@v2.2.0 -- -The following checks were performed on each of these signatures: - - The cosign claims were validated - - The signatures were verified against the specified public key -$ jq -r .payload verification-service.sbom | base64 -d > verification-service.cyclonedx.sbom -``` - -:::note - -This example considers only the `verification-service`. The same approach works for all containers in the [Constellation container registry](https://github.com/orgs/edgelesssys/packages?repo_name=constellation). - -::: - - - -## Vulnerability scanning - -You can use a plethora of tools to consume SBOMs. This section provides suggestions for tools that are popular and known to produce reliable results, but any tool that consumes [SPDX](https://spdx.dev/) or [CycloneDX](https://cyclonedx.org/) files should work. - -Syft is able to [convert between the two formats](https://github.com/anchore/syft#format-conversion-experimental) in case you require a specific type. - -### Grype - -[Grype](https://github.com/anchore/grype) is a CLI tool that lends itself well for integration into CI/CD systems or local developer machines. It's also able to consume the signed attestation statement directly and does the verification in one go. - -```bash -grype att:verification-service.sbom --key cosign.pub --add-cpes-if-none -q -``` - -### Dependency Track - -[Dependency Track](https://dependencytrack.org/) is one of the oldest and most mature solutions when it comes to managing software inventory and vulnerabilities. Once imported, it continuously scans SBOMs for new vulnerabilities. It supports the CycloneDX format and provides direct guidance on how to comply with [U.S. Executive Order 14028](https://docs.dependencytrack.org/usage/executive-order-14028/). diff --git a/docs/versioned_docs/version-2.21/workflows/scale.md b/docs/versioned_docs/version-2.21/workflows/scale.md deleted file mode 100644 index 28f19e3f1..000000000 --- a/docs/versioned_docs/version-2.21/workflows/scale.md +++ /dev/null @@ -1,122 +0,0 @@ -# Scale your cluster - -Constellation provides all features of a Kubernetes cluster including scaling and autoscaling. - -## Worker node scaling - -### Autoscaling - -Constellation comes with autoscaling disabled by default. To enable autoscaling, find the scaling group of -worker nodes: - -```bash -kubectl get scalinggroups -o json | yq '.items | .[] | select(.spec.role == "Worker") | [{"name": .metadata.name, "nodeGoupName": .spec.nodeGroupName}]' -``` - -This will output a list of scaling groups with the corresponding cloud provider name (`name`) and the cloud provider agnostic name of the node group (`nodeGroupName`). 
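If you prefer to script the lookup, a sketch like the following captures the first worker group's provider name directly (assuming the fields shown by the command above):

```bash
# Store the cloud provider name of the first worker scaling group in a variable.
worker_group=$(kubectl get scalinggroups -o json | yq '.items[] | select(.spec.role == "Worker") | .metadata.name' | head -n1)
echo "Worker scaling group: ${worker_group}"
```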
- -Then, patch the `autoscaling` field of the scaling group resource with the desired `name` to `true`: - -```bash -# Replace with the name of the scaling group you want to enable autoscaling for -worker_group= -kubectl patch scalinggroups $worker_group --patch '{"spec":{"autoscaling": true}}' --type='merge' -kubectl get scalinggroup $worker_group -o jsonpath='{.spec}' | yq -P -``` - -The cluster autoscaler now automatically provisions additional worker nodes so that all pods have a place to run. -You can configure the minimum and maximum number of worker nodes in the scaling group by patching the `min` or -`max` fields of the scaling group resource: - -```bash -kubectl patch scalinggroups $worker_group --patch '{"spec":{"max": 5}}' --type='merge' -kubectl get scalinggroup $worker_group -o jsonpath='{.spec}' | yq -P -``` - -The cluster autoscaler will now never provision more than 5 worker nodes. - -If you want to see the autoscaling in action, try to add a deployment with a lot of replicas, like the -following Nginx deployment. The number of replicas needed to trigger the autoscaling depends on the size of -and count of your worker nodes. Wait for the rollout of the deployment to finish and compare the number of -worker nodes before and after the deployment: - -```bash -kubectl create deployment nginx --image=nginx --replicas 150 -kubectl -n kube-system get nodes -kubectl rollout status deployment nginx -kubectl -n kube-system get nodes -``` - -### Manual scaling - -Alternatively, you can manually scale your cluster up or down: - - - - -1. Go to Auto Scaling Groups and select the worker ASG to scale up. -2. Click **Edit** -3. Set the new (increased) **Desired capacity** and **Update**. - - - - -1. Find your Constellation resource group. -2. Select the `scale-set-workers`. -3. Go to **settings** and **scaling**. -4. Set the new **instance count** and **save**. - - - - -1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). -2. **Edit** the **worker** instance group. -3. Set the new **number of instances** and **save**. - - - - -Dynamic cluster scaling isn't yet supported for STACKIT. -Support will be introduced in one of the upcoming releases. - - - - -## Control-plane node scaling - -Control-plane nodes can **only be scaled manually and only scaled up**! - -To increase the number of control-plane nodes, follow these steps: - - - - -1. Go to Auto Scaling Groups and select the control-plane ASG to scale up. -2. Click **Edit** -3. Set the new (increased) **Desired capacity** and **Update**. - - - - -1. Find your Constellation resource group. -2. Select the `scale-set-controlplanes`. -3. Go to **settings** and **scaling**. -4. Set the new (increased) **instance count** and **save**. - - - - -1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). -2. **Edit** the **control-plane** instance group. -3. Set the new (increased) **number of instances** and **save**. - - - - -Dynamic cluster scaling isn't yet supported for STACKIT. -Support will be introduced in one of the upcoming releases. - - - - -If you scale down the number of control-planes nodes, the removed nodes won't be able to exit the `etcd` cluster correctly. This will endanger the quorum that's required to run a stable Kubernetes control plane. 
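After scaling up the control plane, it's worth checking that the new nodes have joined and report `Ready`, for example:

```bash
# List control-plane nodes and their status (standard kubeadm node-role label).
kubectl get nodes -l node-role.kubernetes.io/control-plane -o wide
```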
diff --git a/docs/versioned_docs/version-2.21/workflows/storage.md b/docs/versioned_docs/version-2.21/workflows/storage.md deleted file mode 100644 index a5c52be90..000000000 --- a/docs/versioned_docs/version-2.21/workflows/storage.md +++ /dev/null @@ -1,281 +0,0 @@ -# Use persistent storage - -Persistent storage in Kubernetes requires cloud-specific configuration. -For abstraction of container storage, Kubernetes offers [volumes](https://kubernetes.io/docs/concepts/storage/volumes/), -allowing users to mount storage solutions directly into containers. -The [Container Storage Interface (CSI)](https://kubernetes-csi.github.io/docs/) is the standard interface for exposing arbitrary block and file storage systems into containers in Kubernetes. -Cloud service providers (CSPs) offer their own CSI-based solutions for cloud storage. - -## Confidential storage - -Most cloud storage solutions support encryption, such as [GCE Persistent Disks (PD)](https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek). -Constellation supports the available CSI-based storage options for Kubernetes engines in AWS, Azure, GCP, and STACKIT. -However, their encryption takes place in the storage backend and is managed by the CSP. -Thus, using the default CSI drivers for these storage types means trusting the CSP with your persistent data. - -To address this, Constellation provides CSI drivers for AWS EBS, Azure Disk, GCE PD, and OpenStack Cinder, offering [encryption on the node level](../architecture/keys.md#storage-encryption). They enable transparent encryption for persistent volumes without needing to trust the cloud backend. Plaintext data never leaves the confidential VM context, offering you confidential storage. - -For more details see [encrypted persistent storage](../architecture/encrypted-storage.md). - -## CSI drivers - -Constellation supports the following drivers, which offer node-level encryption and optional integrity protection. - - - - -**Constellation CSI driver for AWS Elastic Block Store** -Mount [Elastic Block Store](https://aws.amazon.com/ebs/) storage volumes into your Constellation cluster. -Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-aws-ebs-csi-driver) for more information. - - - - -**Constellation CSI driver for Azure Disk**: -Mount Azure [Disk Storage](https://azure.microsoft.com/en-us/services/storage/disks/#overview) into your Constellation cluster. -See the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-azuredisk-csi-driver) for more information. -Since Azure Disks are mounted as `ReadWriteOnce`, they're only available to a single pod. - - - - -**Constellation CSI driver for GCP Persistent Disk**: -Mount [Persistent Disk](https://cloud.google.com/persistent-disk) block storage into your Constellation cluster. -Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-gcp-compute-persistent-disk-csi-driver) for more information. - - - - -**Constellation CSI driver for STACKIT / OpenStack Cinder** -Mount [Cinder](https://docs.openstack.org/cinder/latest/) block storage volumes into your Constellation cluster. 
-Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-cloud-provider-openstack) for more information. - - - - -Note that in case the options above aren't a suitable solution for you, Constellation is compatible with all other CSI-based storage options. For example, you can use [AWS EFS](https://docs.aws.amazon.com/en_en/eks/latest/userguide/efs-csi.html), [Azure Files](https://docs.microsoft.com/en-us/azure/storage/files/storage-files-introduction), or [GCP Filestore](https://cloud.google.com/filestore) with Constellation out of the box. Constellation is just not providing transparent encryption on the node level for these storage types yet. - -## Installation - -The Constellation CLI automatically installs Constellation's CSI driver for the selected CSP in your cluster. -If you don't need a CSI driver or wish to deploy your own, you can disable the automatic installation by setting `deployCSIDriver` to `false` in your Constellation config file. - - - - -AWS comes with two storage classes by default. - -* `encrypted-rwo` - * Uses [SSDs of `gp3` type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html) - * ext-4 filesystem - * Encryption of all data written to disk -* `integrity-encrypted-rwo` - * Uses [SSDs of `gp3` type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html) - * ext-4 filesystem - * Encryption of all data written to disk - * Integrity protection of data written to disk - -For more information on encryption algorithms and key sizes, refer to [cryptographic algorithms](../architecture/encrypted-storage.md#cryptographic-algorithms). - -:::info - -The default storage class is set to `encrypted-rwo` for performance reasons. -If you want integrity-protected storage, set the `storageClassName` parameter of your persistent volume claim to `integrity-encrypted-rwo`. - -Alternatively, you can create your own storage class with integrity protection enabled by adding `csi.storage.k8s.io/fstype: ext4-integrity` to the class `parameters`. -Or use another filesystem by specifying another file system type with the suffix `-integrity`, e.g., `csi.storage.k8s.io/fstype: xfs-integrity`. - -Note that volume expansion isn't supported for integrity-protected disks. - -::: - - - - -Azure comes with two storage classes by default. - -* `encrypted-rwo` - * Uses [Standard SSDs](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#standard-ssds) - * ext-4 filesystem - * Encryption of all data written to disk -* `integrity-encrypted-rwo` - * Uses [Premium SSDs](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#premium-ssds) - * ext-4 filesystem - * Encryption of all data written to disk - * Integrity protection of data written to disk - -For more information on encryption algorithms and key sizes, refer to [cryptographic algorithms](../architecture/encrypted-storage.md#cryptographic-algorithms). - -:::info - -The default storage class is set to `encrypted-rwo` for performance reasons. -If you want integrity-protected storage, set the `storageClassName` parameter of your persistent volume claim to `integrity-encrypted-rwo`. - -Alternatively, you can create your own storage class with integrity protection enabled by adding `csi.storage.k8s.io/fstype: ext4-integrity` to the class `parameters`. 
-Or use another filesystem by specifying another file system type with the suffix `-integrity`, e.g., `csi.storage.k8s.io/fstype: xfs-integrity`. - -Note that volume expansion isn't supported for integrity-protected disks. - -::: - - - - -GCP comes with two storage classes by default. - -* `encrypted-rwo` - * Uses [standard persistent disks](https://cloud.google.com/compute/docs/disks#pdspecs) - * ext-4 filesystem - * Encryption of all data written to disk -* `integrity-encrypted-rwo` - * Uses [performance (SSD) persistent disks](https://cloud.google.com/compute/docs/disks#pdspecs) - * ext-4 filesystem - * Encryption of all data written to disk - * Integrity protection of data written to disk - -For more information on encryption algorithms and key sizes, refer to [cryptographic algorithms](../architecture/encrypted-storage.md#cryptographic-algorithms). - -:::info - -The default storage class is set to `encrypted-rwo` for performance reasons. -If you want integrity-protected storage, set the `storageClassName` parameter of your persistent volume claim to `integrity-encrypted-rwo`. - -Alternatively, you can create your own storage class with integrity protection enabled by adding `csi.storage.k8s.io/fstype: ext4-integrity` to the class `parameters`. -Or use another filesystem by specifying another file system type with the suffix `-integrity`, e.g., `csi.storage.k8s.io/fstype: xfs-integrity`. - -Note that volume expansion isn't supported for integrity-protected disks. - -::: - - - - -STACKIT comes with two storage classes by default. - -* `encrypted-rwo` - * Uses [disks of `storage_premium_perf1` type](https://docs.stackit.cloud/stackit/en/service-plans-blockstorage-75137974.html) - * ext-4 filesystem - * Encryption of all data written to disk -* `integrity-encrypted-rwo` - * Uses [disks of `storage_premium_perf1` type](https://docs.stackit.cloud/stackit/en/service-plans-blockstorage-75137974.html) - * ext-4 filesystem - * Encryption of all data written to disk - * Integrity protection of data written to disk - -For more information on encryption algorithms and key sizes, refer to [cryptographic algorithms](../architecture/encrypted-storage.md#cryptographic-algorithms). - -:::info - -The default storage class is set to `encrypted-rwo` for performance reasons. -If you want integrity-protected storage, set the `storageClassName` parameter of your persistent volume claim to `integrity-encrypted-rwo`. - -Alternatively, you can create your own storage class with integrity protection enabled by adding `csi.storage.k8s.io/fstype: ext4-integrity` to the class `parameters`. -Or use another filesystem by specifying another file system type with the suffix `-integrity`, e.g., `csi.storage.k8s.io/fstype: xfs-integrity`. - -Note that volume expansion isn't supported for integrity-protected disks. - -::: - - - - -1. Create a [persistent volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) - - A [persistent volume claim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) is a request for storage with certain properties. - It can refer to a storage class. - The following creates a persistent volume claim, requesting 20 GB of storage via the `encrypted-rwo` storage class: - - ```bash - cat < - ---- - -You can terminate your cluster using the CLI. For this, you need the Terraform state directory named [`constellation-terraform`](../reference/terraform.md) in the current directory. 
- -:::danger - -All ephemeral storage and state of your cluster will be lost. Make sure any data is safely stored in persistent storage. Constellation can recreate your cluster and the associated encryption keys, but won't backup your application data automatically. - -::: - - - -Terminate the cluster by running: - -```bash -constellation terminate -``` - -Or without confirmation (e.g., for automation purposes): - -```bash -constellation terminate --yes -``` - -This deletes all resources created by Constellation in your cloud environment. -All local files created by the `apply` command are deleted as well, except for `constellation-mastersecret.json` and the configuration file. - -:::caution - -Termination can fail if additional resources have been created that depend on the ones managed by Constellation. In this case, you need to delete these additional -resources manually. Just run the `terminate` command again afterward to continue the termination process of the cluster. - -::: - - - -Terminate the cluster by running: - -```bash -terraform destroy -``` - -Delete all files that are no longer needed: - -```bash -rm constellation-state.yaml constellation-admin.conf -``` - -Only the `constellation-mastersecret.json` and the configuration file remain. - - - diff --git a/docs/versioned_docs/version-2.21/workflows/terraform-provider.md b/docs/versioned_docs/version-2.21/workflows/terraform-provider.md deleted file mode 100644 index c7a795d3f..000000000 --- a/docs/versioned_docs/version-2.21/workflows/terraform-provider.md +++ /dev/null @@ -1,140 +0,0 @@ -# Use the Terraform provider - -The Constellation Terraform provider allows to manage the full lifecycle of a Constellation cluster (namely creation, upgrades, and deletion) via Terraform. -The provider is available through the [Terraform registry](https://registry.terraform.io/providers/edgelesssys/constellation/latest) and is released in lock-step with Constellation releases. - -## Prerequisites - -- a Linux / Mac operating system (ARM64/AMD64) -- a Terraform installation of version `v1.4.4` or above - -## Quick setup - -This example shows how to set up a Constellation cluster with the reference IAM and infrastructure setup. This setup is also used when creating a Constellation cluster through the Constellation CLI. You can either consume the IAM / infrastructure modules through a remote source (recommended) or local files. The latter requires downloading the infrastructure and IAM modules for the corresponding CSP from `terraform-modules.zip` on the [Constellation release page](https://github.com/edgelesssys/constellation/releases/latest) and placing them in the Terraform workspace directory. - -1. Create a directory (workspace) for your Constellation cluster. - - ```bash - mkdir constellation-workspace - cd constellation-workspace - ``` - -2. Use one of the [example configurations for using the Constellation Terraform provider](https://github.com/edgelesssys/constellation/tree/main/terraform-provider-constellation/examples/full) or create a `main.tf` file and fill it with the resources you want to create. The [Constellation Terraform provider documentation](https://registry.terraform.io/providers/edgelesssys/constellation/latest) offers thorough documentation on the resources and their attributes. -3. Initialize and apply the Terraform configuration. - - - - Initialize the providers and apply the configuration. 
- - ```bash - terraform init - terraform apply - ``` - - Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. - - - -:::info -On SEV-SNP, you need to manually patch the policy of the MAA provider before creating the Constellation cluster, as this feature isn't available in Azure's Terraform provider yet. The Constellation CLI provides a utility for patching, but you can also do it manually. - - ```bash - terraform init - terraform apply -target module.azure_iam # adjust resource path if not using the example configuration - terraform apply -target module.azure_infrastructure # adjust resource path if not using the example configuration - constellation maa-patch $(terraform output -raw maa_url) # adjust output path / input if not using the example configuration or manually patch the resource - terraform apply -target constellation_cluster.azure_example # adjust resource path if not using the example configuration - ``` - - Use the following policy if manually performing the patch. - - ``` - version= 1.0; - authorizationrules - { - [type=="x-ms-azurevm-default-securebootkeysvalidated", value==false] => deny(); - [type=="x-ms-azurevm-debuggersdisabled", value==false] => deny(); - // The line below was edited to use the MAA provider within Constellation. Do not edit manually. - //[type=="secureboot", value==false] => deny(); - [type=="x-ms-azurevm-signingdisabled", value==false] => deny(); - [type=="x-ms-azurevm-dbvalidated", value==false] => deny(); - [type=="x-ms-azurevm-dbxvalidated", value==false] => deny(); - => permit(); - }; - issuancerules - { - }; - ``` - -::: - - Initialize the providers and apply the configuration. - - ```bash - terraform init - terraform apply - ``` - - Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. - - - - - Initialize the providers and apply the configuration. - - ```bash - terraform init - terraform apply - ``` - - Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. - - - Initialize the providers and apply the configuration. - - ```bash - terraform init - terraform apply - ``` - - Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. - - - -4. Connect to the cluster. - - ```bash - terraform output -raw kubeconfig > constellation-admin.conf - export KUBECONFIG=$(realpath constellation-admin.conf) - ``` - -## Bringing your own infrastructure - -Instead of using the example infrastructure used in the [quick setup](#quick-setup), you can also provide your own infrastructure. -If you need a starting point for a custom infrastructure setup, you can download the infrastructure / IAM Terraform modules for the respective CSP from the Constellation [GitHub releases](https://github.com/edgelesssys/constellation/releases). 
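For example, fetching the modules for a release could look like this (the release tag below is an assumption — pick the tag matching the version you want to deploy):

```bash
# Download and unpack the reference Terraform modules from a Constellation release.
VERSION=v2.21.0   # assumption: adjust to your target release
curl -fsSLO "https://github.com/edgelesssys/constellation/releases/download/${VERSION}/terraform-modules.zip"
unzip terraform-modules.zip -d terraform-modules
```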
You can modify and extend the modules per your requirements, while keeping the basic functionality intact. -The module contains: - -- `{csp}`: cloud resources the cluster runs on -- `iam/{csp}`: IAM resources used within the cluster - -When upgrading your cluster, make sure to check the Constellation release notes for potential breaking changes in the reference infrastructure / IAM modules that need to be considered. - -## Cluster upgrades - -:::tip -Also see the [general documentation on cluster upgrades](./upgrade.md). -::: - -The steps for applying the upgrade are as follows: - -1. Update the version constraint of the Constellation Terraform provider in the `required_providers` block in your Terraform configuration. -2. If you explicitly set any of the version attributes of the provider's resources and data sources (e.g. `image_version` or `constellation_microservice_version`), make sure to update them too. Refer to Constellation's [version support policy](https://github.com/edgelesssys/constellation/blob/main/dev-docs/workflows/versions-support.md) for more information on how each Constellation version and its dependencies are supported. -3. Update the IAM / infrastructure configuration. - - For [remote addresses as module sources](https://developer.hashicorp.com/terraform/language/modules/sources#fetching-archives-over-http), update the version number inside the address of the `source` field of the infrastructure / IAM module to the target version. - - For [local paths as module sources](https://developer.hashicorp.com/terraform/language/modules/sources#local-paths) or when [providing your own infrastructure](#bringing-your-own-infrastructure), see the changes made in the reference modules since the upgrade's origin version and adjust your infrastructure / IAM configuration accordingly. -4. Upgrade the Terraform module and provider dependencies and apply the targeted configuration. - -```bash - terraform init -upgrade - terraform apply -``` diff --git a/docs/versioned_docs/version-2.21/workflows/troubleshooting.md b/docs/versioned_docs/version-2.21/workflows/troubleshooting.md deleted file mode 100644 index 195bce1cc..000000000 --- a/docs/versioned_docs/version-2.21/workflows/troubleshooting.md +++ /dev/null @@ -1,151 +0,0 @@ -# Troubleshooting - -This section aids you in finding problems when working with Constellation. - -## Common issues - -### Issues with creating new clusters - -When you create a new cluster, you should always use the [latest release](https://github.com/edgelesssys/constellation/releases/latest). -If something doesn't work, check out the [known issues](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22). - -### Azure: Resource Providers can't be registered - -On Azure, you may receive the following error when running `apply` or `terminate` with limited IAM permissions: - -```shell-session -Error: Error ensuring Resource Providers are registered. - -Terraform automatically attempts to register the Resource Providers it supports to -ensure it's able to provision resources. - -If you don't have permission to register Resource Providers you may wish to use the -"skip_provider_registration" flag in the Provider block to disable this functionality. - -[...] -``` - -To continue, please ensure that the [required resource providers](../getting-started/install.md#required-permissions) have been registered in your subscription by your administrator. 
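You can check the registration state of a provider with the Azure CLI, for example (one provider shown as a sketch — check each provider listed in the installation guide):

```bash
# Show whether the Microsoft.Compute resource provider is registered in the current subscription.
az provider show --namespace Microsoft.Compute --query registrationState --output tsv
```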
- -Afterward, set `ARM_SKIP_PROVIDER_REGISTRATION=true` as an environment variable and either run `apply` or `terminate` again. -For example: - -```bash -ARM_SKIP_PROVIDER_REGISTRATION=true constellation apply -``` - -Or alternatively, for `terminate`: - -```bash -ARM_SKIP_PROVIDER_REGISTRATION=true constellation terminate -``` - -### Azure: Can't update attestation policy - -On Azure, you may receive the following error when running `apply` from within an Azure environment, e.g., an Azure VM: - -```shell-session -An error occurred: patching policies: updating attestation policy: unexpected status code: 403 Forbidden -``` - -The problem occurs because the Azure SDK we use internally attempts to [authenticate towards the Azure API with the managed identity of your current environment instead of the Azure CLI token](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DefaultAzureCredential). - -We decided not to deviate from this behavior and comply with the ordering of credentials. - -A solution is to add the [required permissions](../getting-started/install.md#required-permissions) to the managed identity of your environment. For example, the managed identity of your Azure VM, instead of the account that you've authenticated with in the Azure CLI. - -If your setup requires a change in the ordering of credentials, please open an issue and explain your desired behavior. - - - -### Nodes fail to join with error `untrusted measurement value` - -This error indicates that a node's [attestation statement](../architecture/attestation.md) contains measurements that don't match the trusted values expected by the [JoinService](../architecture/microservices.md#joinservice). -This may for example happen if the cloud provider updates the VM's firmware such that it influences the [runtime measurements](../architecture/attestation.md#runtime-measurements) in an unforeseen way. -A failed upgrade due to an erroneous attestation config can also cause this error. -You can change the expected measurements to resolve the failure. - -:::caution - -Attestation and trusted measurements are crucial for the security of your cluster. -Be extra careful when manually changing these settings. -When in doubt, check if the encountered [issue is known](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22) or [contact support](https://github.com/edgelesssys/constellation#support). - -::: - -:::tip - -During an upgrade with modified attestation config, a backup of the current configuration is stored in the `join-config` config map in the `kube-system` namespace under the `attestationConfig_backup` key. To restore the old attestation config after a failed upgrade, replace the value of `attestationConfig` with the value from `attestationConfig_backup`: - -```bash -kubectl patch configmaps -n kube-system join-config -p "{\"data\":{\"attestationConfig\":\"$(kubectl get configmaps -n kube-system join-config -o "jsonpath={.data.attestationConfig_backup}")\"}}" -``` - -::: - -You can use the `apply` command to change measurements of a running cluster: - -1. Modify the `measurements` key in your local `constellation-conf.yaml` to the expected values. -2. Run `constellation apply`. - -Keep in mind that running `apply` also applies any version changes from your config to the cluster. 
- -You can run these commands to learn about the versions currently configured in the cluster: - -- Kubernetes API server version: `kubectl get nodeversion constellation-version -o json -n kube-system | jq .spec.kubernetesClusterVersion` -- image version: `kubectl get nodeversion constellation-version -o json -n kube-system | jq .spec.imageVersion` -- microservices versions: `helm list --filter 'constellation-services' -n kube-system` - -### Upgrading Kubernetes resources fails - -Constellation manages its Kubernetes resources using Helm. -When applying an upgrade, the charts that are about to be installed, and a values override file `overrides.yaml`, -are saved to disk in your current workspace under `constellation-upgrade/upgrade-/helm-charts/`. -If upgrading the charts using the Constellation CLI fails, you can review these charts and try to manually apply the upgrade. - -:::caution - -Changing and manually applying the charts may destroy cluster resources and can lead to broken Constellation deployments. -Proceed with caution and when in doubt, -check if the encountered [issue is known](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22) or [contact support](https://github.com/edgelesssys/constellation#support). - -::: - -## Diagnosing issues - -### Logs - -To get started on diagnosing issues with Constellation, it's often helpful to collect logs from nodes, pods, or other resources in the cluster. Most logs are available through Kubernetes' standard -[logging interfaces](https://kubernetes.io/docs/concepts/cluster-administration/logging/). - -To debug issues occurring at boot time of the nodes, you can use the serial console interface of the CSP while the machine boots to get a read-only view of the boot logs. - -Apart from that, Constellation also offers further [observability integrations](../architecture/observability.md). - -### Node shell access - -Debugging via a shell on a node is [directly supported by Kubernetes](https://kubernetes.io/docs/tasks/debug/debug-application/debug-running-pod/#node-shell-session). - -1. Figure out which node to connect to: - - ```bash - kubectl get nodes - # or to see more information, such as IPs: - kubectl get nodes -o wide - ``` - -2. Connect to the node: - - ```bash - kubectl debug node/constell-worker-xksa0-000000 -it --image=busybox - ``` - - You will be presented with a prompt. - - The nodes file system is mounted at `/host`. - -3. Once finished, clean up the debug pod: - - ```bash - kubectl delete pod node-debugger-constell-worker-xksa0-000000-bjthj - ``` diff --git a/docs/versioned_docs/version-2.21/workflows/trusted-launch.md b/docs/versioned_docs/version-2.21/workflows/trusted-launch.md deleted file mode 100644 index d6d01d8eb..000000000 --- a/docs/versioned_docs/version-2.21/workflows/trusted-launch.md +++ /dev/null @@ -1,54 +0,0 @@ -# Use Azure trusted launch VMs - -Constellation also supports [trusted launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch) on Microsoft Azure. Trusted launch VMs don't offer the same level of security as Confidential VMs, but are available in more regions and in larger quantities. The main difference between trusted launch VMs and normal VMs is that the former offer vTPM-based remote attestation. When used with trusted launch VMs, Constellation relies on vTPM-based remote attestation to verify nodes. 
- -:::caution - -Trusted launch VMs don't provide runtime encryption and don't keep the cloud service provider (CSP) out of your trusted computing base. - -::: - -Constellation supports trusted launch VMs with instance types `Standard_D*_v4` and `Standard_E*_v4`. Run `constellation config instance-types` for a list of all supported instance types. - -## VM images - -Azure currently doesn't support [community galleries for trusted launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/share-gallery-community). Thus, you need to manually import the Constellation node image into your cloud subscription. - -The latest image is available at `https://cdn.confidential.cloud/constellation/images/azure/trusted-launch/v2.2.0/constellation.img`. Simply adjust the version number to download a newer version. - -After you've downloaded the image, create a resource group `constellation-images` in your Azure subscription and import the image. -You can use a script to do this: - -```bash -wget https://raw.githubusercontent.com/edgelesssys/constellation/main/hack/importAzure.sh -chmod +x importAzure.sh -AZURE_IMAGE_VERSION=2.2.0 AZURE_RESOURCE_GROUP_NAME=constellation-images AZURE_IMAGE_FILE=./constellation.img ./importAzure.sh -``` - -The script creates the following resources: - -1. A new image gallery with the default name `constellation-import` -2. A new image definition with the default name `constellation` -3. The actual image with the provided version. In this case `2.2.0` - -Once the import is completed, use the `ID` of the image version in your `constellation-conf.yaml` for the `image` field. Set `confidentialVM` to `false`. - -Fetch the image measurements: - -```bash -IMAGE_VERSION=2.2.0 -URL=https://public-edgeless-constellation.s3.us-east-2.amazonaws.com//communitygalleries/constellationcvm-b3782fa0-0df7-4f2f-963e-fc7fc42663df/images/constellation/versions/$IMAGE_VERSION/measurements.yaml -constellation config fetch-measurements -u$URL -s$URL.sig -``` - -:::info - -The [`constellation apply`](create.md) command will issue a warning because manually imported images aren't recognized as production grade images: - -```shell-session -Configured image doesn't look like a released production image. Double check image before deploying to production. -``` - -Please ignore this warning. - -::: diff --git a/docs/versioned_docs/version-2.21/workflows/upgrade.md b/docs/versioned_docs/version-2.21/workflows/upgrade.md deleted file mode 100644 index 3db2ecad6..000000000 --- a/docs/versioned_docs/version-2.21/workflows/upgrade.md +++ /dev/null @@ -1,110 +0,0 @@ -# Upgrade your cluster - -Constellation provides an easy way to upgrade all components of your cluster, without disrupting its availability. -Specifically, you can upgrade the Kubernetes version, the nodes' image, and the Constellation microservices. -You configure the desired versions in your local Constellation configuration and trigger upgrades with the `apply` command. -To learn about available versions you use the `upgrade check` command. -Which versions are available depends on the CLI version you are using. - -## Update the CLI - -Each CLI comes with a set of supported microservice and Kubernetes versions. -Most importantly, a given CLI version can only upgrade a cluster of the previous minor version, but not older ones. -This means that you have to upgrade your CLI and cluster one minor version at a time. 
- -For example, if you are currently on CLI version v2.6 and the latest version is v2.8, you should - -* upgrade the CLI to v2.7, -* upgrade the cluster to v2.7, -* and only then continue upgrading the CLI (and the cluster) to v2.8 after. - -Also note that if your current Kubernetes version isn't supported by the next CLI version, use your current CLI to upgrade to a newer Kubernetes version first. - -To learn which Kubernetes versions are supported by a particular CLI, run [constellation config kubernetes-versions](../reference/cli.md#constellation-config-kubernetes-versions). - -## Migrate the configuration - -The Constellation configuration file is located in the file `constellation-conf.yaml` in your workspace. -Refer to the [migration reference](../reference/migration.md) to check if you need to update fields in your configuration file. -Use [`constellation config migrate`](../reference/cli.md#constellation-config-migrate) to automatically update an old config file to a new format. - -## Check for upgrades - -To learn which versions the current CLI can upgrade to and what's installed in your cluster, run: - -```bash -# Show possible upgrades -constellation upgrade check - -# Show possible upgrades and write them to config file -constellation upgrade check --update-config -``` - -You can either enter the reported target versions into your config manually or run the above command with the `--update-config` flag. -When using this flag, the `kubernetesVersion`, `image`, `microserviceVersion`, and `attestation` fields are overwritten with the smallest available upgrade. - -## Apply the upgrade - -Once you updated your config with the desired versions, you can trigger the upgrade with this command: - -```bash -constellation apply -``` - -Microservice upgrades will be finished within a few minutes, depending on the cluster size. -If you are interested, you can monitor pods restarting in the `kube-system` namespace with your tool of choice. - -Image and Kubernetes upgrades take longer. -For each node in your cluster, a new node has to be created and joined. -The process usually takes up to ten minutes per node. - -When applying an upgrade, the Helm charts for the upgrade as well as backup files of Constellation-managed Custom Resource Definitions, Custom Resources, and Terraform state are created. -You can use the Terraform state backup to restore previous resources in case an upgrade misconfigured or erroneously deleted a resource. -You can use the Custom Resource (Definition) backup files to restore Custom Resources and Definitions manually (e.g., via `kubectl apply`) if the automatic migration of those resources fails. -You can use the Helm charts to manually apply upgrades to the Kubernetes resources, should an upgrade fail. - -:::note - -For advanced users: the upgrade consists of several phases that can be individually skipped through the `--skip-phases` flag. -The phases are `infrastracture` for the cloud resource management through Terraform, `helm` for the chart management of the microservices, `image` for OS image upgrades, and `k8s` for Kubernetes version upgrades. - -::: - -## Check the status - -Upgrades are asynchronous operations. -After you run `apply`, it will take a while until the upgrade has completed. 
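While an image or Kubernetes upgrade is rolling through the cluster, you can watch nodes being cycled:

```bash
# Watch nodes joining and leaving as they're replaced during the upgrade.
kubectl get nodes -o wide --watch
```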
-To understand if an upgrade is finished, you can run: - -```bash -constellation status -``` - -This command displays the following information: - -* The installed services and their versions -* The image and Kubernetes version the cluster is expecting on each node -* How many nodes are up to date - -Here's an example output: - -```shell-session -Target versions: - Image: v2.6.0 - Kubernetes: v1.25.8 -Service versions: - Cilium: v1.12.1 - cert-manager: v1.10.0 - constellation-operators: v2.6.0 - constellation-services: v2.6.0 -Cluster status: Some node versions are out of date - Image: 23/25 - Kubernetes: 25/25 -``` - -This output indicates that the cluster is running Kubernetes version `1.25.8`, and all nodes have the appropriate binaries installed. -23 out of 25 nodes have already upgraded to the targeted image version of `2.6.0`, while two are still in progress. - -## Apply further upgrades - -After the upgrade is finished, you can run `constellation upgrade check` again to see if there are more upgrades available. If so, repeat the process. diff --git a/docs/versioned_docs/version-2.21/workflows/verify-cli.md b/docs/versioned_docs/version-2.21/workflows/verify-cli.md deleted file mode 100644 index e33569d37..000000000 --- a/docs/versioned_docs/version-2.21/workflows/verify-cli.md +++ /dev/null @@ -1,129 +0,0 @@ -# Verify the CLI - -:::info -This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. -::: - - - ---- - -Edgeless Systems uses [sigstore](https://www.sigstore.dev/) and [SLSA](https://slsa.dev) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/cosign/signing/overview/), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at `https://rekor.sigstore.dev`. - -:::note -The public key for Edgeless Systems' long-term code-signing key is: - -``` ------BEGIN PUBLIC KEY----- -MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEf8F1hpmwE+YCFXzjGtaQcrL6XZVT -JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== ------END PUBLIC KEY----- -``` - -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). -::: - -The Rekor transparency log is a public append-only ledger that verifies and records signatures and associated metadata. The Rekor transparency log enables everyone to observe the sequence of (software) signatures issued by Edgeless Systems and many other parties. The transparency log allows for the public identification of dubious or malicious signatures. - -You should always ensure that (1) your CLI executable was signed with the private key corresponding to the above public key and that (2) there is a corresponding entry in the Rekor transparency log. Both can be done as described in the following. - -:::info -You don't need to verify the Constellation node images. This is done automatically by your CLI and the rest of Constellation. -::: - -## Verify the signature - -:::info -This guide assumes Linux on an amd64 processor. The exact steps for other platforms differ slightly. -::: - -First, [install the Cosign CLI](https://docs.sigstore.dev/cosign/system_config/installation/). 
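A minimal install sketch for Linux on amd64 (the Sigstore documentation lists further options; the asset name follows Cosign's release naming):

```bash
# Download the latest Cosign release binary and put it on the PATH.
LATEST=$(curl -fsSL https://api.github.com/repos/sigstore/cosign/releases/latest | jq -r .tag_name)
curl -fsSLO "https://github.com/sigstore/cosign/releases/download/${LATEST}/cosign-linux-amd64"
chmod +x cosign-linux-amd64
sudo mv cosign-linux-amd64 /usr/local/bin/cosign
```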
Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example: - -```shell-session -$ cosign verify-blob --key https://edgeless.systems/es.pub --signature constellation-linux-amd64.sig constellation-linux-amd64 - -Verified OK -``` - -The above performs an offline verification of the provided public key, signature, and executable. To also verify that a corresponding entry exists in the public Rekor transparency log, add the variable `COSIGN_EXPERIMENTAL=1`: - -```shell-session -$ COSIGN_EXPERIMENTAL=1 cosign verify-blob --key https://edgeless.systems/es.pub --signature constellation-linux-amd64.sig constellation-linux-amd64 - -tlog entry verified with uuid: afaba7f6635b3e058888692841848e5514357315be9528474b23f5dcccb82b13 index: 3477047 -Verified OK -``` - -🏁 You now know that your CLI executable was officially released and signed by Edgeless Systems. - -### Optional: Manually inspect the transparency log - -To further inspect the public Rekor transparency log, [install the Rekor CLI](https://docs.sigstore.dev/logging/installation). A search for the CLI executable should give a single UUID. (Note that this UUID contains the UUID from the previous `cosign` command.) - -```shell-session -$ rekor-cli search --artifact constellation-linux-amd64 - -Found matching entries (listed by UUID): -362f8ecba72f4326afaba7f6635b3e058888692841848e5514357315be9528474b23f5dcccb82b13 -``` - -With this UUID you can get the full entry from the transparency log: - -```shell-session -$ rekor-cli get --uuid=362f8ecba72f4326afaba7f6635b3e058888692841848e5514357315be9528474b23f5dcccb82b13 - -LogID: c0d23d6ad406973f9559f3ba2d1ca01f84147d8ffc5b8445c224f98b9591801d -Index: 3477047 -IntegratedTime: 2022-09-12T22:28:16Z -UUID: afaba7f6635b3e058888692841848e5514357315be9528474b23f5dcccb82b13 -Body: { - "HashedRekordObj": { - "data": { - "hash": { - "algorithm": "sha256", - "value": "40e137b9b9b8204d672642fd1e181c6d5ccb50cfc5cc7fcbb06a8c2c78f44aff" - } - }, - "signature": { - "content": "MEUCIQCSER3mGj+j5Pr2kOXTlCIHQC3gT30I7qkLr9Awt6eUUQIgcLUKRIlY50UN8JGwVeNgkBZyYD8HMxwC/LFRWoMn180=", - "publicKey": { - "content": "LS0tLS1CRUdJTiBQVUJMSUMgS0VZLS0tLS0KTUZrd0V3WUhLb1pJemowQ0FRWUlLb1pJemowREFRY0RRZ0FFZjhGMWhwbXdFK1lDRlh6akd0YVFjckw2WFpWVApKbUVlNWlTTHZHMVN5UVNBZXc3V2RNS0Y2bzl0OGUyVEZ1Q2t6bE9oaGx3czJPSFdiaUZabkZXQ0Z3PT0KLS0tLS1FTkQgUFVCTElDIEtFWS0tLS0tCg==" - } - } - } -} -``` - -The field `publicKey` should contain Edgeless Systems' public key in Base64 encoding. - -You can get an exhaustive list of artifact signatures issued by Edgeless Systems via the following command: - -```bash -rekor-cli search --public-key https://edgeless.systems/es.pub --pki-format x509 -``` - -Edgeless Systems monitors this list to detect potential unauthorized use of its private key. - -## Verify the provenance - -Provenance attests that a software artifact was produced by a specific repository and build system invocation. For more information on provenance visit [slsa.dev](https://slsa.dev/provenance/v0.2) and learn about the [adoption of SLSA for Constellation](../reference/slsa.md). - -Just as checking its signature proves that the CLI hasn't been manipulated, checking the provenance proves that the artifact was produced by the expected build process and hasn't been tampered with. - -To verify the provenance, first install the [slsa-verifier](https://github.com/slsa-framework/slsa-verifier). 
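If you have a Go toolchain available, one way to get the verifier is shown below (a prebuilt binary is also published on the project's release page; pinning an explicit version instead of `latest` is recommended):

```bash
# Install the slsa-verifier CLI with Go.
go install github.com/slsa-framework/slsa-verifier/v2/cli/slsa-verifier@latest
```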
Then make sure you have the provenance file (`constellation.intoto.jsonl`) and Constellation CLI downloaded. Both are available on the [GitHub release page](https://github.com/edgelesssys/constellation/releases). - -:::info -The same provenance file is valid for all Constellation CLI executables of a given version independent of the target platform. -::: - -Use the verifier to perform the check: - -```shell-session -$ slsa-verifier verify-artifact constellation-linux-amd64 \ - --provenance-path constellation.intoto.jsonl \ - --source-uri github.com/edgelesssys/constellation - -Verified signature against tlog entry index 7771317 at URL: https://rekor.sigstore.dev/api/v1/log/entries/24296fb24b8ad77af2c04c8b4ae0d5bc5... -Verified build using builder https://github.com/slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@refs/tags/v1.2.2 at commit 18e9924b416323c37b9cdfd6cc728de8a947424a -PASSED: Verified SLSA provenance -``` diff --git a/docs/versioned_docs/version-2.21/workflows/verify-cluster.md b/docs/versioned_docs/version-2.21/workflows/verify-cluster.md deleted file mode 100644 index b6595ebf2..000000000 --- a/docs/versioned_docs/version-2.21/workflows/verify-cluster.md +++ /dev/null @@ -1,97 +0,0 @@ -# Verify your cluster - -Constellation's [attestation feature](../architecture/attestation.md) allows you, or a third party, to verify the integrity and confidentiality of your Constellation cluster. - -## Fetch measurements - -To verify the integrity of Constellation you need trusted measurements to verify against. For each node image released by Edgeless Systems, there are signed measurements, which you can download using the CLI: - -```bash -constellation config fetch-measurements -``` - -This command performs the following steps: - -1. Download the signed measurements for the configured image. By default, this will use Edgeless Systems' public measurement registry. -2. Verify the signature of the measurements. This will use Edgeless Systems' [public key](https://edgeless.systems/es.pub). -3. Write measurements into configuration file. - -The configuration file then contains a list of `measurements` similar to the following: - -```yaml -# ... -measurements: - 0: - expected: "0f35c214608d93c7a6e68ae7359b4a8be5a0e99eea9107ece427c4dea4e439cf" - warnOnly: false - 4: - expected: "02c7a67c01ec70ffaf23d73a12f749ab150a8ac6dc529bda2fe1096a98bf42ea" - warnOnly: false - 5: - expected: "e6949026b72e5045706cd1318889b3874480f7a3f7c5c590912391a2d15e6975" - warnOnly: true - 8: - expected: "0000000000000000000000000000000000000000000000000000000000000000" - warnOnly: false - 9: - expected: "f0a6e8601b00e2fdc57195686cd4ef45eb43a556ac1209b8e25d993213d68384" - warnOnly: false - 11: - expected: "0000000000000000000000000000000000000000000000000000000000000000" - warnOnly: false - 12: - expected: "da99eb6cf7c7fbb692067c87fd5ca0b7117dc293578e4fea41f95d3d3d6af5e2" - warnOnly: false - 13: - expected: "0000000000000000000000000000000000000000000000000000000000000000" - warnOnly: false - 14: - expected: "d7c4cc7ff7933022f013e03bdee875b91720b5b86cf1753cad830f95e791926f" - warnOnly: true - 15: - expected: "0000000000000000000000000000000000000000000000000000000000000000" - warnOnly: false -# ... -``` - -Each entry specifies the expected value of the Constellation node, and whether the measurement should be enforced (`warnOnly: false`), or only a warning should be logged (`warnOnly: true`). 
-By default, the subset of the [available measurements](../architecture/attestation.md#runtime-measurements) that can be locally reproduced and verified is enforced. - -During attestation, the validating side (CLI or [join service](../architecture/microservices.md#joinservice)) compares each measurement reported by the issuing side (first node or joining node) individually. -For mismatching measurements that have set `warnOnly` to `true` only a warning is emitted. -For mismatching measurements that have set `warnOnly` to `false` an error is emitted and attestation fails. -If attestation fails for a new node, it isn't permitted to join the cluster. - -## The *verify* command - -:::note -The steps below are purely optional. They're automatically executed by `constellation apply` when you initialize your cluster. The `constellation verify` command mostly has an illustrative purpose. -::: - -The `verify` command obtains and verifies an attestation statement from a running Constellation cluster. - -```bash -constellation verify [--cluster-id ...] -``` - -From the attestation statement, the command verifies the following properties: - -* The cluster is using the correct Confidential VM (CVM) type. -* Inside the CVMs, the correct node images are running. The node images are identified through the measurements obtained in the previous step. -* The unique ID of the cluster matches the one from your `constellation-state.yaml` file or passed in via `--cluster-id`. - -Once the above properties are verified, you know that you are talking to the right Constellation cluster and it's in a good and trustworthy shape. - -### Custom arguments - -The `verify` command also allows you to verify any Constellation deployment that you have network access to. For this you need the following: - -* The IP address of a running Constellation cluster's [VerificationService](../architecture/microservices.md#verificationservice). The `VerificationService` is exposed via a `NodePort` service using the external IP address of your cluster. Run `kubectl get nodes -o wide` and look for `EXTERNAL-IP`. -* The cluster's *clusterID*. See [cluster identity](../architecture/keys.md#cluster-identity) for more details. -* A `constellation-conf.yaml` file with the expected measurements of the cluster in your working directory. 
-
-For example:
-
-```shell-session
-constellation verify -e 192.0.2.1 --cluster-id Q29uc3RlbGxhdGlvbkRvY3VtZW50YXRpb25TZWNyZXQ=
-```
diff --git a/docs/versioned_docs/version-2.22/_media/SLSA-Badge-full-level3.svg b/docs/versioned_docs/version-2.22/_media/SLSA-Badge-full-level3.svg
deleted file mode 100644
index 7154d4a13..000000000
diff --git a/docs/versioned_docs/version-2.22/_media/benchmark_fio_azure_bw.png b/docs/versioned_docs/version-2.22/_media/benchmark_fio_azure_bw.png
deleted file mode 100644
index a82ebe2d0..000000000
Binary files a/docs/versioned_docs/version-2.22/_media/benchmark_fio_azure_bw.png and /dev/null differ
diff --git a/docs/versioned_docs/version-2.22/_media/benchmark_fio_azure_iops.png b/docs/versioned_docs/version-2.22/_media/benchmark_fio_azure_iops.png
deleted file mode 100644
index 1723257a8..000000000
Binary files a/docs/versioned_docs/version-2.22/_media/benchmark_fio_azure_iops.png and /dev/null differ
diff --git a/docs/versioned_docs/version-2.22/_media/benchmark_fio_gcp_bw.png b/docs/versioned_docs/version-2.22/_media/benchmark_fio_gcp_bw.png
deleted file mode 100644
index 4f0ecc94b..000000000
Binary files a/docs/versioned_docs/version-2.22/_media/benchmark_fio_gcp_bw.png and /dev/null differ
diff --git a/docs/versioned_docs/version-2.22/_media/benchmark_fio_gcp_iops.png b/docs/versioned_docs/version-2.22/_media/benchmark_fio_gcp_iops.png
deleted file mode 100644
index 571086da2..000000000
Binary files a/docs/versioned_docs/version-2.22/_media/benchmark_fio_gcp_iops.png and /dev/null differ
diff --git a/docs/versioned_docs/version-2.22/_media/benchmark_net_p2p_azure.png b/docs/versioned_docs/version-2.22/_media/benchmark_net_p2p_azure.png
deleted file mode 100644
index 9130349c7..000000000
Binary files a/docs/versioned_docs/version-2.22/_media/benchmark_net_p2p_azure.png and /dev/null differ
diff --git a/docs/versioned_docs/version-2.22/_media/benchmark_net_p2p_gcp.png b/docs/versioned_docs/version-2.22/_media/benchmark_net_p2p_gcp.png
deleted file mode 100644
index a41557e96..000000000
Binary files a/docs/versioned_docs/version-2.22/_media/benchmark_net_p2p_gcp.png and /dev/null differ
diff --git a/docs/versioned_docs/version-2.22/_media/benchmark_net_p2svc_azure.png b/docs/versioned_docs/version-2.22/_media/benchmark_net_p2svc_azure.png
deleted file mode 100644
index d83e17f5a..000000000
Binary files a/docs/versioned_docs/version-2.22/_media/benchmark_net_p2svc_azure.png and /dev/null differ
diff --git a/docs/versioned_docs/version-2.22/_media/benchmark_net_p2svc_gcp.png b/docs/versioned_docs/version-2.22/_media/benchmark_net_p2svc_gcp.png
deleted file mode 100644
index 55916a1de..000000000
Binary files a/docs/versioned_docs/version-2.22/_media/benchmark_net_p2svc_gcp.png and /dev/null differ
diff --git a/docs/versioned_docs/version-2.22/_media/benchmark_vault/5replicas/max_latency.png b/docs/versioned_docs/version-2.22/_media/benchmark_vault/5replicas/max_latency.png
deleted file mode 100644
index 696250181..000000000
Binary files a/docs/versioned_docs/version-2.22/_media/benchmark_vault/5replicas/max_latency.png and /dev/null differ
diff --git a/docs/versioned_docs/version-2.22/_media/benchmark_vault/5replicas/mean_latency.png b/docs/versioned_docs/version-2.22/_media/benchmark_vault/5replicas/mean_latency.png
deleted file mode 100644
index 3b43298ac..000000000
Binary files a/docs/versioned_docs/version-2.22/_media/benchmark_vault/5replicas/mean_latency.png and /dev/null differ
diff --git a/docs/versioned_docs/version-2.22/_media/benchmark_vault/5replicas/min_latency.png b/docs/versioned_docs/version-2.22/_media/benchmark_vault/5replicas/min_latency.png
deleted file mode 100644
index 1046df67e..000000000
Binary files a/docs/versioned_docs/version-2.22/_media/benchmark_vault/5replicas/min_latency.png and /dev/null differ
diff --git a/docs/versioned_docs/version-2.22/_media/benchmark_vault/5replicas/p99_latency.png b/docs/versioned_docs/version-2.22/_media/benchmark_vault/5replicas/p99_latency.png
deleted file mode 100644
index 0190118b2..000000000
Binary files a/docs/versioned_docs/version-2.22/_media/benchmark_vault/5replicas/p99_latency.png and /dev/null differ
diff --git a/docs/versioned_docs/version-2.22/_media/concept-constellation.svg b/docs/versioned_docs/version-2.22/_media/concept-constellation.svg
deleted file mode 100644
index 30d32bf6d..000000000
diff --git a/docs/versioned_docs/version-2.22/_media/concept-managed.svg b/docs/versioned_docs/version-2.22/_media/concept-managed.svg
deleted file mode 100644
index 5645a608f..000000000
diff --git a/docs/versioned_docs/version-2.22/_media/constellation_oneline.svg b/docs/versioned_docs/version-2.22/_media/constellation_oneline.svg
deleted file mode 100644
index 4e354958a..000000000
diff --git a/docs/versioned_docs/version-2.22/_media/example-emojivoto.jpg b/docs/versioned_docs/version-2.22/_media/example-emojivoto.jpg
deleted file mode 100644
index 4be0d5b26..000000000
Binary files a/docs/versioned_docs/version-2.22/_media/example-emojivoto.jpg and /dev/null differ
diff --git a/docs/versioned_docs/version-2.22/_media/example-online-boutique.jpg b/docs/versioned_docs/version-2.22/_media/example-online-boutique.jpg
deleted file mode 100644
index 026f0d865..000000000
Binary files a/docs/versioned_docs/version-2.22/_media/example-online-boutique.jpg and /dev/null differ
diff --git a/docs/versioned_docs/version-2.22/_media/recovery-gcp-serial-console-link.png b/docs/versioned_docs/version-2.22/_media/recovery-gcp-serial-console-link.png
deleted file mode 100644
index eb67f0e99..000000000
Binary files a/docs/versioned_docs/version-2.22/_media/recovery-gcp-serial-console-link.png and /dev/null differ
diff --git a/docs/versioned_docs/version-2.22/_media/tcb.svg b/docs/versioned_docs/version-2.22/_media/tcb.svg
deleted file mode 100644
index e5bcb5b95..000000000
diff --git a/docs/versioned_docs/version-2.22/architecture/attestation.md b/docs/versioned_docs/version-2.22/architecture/attestation.md
deleted file mode 100644
index 9bd157460..000000000
--- a/docs/versioned_docs/version-2.22/architecture/attestation.md
+++ /dev/null
@@ -1,409 +0,0 @@
-# Attestation
-
-This page explains Constellation's attestation process and highlights the cornerstones of its trust model.
-
-## Terms
-
-The following lists terms and concepts that help to understand the attestation concept of Constellation.
- -### Trusted Platform Module (TPM) - -A TPM chip is a dedicated tamper-resistant crypto-processor. -It can securely store artifacts such as passwords, certificates, encryption keys, or *runtime measurements* (more on this below). -When a TPM is implemented in software, it's typically called a *virtual* TPM (vTPM). - -### Runtime measurement - -A runtime measurement is a cryptographic hash of the memory pages of a so called *runtime component*. Runtime components of interest typically include a system's bootloader or OS kernel. - -### Platform Configuration Register (PCR) - -A Platform Configuration Register (PCR) is a memory location in the TPM that has some unique properties. -To store a new value in a PCR, the existing value is extended with a new value as follows: - -``` -PCR[N] = HASHalg( PCR[N] || ArgumentOfExtend ) -``` - -The PCRs are typically used to store runtime measurements. -The new value of a PCR is always an extension of the existing value. -Thus, storing the measurements of multiple components into the same PCR irreversibly links them together. - -### Measured boot - -Measured boot builds on the concept of chained runtime measurements. -Each component in the boot chain loads and measures the next component into the PCR before executing it. -By comparing the resulting PCR values against trusted reference values, the integrity of the entire boot chain and thereby the running system can be ensured. - -### Remote attestation (RA) - -Remote attestation is the process of verifying certain properties of an application or platform, such as integrity and confidentiality, from a remote location. -In the case of a measured boot, the goal is to obtain a signed attestation statement on the PCR values of the boot measurements. -The statement can then be verified and compared to a set of trusted reference values. -This way, the integrity of the platform can be ensured before sharing secrets with it. - -### Confidential virtual machine (CVM) - -Confidential computing (CC) is the protection of data in-use with hardware-based trusted execution environments (TEEs). -With CVMs, TEEs encapsulate entire virtual machines and isolate them against the hypervisor, other VMs, and direct memory access. -After loading the initial VM image into encrypted memory, the hypervisor calls for a secure processor to measure these initial memory pages. -The secure processor locks these pages and generates an attestation report on the initial page measurements. -CVM memory pages are encrypted with a key that resides inside the secure processor, which makes sure only the guest VM can access them. -The attestation report is signed by the secure processor and can be verified using remote attestation via the certificate authority of the hardware vendor. -Such an attestation statement guarantees the confidentiality and integrity of a CVM. - -### Attested TLS (aTLS) - -In a CC environment, attested TLS (aTLS) can be used to establish secure connections between two parties using the remote attestation features of the CC components. - -aTLS modifies the TLS handshake by embedding an attestation statement into the TLS certificate. -Instead of relying on a certificate authority, aTLS uses this attestation statement to establish trust in the certificate. - -The protocol can be used by clients to verify a server certificate, by a server to verify a client certificate, or for mutual verification (mutual aTLS). 
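To make the PCR extend operation described above concrete, here is a minimal sketch of how an extension can be reproduced with standard shell tools, assuming SHA-256 as the hash algorithm; the file name and the all-zero starting value are purely illustrative:

```bash
# Illustrative only: reproduce PCR[N] = SHA-256(PCR[N] || measurement)
pcr="0000000000000000000000000000000000000000000000000000000000000000"  # assumed previous PCR value (hex)
measurement="$(sha256sum component.bin | cut -d' ' -f1)"                 # hash of the component being measured
# Concatenate both values as raw bytes and hash the result to obtain the new PCR value
printf '%s%s' "$pcr" "$measurement" | xxd -r -p | sha256sum | cut -d' ' -f1
```

Because each new value depends on the previous one, replaying the same measurements in the same order is the only way to arrive at the same final PCR value.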
- -## Overview - -The challenge for Constellation is to lift a CVM's attestation statement to the Kubernetes software layer and make it end-to-end verifiable. -From there, Constellation needs to expand the attestation from a single CVM to the entire cluster. - -The [*JoinService*](microservices.md#joinservice) and [*VerificationService*](microservices.md#verificationservice) are where everything comes together. -Internally, the *JoinService* uses remote attestation to securely join CVM nodes to the cluster. -Externally, the *VerificationService* provides an attestation statement for the cluster's CVMs and configuration. - -The following explains the details of both steps. - -## Node attestation - -The idea is that Constellation nodes should have verifiable integrity from the CVM hardware measurement up to the Kubernetes software layer. -The solution is a verifiable boot chain and an integrity-protected runtime environment. - -Constellation uses measured boot within CVMs, measuring each component in the boot process before executing it. -Outside of CC, this is usually implemented via TPMs. -CVM technologies differ in how they implement runtime measurements, but the general concepts are similar to those of a TPM. -For simplicity, TPM terminology like *PCR* is used in the following. - -When a Constellation node image boots inside a CVM, measured boot is used for all stages and components of the boot chain. -This process goes up to the root filesystem. -The root filesystem is mounted read-only with integrity protection. -For details on the image and boot stages, see the [image architecture](../architecture/images.md) documentation. -Any changes to the image will inevitably also change the corresponding PCR values. -To create a node attestation statement, the Constellation image obtains a CVM attestation statement from the hardware. -This includes the runtime measurements and thereby binds the measured boot results to the CVM hardware measurement. - -In addition to the image measurements, Constellation extends a PCR during the [initialization phase](../workflows/create.md) that irrevocably marks the node as initialized. -The measurement is created using the [*clusterID*](../architecture/keys.md#cluster-identity), tying all future attestation statements to this ID. -Thereby, an attestation statement is unique for every cluster and a node can be identified unambiguously as being initialized. - -To verify an attestation, the hardware's signature on the attestation statement is verified first to establish trust in the contained runtime measurements. -If successful, the measurements are verified against the trusted values of the particular Constellation release version. -Finally, the measurement of the *clusterID* can be compared by calculating it with the [master secret](keys.md#master-secret). - -### Runtime measurements - -Constellation uses runtime measurements to implement the measured boot approach. -As stated above, the underlying hardware technology and guest firmware differ in their implementations of runtime measurements. -The following gives a detailed description of the available measurements in the different cloud environments. - -The runtime measurements consist of two types of values: - -* **Measurements produced by the cloud infrastructure and firmware of the CVM**: -These are measurements of closed-source firmware and other values controlled by the cloud provider. -While not being reproducible for the user, some of them can be compared against previously observed values.
-Others may change frequently and aren't suitable for verification. -The [signed image measurements](#chain-of-trust) include measurements that are known, previously observed values. - -* **Measurements produced by the Constellation bootloader and boot chain**: -The Constellation Bootloader takes over from the CVM firmware and [measures the rest of the boot chain](images.md). -The Constellation [Bootstrapper](microservices.md#bootstrapper) is the first user mode component that runs in a Constellation image. -It extends PCR registers with the [IDs](keys.md#cluster-identity) of the cluster marking a node as initialized. - -Constellation allows to specify in the config which measurements should be enforced during the attestation process. -Enforcing non-reproducible measurements controlled by the cloud provider means that changes in these values require manual updates to the cluster's config. -By default, Constellation only enforces measurements that are stable values produced by the infrastructure or by Constellation directly. - - - - -Constellation uses the [vTPM](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html) (NitroTPM) feature of the [AWS Nitro System](http://aws.amazon.com/ec2/nitro/) on AWS for runtime measurements. - -The vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. -The VMs are attested by obtaining signed PCR values over the VM's boot configuration from the TPM and comparing them to a known, good state (measured boot). - -The following table lists all PCR values of the vTPM and the measured components. -It also lists what components of the boot chain did the measurements and if the value is reproducible and verifiable. -The latter means that the value can be generated offline and compared to the one in the vTPM. - -| PCR | Components | Measured by | Reproducible and verifiable | -| ----------- | ---------------------------------------------------------------- | -------------------------------------- | --------------------------- | -| 0 | Firmware | AWS | No | -| 1 | Firmware | AWS | No | -| 2 | Firmware | AWS | No | -| 3 | Firmware | AWS | No | -| 4 | Constellation Bootloader, Kernel, initramfs, Kernel command line | AWS, Constellation Bootloader | Yes | -| 5 | Firmware | AWS | No | -| 6 | Firmware | AWS | No | -| 7 | Secure Boot Policy | AWS, Constellation Bootloader | No | -| 8 | - | - | - | -| 9 | initramfs, Kernel command line | Linux Kernel | Yes | -| 10 | User space | Linux IMA | No[^1] | -| 11 | Unified Kernel Image components | Constellation Bootloader | Yes | -| 12 | Reserved | (User space, Constellation Bootloader) | Yes | -| 13 | Reserved | (Constellation Bootloader) | Yes | -| 14 | Secure Boot State | Constellation Bootloader | No | -| 15 | ClusterID | Constellation Bootstrapper | Yes | -| 16–23 | Unused | - | - | - - - - -Constellation uses the [vTPM](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch#vtpm) feature of Azure CVMs for runtime measurements. -This vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. -It provides a [measured boot](https://docs.microsoft.com/en-us/azure/security/fundamentals/measured-boot-host-attestation#measured-boot) verification that's based on the trusted launch feature of [Trusted Launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch). - -The following table lists all PCR values of the vTPM and the measured components. 
-It also lists what components of the boot chain did the measurements and if the value is reproducible and verifiable. -The latter means that the value can be generated offline and compared to the one in the vTPM. - -| PCR | Components | Measured by | Reproducible and verifiable | -| ----------- | ---------------------------------------------------------------- | -------------------------------------- | --------------------------- | -| 0 | Firmware | Azure | No | -| 1 | Firmware | Azure | No | -| 2 | Firmware | Azure | No | -| 3 | Firmware | Azure | No | -| 4 | Constellation Bootloader, Kernel, initramfs, Kernel command line | Azure, Constellation Bootloader | Yes | -| 5 | Reserved | Azure | No | -| 6 | VM Unique ID | Azure | No | -| 7 | Secure Boot State | Azure, Constellation Bootloader | No | -| 8 | - | - | - | -| 9 | initramfs, Kernel command line | Linux Kernel | Yes | -| 10 | User space | Linux IMA | No[^1] | -| 11 | Unified Kernel Image components | Constellation Bootloader | Yes | -| 12 | Reserved | (User space, Constellation Bootloader) | Yes | -| 13 | Reserved | (Constellation Bootloader) | Yes | -| 14 | Secure Boot State | Constellation Bootloader | No | -| 15 | ClusterID | Constellation Bootstrapper | Yes | -| 16–23 | Unused | - | - | - - - - -Constellation uses the [vTPM](https://cloud.google.com/compute/confidential-vm/docs/about-cvm) feature of CVMs on GCP for runtime measurements. -Note that this vTPM doesn't run inside the hardware-protected CVM context, but is emulated by the hypervisor. - -The vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. -It provides a [launch attestation report](https://cloud.google.com/compute/confidential-vm/docs/monitoring#about_launch_attestation_report_events) that's based on the measured boot feature of [Shielded VMs](https://cloud.google.com/compute/shielded-vm/docs/shielded-vm#measured-boot). - -The following table lists all PCR values of the vTPM and the measured components. -It also lists what components of the boot chain did the measurements and if the value is reproducible and verifiable. -The latter means that the value can be generated offline and compared to the one in the vTPM. - -| PCR | Components | Measured by | Reproducible and verifiable | -| ----------- | ---------------------------------------------------------------- | -------------------------------------- | --------------------------- | -| 0 | CVM version and technology | GCP | No | -| 1 | Firmware | GCP | No | -| 2 | Firmware | GCP | No | -| 3 | Firmware | GCP | No | -| 4 | Constellation Bootloader, Kernel, initramfs, Kernel command line | GCP, Constellation Bootloader | Yes | -| 5 | Disk GUID partition table | GCP | No | -| 6 | Disk GUID partition table | GCP | No | -| 7 | GCP Secure Boot Policy | GCP, Constellation Bootloader | No | -| 8 | - | - | - | -| 9 | initramfs, Kernel command line | Linux Kernel | Yes | -| 10 | User space | Linux IMA | No[^1] | -| 11 | Unified Kernel Image components | Constellation Bootloader | Yes | -| 12 | Reserved | (User space, Constellation Bootloader) | Yes | -| 13 | Reserved | (Constellation Bootloader) | Yes | -| 14 | Secure Boot State | Constellation Bootloader | No | -| 15 | ClusterID | Constellation Bootstrapper | Yes | -| 16–23 | Unused | - | - | - - - - -Constellation uses a hypervisor-based vTPM for runtime measurements. - -The vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. 
-The VMs are attested by obtaining signed PCR values over the VM's boot configuration from the TPM and comparing them to a known, good state (measured boot). - -The following table lists all PCR values of the vTPM and the measured components. -It also lists what components of the boot chain did the measurements and if the value is reproducible and verifiable. -The latter means that the value can be generated offline and compared to the one in the vTPM. - -| PCR | Components | Measured by | Reproducible and verifiable | -| ----------- | ---------------------------------------------------------------- | -------------------------------------- | --------------------------- | -| 0 | Firmware | STACKIT | No | -| 1 | Firmware | STACKIT | No | -| 2 | Firmware | STACKIT | No | -| 3 | Firmware | STACKIT | No | -| 4 | Constellation Bootloader, Kernel, initramfs, Kernel command line | STACKIT, Constellation Bootloader | Yes | -| 5 | Firmware | STACKIT | No | -| 6 | Firmware | STACKIT | No | -| 7 | Secure Boot Policy | STACKIT, Constellation Bootloader | No | -| 8 | - | - | - | -| 9 | initramfs, Kernel command line | Linux Kernel | Yes | -| 10 | User space | Linux IMA | No[^1] | -| 11 | Unified Kernel Image components | Constellation Bootloader | Yes | -| 12 | Reserved | (User space, Constellation Bootloader) | Yes | -| 13 | Reserved | (Constellation Bootloader) | Yes | -| 14 | Secure Boot State | Constellation Bootloader | No | -| 15 | ClusterID | Constellation Bootstrapper | Yes | -| 16–23 | Unused | - | - | - - - - -### CVM verification - -To verify the integrity of the received attestation statement, a chain of trust from the CVM technology to the interface providing the statement has to be established. -For verification of the CVM technology, Constellation may expose additional options in its config file. - - - - -On AWS, AMD SEV-SNP is used to provide runtime encryption to the VMs. -An SEV-SNP attestation report is used to establish trust in the VM. -You may customize certain parameters for verification of the attestation statement using the Constellation config file. - -* TCB versions - - You can set the minimum version numbers of components in the SEV-SNP TCB. - Use the latest versions to enforce that only machines with the most recent firmware updates are allowed to join the cluster. - Alternatively, you can set a lower minimum version to allow slightly out-of-date machines to still be able to join the cluster. - -* AMD Root Key Certificate - - This certificate is the root of trust for verifying the SEV-SNP certificate chain. - -* AMD Signing Key Certificate - - This is the intermediate certificate for verifying the SEV-SNP report's signature. - If it's not specified, the CLI fetches it from the AMD key distribution server. - - - - -On Azure, AMD SEV-SNP is used to provide runtime encryption to the VMs. -An SEV-SNP attestation report is used to establish trust in the vTPM running inside the VM. -You may customize certain parameters for verification of the attestation statement using the Constellation config file. - -* TCB versions - - You can set the minimum version numbers of components in the SEV-SNP TCB. - Use the latest versions to enforce that only machines with the most recent firmware updates are allowed to join the cluster. - Alternatively, you can set a lower minimum version to allow slightly out-of-date machines to still be able to join the cluster. - -* AMD Root Key Certificate - - This certificate is the root of trust for verifying the SEV-SNP certificate chain. 
- -* Firmware Signer - - This config option allows you to specify how the firmware signer should be verified. - More explicitly, it controls the verification of the `IDKeyDigest` value in the SEV-SNP attestation report. - You can provide a list of accepted key digests and specify a policy on how this list is compared against the reported `IDKeyDigest`. - - - - -On GCP, AMD SEV-SNP is used to provide runtime encryption to the VMs. -An SEV-SNP attestation report is used to establish trust in the VM. -You may customize certain parameters for verification of the attestation statement using the Constellation config file. - -* TCB versions - - You can set the minimum version numbers of components in the SEV-SNP TCB. - Use the latest versions to enforce that only machines with the most recent firmware updates are allowed to join the cluster. - Alternatively, you can set a lower minimum version to allow slightly out-of-date machines to still be able to join the cluster. - -* AMD Root Key Certificate - - This certificate is the root of trust for verifying the SEV-SNP certificate chain. - -* AMD Signing Key Certificate - - This is the intermediate certificate for verifying the SEV-SNP report's signature. - If it's not specified, the CLI fetches it from the AMD key distribution server. - - - - -On STACKIT, AMD SEV-ES is used to provide runtime encryption to the VMs. -The hypervisor-based vTPM is used to establish trust in the VM via [runtime measurements](#runtime-measurements). -There is no additional configuration available for STACKIT. - - - - -## Cluster attestation - -Cluster-facing, Constellation's [*JoinService*](microservices.md#joinservice) verifies each node joining the cluster given the configured ground truth runtime measurements. -User-facing, the [*VerificationService*](microservices.md#verificationservice) provides an interface to verify a node using remote attestation. -By verifying the first node during the [initialization](microservices.md#bootstrapper) and configuring the ground truth measurements that are subsequently enforced by the *JoinService*, the whole cluster is verified in a transitive way. - -### Cluster-facing attestation - -The *JoinService* is provided with the runtime measurements of the whitelisted Constellation image version as the ground truth. -During the initialization and the cluster bootstrapping, each node connects to the *JoinService* using [aTLS](#attested-tls-atls). -During the handshake, the node transmits an attestation statement including its runtime measurements. -The *JoinService* verifies that statement and compares the measurements against the ground truth. -For details of the initialization process check the [microservice descriptions](microservices.md). - -After the initialization, every node updates its runtime measurements with the *clusterID* value, marking it irreversibly as initialized. -When an initialized node tries to join another cluster, its measurements inevitably mismatch the measurements of an uninitialized node and it will be declined. - -### User-facing attestation - -The [*VerificationService*](microservices.md#verificationservice) provides an endpoint for obtaining its hardware-based remote attestation statement, which includes the runtime measurements. -A user can [verify](../workflows/verify-cluster.md) this statement and compare the measurements against the configured ground truth and, thus, verify the identity and integrity of all Constellation components and the cluster configuration. 
Subsequently, the user knows that the entire cluster is in the expected state and is trustworthy. - -## Putting it all together - -This section puts the aforementioned concepts together and illustrates how trust in a Constellation cluster is established and maintained. - -### CLI and node images - -It all starts with the CLI executable. The CLI is signed by Edgeless Systems. To ensure non-repudiability for CLI releases, Edgeless Systems publishes corresponding signatures to the public ledger of the [sigstore project](https://www.sigstore.dev/). There's a [step-by-step guide](../workflows/verify-cli.md) on how to verify CLI signatures based on sigstore. - -The CLI contains the latest runtime measurements of the Constellation node image for all supported cloud platforms. In case a different version of the node image is to be used, the corresponding runtime measurements can be fetched using the CLI's [fetch-measurements command](../reference/cli.md#constellation-config-fetch-measurements). This command downloads the runtime measurements and the corresponding signature from cdn.confidential.cloud. See, for example, the following files corresponding to node image v2.16.3: - -* [Measurements](https://cdn.confidential.cloud/constellation/v2/ref/-/stream/stable/v2.16.3/image/measurements.json) -* [Signature](https://cdn.confidential.cloud/constellation/v2/ref/-/stream/stable/v2.16.3/image/measurements.json.sig) - -The CLI contains the long-term public key of Edgeless Systems to verify the signature of downloaded runtime measurements. - -### Cluster creation - -When a cluster is [created](../workflows/create.md), the CLI automatically verifies the runtime measurements of the *first node* using remote attestation. Based on this, the CLI and the first node set up a temporary TLS connection. This [aTLS](#attested-tls-atls) connection is used for two things: - -1. The CLI sends the [master secret](../architecture/keys.md#master-secret) of the to-be-created cluster to the first node. The master secret is generated by the CLI. -2. The first node sends a [kubeconfig file](https://www.redhat.com/sysadmin/kubeconfig) with Kubernetes credentials to the CLI. - -After this, the aTLS connection is closed and the first node bootstraps the Kubernetes cluster. All subsequent interactions between the CLI and the cluster go via the [Kubernetes API](https://kubernetes.io/docs/concepts/overview/kubernetes-api/) server running inside the cluster. The CLI (and other tools like kubectl) use the credentials referenced by the kubeconfig file to authenticate themselves to the Kubernetes API server and to establish an mTLS connection. - -The CLI connects to the Kubernetes API to write the runtime measurements for the applicable node image to etcd. The JoinService uses these runtime measurements to verify all nodes that join the cluster subsequently. - -### Chain of trust - -In summary, there's a chain of trust based on cryptographic signatures that goes from the user to the cluster via the CLI. This is illustrated in the following diagram.
- -```mermaid -flowchart LR - A[User]-- "verifies" -->B[CLI] - B[CLI]-- "verifies" -->C([Runtime measurements]) - D[Edgeless Systems]-- "signs" -->B[CLI] - D[Edgeless Systems]-- "signs" -->C([Runtime measurements]) - B[CLI]-- "verifies (remote attestation)" -->E[First node] - E[First node]-- "verifies (remote attestation)" -->F[Other nodes] - C([Runtime measurements]) -.-> E[First node] - C([Runtime measurements]) -.-> F[Other nodes] -``` - -### Upgrades - -Whenever a cluster is [upgraded](../workflows/upgrade.md) to a new version of the node image, the CLI sends the corresponding runtime measurements via the Kubernetes API server. The new runtime measurements are stored in etcd within the cluster and replace any previous runtime measurements. The new runtime measurements are then used automatically by the JoinService for the verification of new nodes. - -## References - -[^1]: Linux IMA produces runtime measurements of user-space binaries. -However, these measurements aren't deterministic and thus, PCR\[10] can't be compared to a constant value. -Instead, a policy engine must be used to verify the TPM event log against a policy. diff --git a/docs/versioned_docs/version-2.22/architecture/encrypted-storage.md b/docs/versioned_docs/version-2.22/architecture/encrypted-storage.md deleted file mode 100644 index f047fa4a9..000000000 --- a/docs/versioned_docs/version-2.22/architecture/encrypted-storage.md +++ /dev/null @@ -1,62 +0,0 @@ -# Encrypted persistent storage - -Confidential VMs provide runtime memory encryption to protect data in use. -In the context of Kubernetes, this is sufficient for the confidentiality and integrity of stateless services. -Consider a front-end web server, for example, that keeps all connection information cached in main memory. -No sensitive data is ever written to an insecure medium. -However, many real-world applications need some form of state or data-lake service that's connected to a persistent storage device and requires encryption at rest. -As described in [Use persistent storage](../workflows/storage.md), cloud service providers (CSPs) use the container storage interface (CSI) to make their storage solutions available to Kubernetes workloads. -These CSI storage solutions often support some sort of encryption. -For example, Google Cloud [encrypts data at rest by default](https://cloud.google.com/security/encryption/default-encryption), without any action required by the customer. - -## Cloud provider-managed encryption - -CSP-managed storage solutions encrypt the data in the cloud backend before writing it physically to disk. -In the context of confidential computing and Constellation, the CSP and its managed services aren't trusted. -Hence, cloud provider-managed encryption protects your data from offline hardware access to physical storage devices. -It doesn't protect it from anyone with infrastructure-level access to the storage backend or a malicious insider in the cloud platform. -Even with "bring your own key" or similar concepts, the CSP performs the encryption process with access to the keys and plaintext data. - -In the security model of Constellation, securing persistent storage and thereby data at rest requires that all cryptographic operations are performed inside a trusted execution environment. -Consequently, using CSP-managed encryption of persistent storage usually isn't an option. - -## Constellation-managed encryption - -Constellation provides CSI drivers for storage solutions in all major clouds with built-in encryption support. 
-Block storage provisioned by the CSP is [mapped](https://guix.gnu.org/manual/en/html_node/Mapped-Devices.html) using the [dm-crypt](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/dm-crypt.html), and optionally the [dm-integrity](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/dm-integrity.html), kernel modules, before it's formatted and accessed by the Kubernetes workloads. -All cryptographic operations happen inside the trusted environment of the confidential Constellation node. - -Note that for integrity-protected disks, [volume expansion](https://kubernetes.io/blog/2018/07/12/resizing-persistent-volumes-using-kubernetes/) isn't supported. - -By default the driver uses data encryption keys (DEKs) issued by the Constellation [*KeyService*](microservices.md#keyservice). -The DEKs are in turn derived from the Constellation's key encryption key (KEK), which is directly derived from the [master secret](keys.md#master-secret). -This is the recommended mode of operation, and also requires the least amount of setup by the cluster administrator. - -Alternatively, the driver can be configured to use a key management system to store and access KEKs and DEKs. - -Refer to [keys and cryptography](keys.md) for more details on key management in Constellation. - -Once deployed and configured, the CSI driver ensures transparent encryption and integrity of all persistent volumes provisioned via its storage class. -Data at rest is secured without any additional actions required by the developer. - -## Cryptographic algorithms - -This section gives an overview of the libraries, cryptographic algorithms, and their configurations, used in Constellation's CSI drivers. - -### dm-crypt - -To interact with the dm-crypt kernel module, Constellation uses [libcryptsetup](https://gitlab.com/cryptsetup/cryptsetup/). -New devices are formatted as [LUKS2](https://gitlab.com/cryptsetup/LUKS2-docs/-/tree/master) partitions with a sector size of 4096 bytes. -The used key derivation function is [Argon2id](https://datatracker.ietf.org/doc/html/rfc9106) with the [recommended parameters for memory-constrained environments](https://datatracker.ietf.org/doc/html/rfc9106#section-7.4) of 3 iterations and 64 MiB of memory, utilizing 4 parallel threads. -For encryption Constellation uses AES in XTS-Plain64. The key size is 512 bit. - -### dm-integrity - -To interact with the dm-integrity kernel module, Constellation uses [libcryptsetup](https://gitlab.com/cryptsetup/cryptsetup/). -When enabled, the used data integrity algorithm is [HMAC](https://datatracker.ietf.org/doc/html/rfc2104) with SHA256 as the hash function. -The tag size is 32 Bytes. - -## Encrypted S3 object storage - -Constellation comes with a service that you can use to transparently retrofit client-side encryption to existing applications that use S3 (AWS or compatible) for storage. -To learn more, check out the [s3proxy documentation](../workflows/s3proxy.md). diff --git a/docs/versioned_docs/version-2.22/architecture/images.md b/docs/versioned_docs/version-2.22/architecture/images.md deleted file mode 100644 index 8a9c51d36..000000000 --- a/docs/versioned_docs/version-2.22/architecture/images.md +++ /dev/null @@ -1,49 +0,0 @@ -# Constellation images - -Constellation uses a minimal version of Fedora as the operating system running inside confidential VMs. This Linux distribution is optimized for containers and designed to be stateless. -The Constellation images provide measured boot and an immutable filesystem. 
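For reference, the dm-crypt parameters listed in the encrypted storage section above roughly correspond to the following `cryptsetup` invocation. This is only an illustrative command-line equivalent; the CSI driver calls libcryptsetup directly, and `/dev/sdX` is a placeholder device:

```bash
# Approximate command-line equivalent of the LUKS2 parameters described above (illustrative only)
cryptsetup luksFormat /dev/sdX \
  --type luks2 \
  --cipher aes-xts-plain64 \
  --key-size 512 \
  --sector-size 4096 \
  --pbkdf argon2id --pbkdf-force-iterations 3 --pbkdf-memory 65536 --pbkdf-parallel 4
# Optional integrity protection (dm-integrity with HMAC-SHA256) can be layered on via --integrity hmac-sha256
```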
- -## Measured boot - -```mermaid -flowchart LR - Firmware --> Bootloader - Bootloader --> uki - subgraph uki[Unified Kernel Image] - Kernel[Kernel] - initramfs[Initramfs] - cmdline[Kernel Command Line] - end - uki --> rootfs[Root Filesystem] -``` - -Measured boot uses a Trusted Platform Module (TPM) to measure every part of the boot process. This allows for verification of the integrity of a running system at any point in time. To ensure correct measurements of every stage, each stage is responsible to measure the next stage before transitioning. - -### Firmware - -With confidential VMs, the firmware is the root of trust and is measured automatically at boot. After initialization, the firmware will load and measure the bootloader before executing it. - -### Bootloader - -The bootloader is the first modifiable part of the boot chain. The bootloader is tasked with loading the kernel, initramfs and setting the kernel command line. The Constellation bootloader measures these components before starting the kernel. - -### initramfs - -The initramfs is a small filesystem loaded to prepare the actual root filesystem. The Constellation initramfs maps the block device containing the root filesystem with [dm-verity](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/verity.html). The initramfs then mounts the root filesystem from the mapped block device. - -dm-verity provides integrity checking using a cryptographic hash tree. When a block is read, its integrity is checked by verifying the tree against a trusted root hash. The initramfs reads this root hash from the previously measured kernel command line. Thus, if any block of the root filesystem's device is modified on disk, trying to read the modified block will result in a kernel panic at runtime. - -After mounting the root filesystem, the initramfs will switch over and start the `init` process of the integrity-protected root filesystem. - -## State disk - -In addition to the read-only root filesystem, each Constellation node has a disk for storing state data. -This disk is mounted readable and writable by the initramfs and contains data that should persist across reboots. -Such data can contain sensitive information and, therefore, must be stored securely. -To that end, the state disk is protected by authenticated encryption. -See the section on [keys and encryption](keys.md#storage-encryption) for more information on the cryptographic primitives in use. - -## Kubernetes components - -During initialization, the [*Bootstrapper*](microservices.md#bootstrapper) downloads and verifies the [Kubernetes components](https://kubernetes.io/docs/concepts/overview/components/) as configured by the user. -They're stored on the state partition and can be updated once new releases need to be installed. diff --git a/docs/versioned_docs/version-2.22/architecture/keys.md b/docs/versioned_docs/version-2.22/architecture/keys.md deleted file mode 100644 index 49821cd0b..000000000 --- a/docs/versioned_docs/version-2.22/architecture/keys.md +++ /dev/null @@ -1,130 +0,0 @@ -# Key management and cryptographic primitives - -Constellation protects and isolates your cluster and workloads. -To that end, cryptography is the foundation that ensures the confidentiality and integrity of all components. -Evaluating the security and compliance of Constellation requires a precise understanding of the cryptographic primitives and keys used. -The following gives an overview of the architecture and explains the technical details. 
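The dm-verity mechanism described in the initramfs section above can be explored with the stock `veritysetup` tool. The following is a hedged sketch, not the exact tooling used to build Constellation images; file names and the root hash are placeholders:

```bash
# Build a hash tree for a read-only filesystem image; this prints the root hash
veritysetup format rootfs.img rootfs.hashtree

# Verify the image against a trusted root hash (conceptually what the initramfs relies on)
veritysetup verify rootfs.img rootfs.hashtree <trusted-root-hash>

# Map the verified device; reading any tampered block will fail at runtime
veritysetup open rootfs.img verified-root rootfs.hashtree <trusted-root-hash>
```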
- -## Confidential VMs - -Confidential VM (CVM) technology comes with hardware and software components for memory encryption, isolation, and remote attestation. -For details on the implementations and cryptographic soundness, refer to the hardware vendors' documentation and advisories. - -## Master secret - -The master secret is the cryptographic material used for deriving the [*clusterID*](#cluster-identity) and the *key encryption key (KEK)* for [storage encryption](#storage-encryption). -It's generated during the bootstrapping of a Constellation cluster. -It can be managed either by [Constellation](#constellation-managed-key-management) or by an [external key management system](#user-managed-key-management). -In case of [recovery](#recovery-and-migration), the master secret allows decrypting the state and recovering a Constellation cluster. - -## Cluster identity - -The identity of a Constellation cluster is represented by cryptographic [measurements](attestation.md#runtime-measurements): - -The **base measurements** represent the identity of a valid, uninitialized Constellation node. -They depend on the node image, but are otherwise the same for every Constellation cluster. -On node boot, they're determined using the CVM's attestation mechanism and [measured boot up to the read-only root filesystem](images.md). - -The **clusterID** represents the identity of a single initialized Constellation cluster. -It's derived from the master secret and a cryptographically random salt and is unique for every Constellation cluster. -The [Bootstrapper](microservices.md#bootstrapper) measures the *clusterID* into its own PCR before executing any code not measured as part of the *base measurements*. -See [Node attestation](attestation.md#node-attestation) for details. - -The remote attestation statement of a Constellation cluster combines the *base measurements* and the *clusterID* for a verifiable, unspoofable, unique identity. - -## Network encryption - -Constellation encrypts all cluster network communication using the [container network interface (CNI)](https://github.com/containernetworking/cni). -See [network encryption](networking.md) for more details. - -The Cilium agent running on each node establishes a secure [WireGuard](https://www.wireguard.com/) tunnel between it and all other known nodes in the cluster. -Each node creates its own [Curve25519](http://cr.yp.to/ecdh.html) encryption key pair and distributes its public key via Kubernetes. -A node uses another node's public key to decrypt and encrypt traffic from and to Cilium-managed endpoints running on that node. -Connections are always encrypted peer-to-peer using [ChaCha20](http://cr.yp.to/chacha.html) with [Poly1305](http://cr.yp.to/mac.html). -WireGuard implements [forward secrecy with key rotation every 2 minutes](https://lists.zx2c4.com/pipermail/wireguard/2017-December/002141.html). - -## Storage encryption - -Constellation supports transparent encryption of persistent storage. -The Linux kernel's device mapper-based encryption features are used to encrypt the data on the block storage level. -Currently, the following primitives are used for block storage encryption: - -* [dm-crypt](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/dm-crypt.html) -* [dm-integrity](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/dm-integrity.html) - -Adding primitives for integrity protection in the CVM attacker model is under active development and will be available in a future version of Constellation.
-See [encrypted storage](encrypted-storage.md) for more details. - -As a cluster administrator, when creating a cluster, you can use the Constellation [installation program](orchestration.md) to select one of the following methods for key management: - -* Constellation-managed key management -* User-managed key management - -### Constellation-managed key management - -#### Key material and key derivation - -During the creation of a Constellation cluster, the cluster's master secret is used to derive a KEK. -This means creating two clusters with the same master secret will yield the same KEK. -Any data encryption key (DEK) is derived from the KEK via HKDF. -Note that the master secret is recommended to be unique for every cluster and shouldn't be reused (except in case of [recovering](../workflows/recovery.md) a cluster). - -#### State and storage - -The KEK is derived from the master secret during the initialization. -Subsequently, all other key material is derived from the KEK. -Given the same KEK, any DEK can be derived deterministically from a given identifier. -Hence, there is no need to store DEKs. They can be derived on demand. -After the KEK was derived, it's stored in memory only and never leaves the CVM context. - -#### Availability - -Constellation-managed key management has the same availability as the underlying Kubernetes cluster. -Therefore, the KEK is stored in the [distributed Kubernetes etcd storage](https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/) to allow for unexpected but non-fatal (control-plane) node failure. -The etcd storage is backed by the encrypted and integrity protected [state disk](images.md#state-disk) of the nodes. - -#### Recovery - -Constellation clusters can be recovered in the event of a disaster, even when all node machines have been stopped and need to be rebooted. -For details on the process see the [recovery workflow](../workflows/recovery.md). - -### User-managed key management - -User-managed key management is under active development and will be available soon. -In scenarios where constellation-managed key management isn't an option, this mode allows you to keep full control of your keys. -For example, compliance requirements may force you to keep your KEKs in an on-prem key management system (KMS). - -During the creation of a Constellation cluster, you specify a KEK present in a remote KMS. -This follows the common scheme of "bring your own key" (BYOK). -Constellation will support several KMSs for managing the storage and access of your KEK. -Initially, it will support the following KMSs: - -* [AWS KMS](https://aws.amazon.com/kms/) -* [GCP KMS](https://cloud.google.com/security-key-management) -* [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/#product-overview) -* [KMIP-compatible KMS](https://www.oasis-open.org/committees/tc_home.php?wg_abbrev=kmip) - -Storing the keys in Cloud KMS of AWS, Azure, or GCP binds the key usage to the particular cloud identity access management (IAM). -In the future, Constellation will support remote attestation-based access policies for Cloud KMS once available. -Note that using a Cloud KMS limits the isolation and protection to the guarantees of the particular offering. - -KMIP support allows you to use your KMIP-compatible on-prem KMS and keep full control over your keys. -This follows the common scheme of "hold your own key" (HYOK). - -The KEK is used to encrypt per-data "data encryption keys" (DEKs). 
-DEKs are generated to encrypt your data before storing it on persistent storage. -After being encrypted by the KEK, the DEKs are stored on dedicated cloud storage for persistence. -Currently, Constellation supports the following cloud storage options: - -* [AWS S3](https://aws.amazon.com/s3/) -* [GCP Cloud Storage](https://cloud.google.com/storage) -* [Azure Blob Storage](https://azure.microsoft.com/en-us/services/storage/blobs/#overview) - -The DEKs are only present in plaintext form in the encrypted main memory of the CVMs. -Similarly, the cryptographic operations for encrypting data before writing it to persistent storage are performed in the context of the CVMs. - -#### Recovery and migration - -In the case of a disaster, the KEK can be used to decrypt the DEKs locally and subsequently use them to decrypt and retrieve the data. -In case of migration, configuring the same KEK will provide seamless migration of data. -Thus, only the DEK storage needs to be transferred to the new cluster alongside the encrypted data for seamless migration. diff --git a/docs/versioned_docs/version-2.22/architecture/microservices.md b/docs/versioned_docs/version-2.22/architecture/microservices.md deleted file mode 100644 index 90bae783b..000000000 --- a/docs/versioned_docs/version-2.22/architecture/microservices.md +++ /dev/null @@ -1,73 +0,0 @@ -# Microservices - -Constellation takes care of bootstrapping and initializing a Confidential Kubernetes cluster. -During the lifetime of the cluster, it handles day 2 operations such as key management, remote attestation, and updates. -These features are provided by several microservices: - -* The [Bootstrapper](microservices.md#bootstrapper) initializes a Constellation node and bootstraps the cluster -* The [JoinService](microservices.md#joinservice) joins new nodes to an existing cluster -* The [VerificationService](microservices.md#verificationservice) provides remote attestation functionality -* The [KeyService](microservices.md#keyservice) manages Constellation-internal keys - -The relations between microservices are shown in the following diagram: - -```mermaid -flowchart LR - subgraph admin [Admin's machine] - A[Constellation CLI] - end - subgraph img [Constellation OS image] - B[Constellation OS] - C[Bootstrapper] - end - subgraph Kubernetes - D[JoinService] - E[KeyService] - F[VerificationService] - end - A -- deploys --> - B -- starts --> C - C -- deploys --> D - C -- deploys --> E - C -- deploys --> F -``` - -## Bootstrapper - -The *Bootstrapper* is the first microservice launched after booting a Constellation node image. -It sets up that machine as a Kubernetes node and integrates that node into the Kubernetes cluster. -To this end, the *Bootstrapper* first downloads and verifies the [Kubernetes components](https://kubernetes.io/docs/concepts/overview/components/) at the configured versions. -The *Bootstrapper* tries to find an existing cluster and if successful, communicates with the [JoinService](microservices.md#joinservice) to join the node. -Otherwise, it waits for an initialization request to create a new Kubernetes cluster. - -## JoinService - -The *JoinService* runs as [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) on each control-plane node. -New nodes (at cluster start, or later through autoscaling) send a request to the service over [attested TLS (aTLS)](attestation.md#attested-tls-atls). -The *JoinService* verifies the new node's certificate and attestation statement. 
-If attestation is successful, the new node is supplied with an encryption key from the [*KeyService*](microservices.md#keyservice) for its state disk and a Kubernetes bootstrap token. - - -```mermaid -sequenceDiagram - participant New node - participant JoinService - New node->>JoinService: aTLS handshake (server side verification) - JoinService-->>New node: # - New node->>+JoinService: IssueJoinTicket(DiskUUID, NodeName, IsControlPlane) - JoinService->>+KeyService: GetDataKey(DiskUUID) - KeyService-->>-JoinService: DiskEncryptionKey - JoinService-->>-New node: DiskEncryptionKey, KubernetesJoinToken, ... -``` - -## VerificationService - -The *VerificationService* runs as a DaemonSet on each node. -It provides user-facing functionality for remote attestation during the cluster's lifetime via an endpoint for [verifying the cluster](attestation.md#cluster-attestation). -Read more about the hardware-based [attestation feature](attestation.md) of Constellation and how to [verify](../workflows/verify-cluster.md) a cluster on the client side. - -## KeyService - -The *KeyService* runs as a DaemonSet on each control-plane node. -It implements the key management for the [storage encryption keys](keys.md#storage-encryption) in Constellation. These keys are used for the [state disk](images.md#state-disk) of each node and the [transparently encrypted storage](encrypted-storage.md) for Kubernetes. -Depending on whether the [constellation-managed](keys.md#constellation-managed-key-management) or [user-managed](keys.md#user-managed-key-management) mode is used, the *KeyService* holds the key encryption key (KEK) directly or calls an external key management service (KMS) for key derivation, respectively. diff --git a/docs/versioned_docs/version-2.22/architecture/networking.md b/docs/versioned_docs/version-2.22/architecture/networking.md deleted file mode 100644 index e9cbdf029..000000000 --- a/docs/versioned_docs/version-2.22/architecture/networking.md +++ /dev/null @@ -1,22 +0,0 @@ -# Network encryption - -Constellation encrypts all pod communication using the [container network interface (CNI)](https://github.com/containernetworking/cni). -To that end, Constellation deploys, configures, and operates the [Cilium](https://cilium.io/) CNI plugin. -Cilium provides [transparent encryption](https://docs.cilium.io/en/stable/security/network/encryption) for all cluster traffic using either IPSec or [WireGuard](https://www.wireguard.com/). -Currently, Constellation only supports WireGuard as the encryption engine. -You can read more about the cryptographic soundness of WireGuard [in their white paper](https://www.wireguard.com/papers/wireguard.pdf). - -Cilium is actively working on implementing a feature called [`host-to-host`](https://github.com/cilium/cilium/pull/19401) encryption mode for WireGuard. -With `host-to-host`, all traffic between nodes will be tunneled via WireGuard (host-to-host, host-to-pod, pod-to-host, pod-to-pod). -Until the `host-to-host` feature is released, Constellation enables `pod-to-pod` encryption. -This mode encrypts all traffic between Kubernetes pods using WireGuard tunnels. - -When using Cilium in the default setup but with encryption enabled, there is a [known issue](https://docs.cilium.io/en/v1.12/gettingstarted/encryption/#egress-traffic-to-not-yet-discovered-remote-endpoints-may-be-unencrypted) -that can cause pod-to-pod traffic to be unencrypted. -To mitigate this issue, Constellation adds a *strict* mode to Cilium's `pod-to-pod` encryption.
-This mode changes the default behavior for traffic that's destined for an unknown endpoint: instead of being sent out in plaintext, such traffic is dropped. -The strict mode distinguishes traffic that's sent to a pod from traffic that's destined for a cluster-external endpoint by considering the pod's CIDR range. - -Traffic originating from hosts isn't encrypted yet. -This mainly includes health checks from the Kubernetes API server. -Also, traffic proxied over the API server via, for example, `kubectl port-forward` isn't encrypted. diff --git a/docs/versioned_docs/version-2.22/architecture/observability.md b/docs/versioned_docs/version-2.22/architecture/observability.md deleted file mode 100644 index 0f4daffd4..000000000 --- a/docs/versioned_docs/version-2.22/architecture/observability.md +++ /dev/null @@ -1,74 +0,0 @@ -# Observability - -In Kubernetes, observability is the ability to gain insight into the behavior and performance of applications. -It helps identify and resolve issues more effectively, ensuring stability and performance of Kubernetes workloads, reducing downtime and outages, and improving efficiency. -The "three pillars of observability" are logs, metrics, and traces. - -In the context of Confidential Computing, observability is a delicate subject and needs to be applied such that it doesn't leak any sensitive information. -The following gives an overview of where and how you can apply standard observability tools in Constellation. - -## Cloud resource monitoring - -While inaccessible, Constellation's nodes are still visible as black box VMs to the hypervisor. -Resource consumption, such as memory and CPU utilization, can be monitored from the outside and observed via the cloud platforms directly. -Similarly, other resources, such as storage and network and their respective metrics, are visible via the cloud platform. - -## Metrics - -Metrics are numeric representations of data measured over intervals of time. They're essential for understanding system health and gaining insights using telemetry signals. - -By default, Constellation exposes the [metrics for Kubernetes system components](https://kubernetes.io/docs/concepts/cluster-administration/system-metrics/) inside the cluster. -Similarly, the [etcd metrics](https://etcd.io/docs/v3.5/metrics/) endpoints are exposed inside the cluster. -These [metrics endpoints can be disabled](https://kubernetes.io/docs/concepts/cluster-administration/system-metrics/#disabling-metrics). - -You can collect these cluster-internal metrics via tools such as [Prometheus](https://prometheus.io/) or the [Elastic Stack](https://www.elastic.co/de/elastic-stack/). - -Constellation's CNI Cilium also supports [metrics via Prometheus endpoints](https://docs.cilium.io/en/latest/observability/metrics/). -However, in Constellation, they're disabled by default and must be enabled first. - -## Logs - -Logs represent discrete events that usually describe what's happening with your service. -The payload is an actual message emitted from your system along with a metadata section containing a timestamp, labels, and tracking identifiers. - -### System logs - -Detailed system-level logs are accessible via `/var/log` and [journald](https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html) on the nodes directly. -They can be collected from there, for example, via [Filebeat and Logstash](https://www.elastic.co/guide/en/beats/filebeat/current/logstash-output.html), which are tools of the [Elastic Stack](https://www.elastic.co/de/elastic-stack/).
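The node-level logs mentioned above can be inspected with standard systemd tooling once you have access to a node; the exact units present depend on the image version, so the commands below are only illustrative:

```bash
# Show all journald logs since the node booted
journalctl --boot
# Narrow down to warnings and errors from the current boot
journalctl --boot --priority=warning
```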
- -In case of an error during the initialization, the CLI automatically collects the [Bootstrapper](./microservices.md#bootstrapper) logs and returns these as a file for [troubleshooting](../workflows/troubleshooting.md). Here is an example of such an event: - -```shell-session -Cluster initialization failed. This error is not recoverable. -Terminate your cluster and try again. -Fetched bootstrapper logs are stored in "constellation-cluster.log" -``` - -### Kubernetes logs - -Constellation supports the [Kubernetes logging architecture](https://kubernetes.io/docs/concepts/cluster-administration/logging/). -By default, logs are written to the nodes' encrypted state disks. -These include the Pod and container logs and the [system component logs](https://kubernetes.io/docs/concepts/cluster-administration/logging/#system-component-logs). - -[Constellation services](microservices.md) run as Pods inside the `kube-system` namespace and use the standard container logging mechanism. -The same applies for the [Cilium Pods](https://docs.cilium.io/en/latest/operations/troubleshooting/#logs). - -You can collect logs from within the cluster via tools such as [Fluentd](https://github.com/fluent/fluentd), [Loki](https://github.com/grafana/loki), or the [Elastic Stack](https://www.elastic.co/de/elastic-stack/). - -## Traces - -Modern systems are implemented as interconnected complex and distributed microservices. Understanding request flows and system communications is challenging, mainly because all systems in a chain need to be modified to propagate tracing information. Distributed tracing is a new approach to increasing observability and understanding performance bottlenecks. A trace represents consecutive events that reflect an end-to-end request path in a distributed system. - -Constellation supports [traces for Kubernetes system components](https://kubernetes.io/docs/concepts/cluster-administration/system-traces/). -By default, they're disabled and need to be enabled first. - -Similarly, Cilium can be enabled to [export traces](https://cilium.io/use-cases/metrics-export/). - -You can collect these traces via tools such as [Jaeger](https://www.jaegertracing.io/) or [Zipkin](https://zipkin.io/). - -## Integrations - -Platforms and SaaS solutions such as Datadog, logz.io, Dynatrace, or New Relic facilitate the observability challenge for Kubernetes and provide all-in-one SaaS solutions. -They install agents into the cluster that collect metrics, logs, and tracing information and upload them into the data lake of the platform. -Technically, the agent-based approach is compatible with Constellation, and attaching these platforms is straightforward. -However, you need to evaluate if the exported data might violate Constellation's compliance and privacy guarantees by uploading them to a third-party platform. diff --git a/docs/versioned_docs/version-2.22/architecture/orchestration.md b/docs/versioned_docs/version-2.22/architecture/orchestration.md deleted file mode 100644 index 3c8d529e7..000000000 --- a/docs/versioned_docs/version-2.22/architecture/orchestration.md +++ /dev/null @@ -1,83 +0,0 @@ -# Orchestrating Constellation clusters - -You can use the CLI to create a cluster on the supported cloud platforms. -The CLI provisions the resources in your cloud environment and initiates the initialization of your cluster. -It uses a set of parameters and an optional configuration file to manage your cluster installation. -The CLI is also used for updating your cluster. 
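To tie the Kubernetes logging section above to a concrete workflow, the Constellation services in the `kube-system` namespace can be inspected with plain `kubectl`; the pod name below is a placeholder you'd substitute from the list output:

```bash
# List the Constellation and system pods
kubectl get pods -n kube-system
# Fetch the logs of one of them (replace the placeholder with a real pod name)
kubectl logs -n kube-system <pod-name>
```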
- -## Workspaces - -Each Constellation cluster has an associated *workspace*. -The workspace is where data such as the Constellation state and config files are stored. -Each workspace is associated with a single cluster and configuration. -The CLI stores state in the local filesystem making the current directory the active workspace. -Multiple clusters require multiple workspaces, hence, multiple directories. -Note that every operation on a cluster always has to be performed from the directory associated with its workspace. - -You may copy files from the workspace to other locations, -but you shouldn't move or delete them while the cluster is still being used. -The Constellation CLI takes care of managing the workspace. -Only when a cluster was terminated, and you are sure the files aren't needed anymore, should you remove a workspace. - -## Cluster creation process - -To allow for fine-grained configuration of your cluster and cloud environment, Constellation supports an extensive configuration file with strong defaults. [Generating the configuration file](../workflows/config.md) is typically the first thing you do in the workspace. - -Altogether, the following files are generated during the creation of a Constellation cluster and stored in the current workspace: - -* a configuration file -* a state file -* a Base64-encoded master secret -* [Terraform artifacts](../reference/terraform.md), stored in subdirectories -* a Kubernetes `kubeconfig` file. - -After the initialization of your cluster, the CLI will provide you with a Kubernetes `kubeconfig` file. -This file grants you access to your Kubernetes cluster and configures the [kubectl](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) tool. -In addition, the cluster's [identifier](orchestration.md#post-installation-configuration) is returned and stored in the state file. - -### Creation process details - -1. The CLI `apply` command first creates the confidential VM (CVM) resources in your cloud environment and configures the network -2. Each CVM boots the Constellation node image and measures every component in the boot chain -3. The first microservice launched in each node is the [*Bootstrapper*](microservices.md#bootstrapper) -4. The *Bootstrapper* waits until it either receives an initialization request or discovers an initialized cluster -5. The CLI then connects to the *Bootstrapper* of a selected node, sends the configuration, and initiates the initialization of the cluster -6. The *Bootstrapper* of **that** node [initializes the Kubernetes cluster](microservices.md#bootstrapper) and deploys the other Constellation [microservices](microservices.md) including the [*JoinService*](microservices.md#joinservice) -7. Subsequently, the *Bootstrappers* of the other nodes discover the initialized cluster and send join requests to the *JoinService* -8. As part of the join request each node includes an attestation statement of its boot measurements as authentication -9. The *JoinService* verifies the attestation statements and joins the nodes to the Kubernetes cluster -10. This process is repeated for every node joining the cluster later (e.g., through autoscaling) - -## Post-installation configuration - -Post-installation the CLI provides a configuration for [accessing the cluster using the Kubernetes API](https://kubernetes.io/docs/tasks/administer-cluster/access-cluster-api/). -The `kubeconfig` file provides the credentials and configuration for connecting and authenticating to the API server. 
-Once configured, orchestrate the Kubernetes cluster via `kubectl`. - -After the initialization, the CLI will present you with a couple of tokens: - -* The [*master secret*](keys.md#master-secret) (stored in the `constellation-mastersecret.json` file by default) -* The [*clusterID*](keys.md#cluster-identity) of your cluster in Base64 encoding - -You can read more about these values and their meaning in the guide on [cluster identity](keys.md#cluster-identity). - -The *master secret* must be kept secret and can be used to [recover your cluster](../workflows/recovery.md). -Instead of managing this secret manually, you can [use your key management solution of choice](keys.md#user-managed-key-management) with Constellation. - -The *clusterID* uniquely identifies a cluster and can be used to [verify your cluster](../workflows/verify-cluster.md). - -## Upgrades - -Constellation images and microservices may need to be upgraded to new versions during the lifetime of a cluster. -Constellation implements a rolling update mechanism ensuring no downtime of the control or data plane. -You can upgrade a Constellation cluster with a single operation by using the CLI. -For step-by-step instructions on how to do this, refer to [Upgrade your cluster](../workflows/upgrade.md). - -### Attestation of upgrades - -With every new image, corresponding measurements are released. -During an update procedure, the CLI provides new measurements to the [JoinService](microservices.md#joinservice) securely. -New measurements for an updated image are automatically pulled and verified by the CLI following the [supply chain security concept](attestation.md#chain-of-trust) of Constellation. -The [attestation section](attestation.md#cluster-facing-attestation) describes in detail how these measurements are then used by the JoinService for the attestation of nodes. - - diff --git a/docs/versioned_docs/version-2.22/architecture/overview.md b/docs/versioned_docs/version-2.22/architecture/overview.md deleted file mode 100644 index 386f93b2f..000000000 --- a/docs/versioned_docs/version-2.22/architecture/overview.md +++ /dev/null @@ -1,30 +0,0 @@ -# Overview - -Constellation is a cloud-based confidential orchestration platform. -The foundation of Constellation is Kubernetes and therefore shares the same technology stack and architecture principles. -To learn more about Constellation and Kubernetes, see [product overview](../overview/product.md). - -## About orchestration and updates - -As a cluster administrator, you can use the [Constellation CLI](orchestration.md) to install and deploy a cluster. -Updates are provided in accordance with the [support policy](versions.md). - -## About microservices and attestation - -Constellation manages the nodes and network in your cluster. All nodes are bootstrapped by the [*Bootstrapper*](microservices.md#bootstrapper). They're verified and authenticated by the [*JoinService*](microservices.md#joinservice) before being added to the cluster and the network. Finally, the entire cluster can be verified via the [*VerificationService*](microservices.md#verificationservice) using [remote attestation](attestation.md). - -## About node images and verified boot - -Constellation comes with operating system images for Kubernetes control-plane and worker nodes. -They're highly optimized for running containerized workloads and specifically prepared for running inside confidential VMs. -You can learn more about [the images](images.md) and how verified boot ensures their integrity during boot and beyond. 
-
-## About key management and cryptographic primitives
-
-Encryption of data at-rest, in-transit, and in-use is the fundamental building block for confidential computing and Constellation. Learn more about the [keys and cryptographic primitives](keys.md) used in Constellation, [encrypted persistent storage](encrypted-storage.md), and [network encryption](networking.md).
-
-## About observability
-
-Observability in Kubernetes refers to the capability to troubleshoot issues using telemetry signals such as logs, metrics, and traces.
-In the realm of Confidential Computing, it's crucial that observability aligns with confidentiality, necessitating careful implementation.
-Learn more about the [observability capabilities in Constellation](./observability.md).
diff --git a/docs/versioned_docs/version-2.22/architecture/versions.md b/docs/versioned_docs/version-2.22/architecture/versions.md
deleted file mode 100644
index 6f06c011b..000000000
--- a/docs/versioned_docs/version-2.22/architecture/versions.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# Versions and support policy
-
-All components of Constellation use a three-digit version number of the form `v<MAJOR>.<MINOR>.<PATCH>`.
-The components are released in lock step, usually on the first Tuesday of every month. This release primarily introduces new features, but may also include security or performance improvements. The `MINOR` version will be incremented as part of this release.
-
-Additional `PATCH` releases may be created on demand to fix security issues or bugs before the next `MINOR` release window.
-
-New releases are published on [GitHub](https://github.com/edgelesssys/constellation/releases).
-
-## Kubernetes support policy
-
-Constellation is aligned to the [version support policy of Kubernetes](https://kubernetes.io/releases/version-skew-policy/#supported-versions), and therefore usually supports the most recent three minor versions.
-When a new minor version of Kubernetes is released, support is added to the next Constellation release, and that version then supports four Kubernetes versions.
-Subsequent Constellation releases drop support for the oldest (and deprecated) Kubernetes version.
-
-The following Kubernetes versions are currently supported:
-
-
-* v1.29.15
-* v1.30.11
-* v1.31.7
diff --git a/docs/versioned_docs/version-2.22/getting-started/examples.md b/docs/versioned_docs/version-2.22/getting-started/examples.md
deleted file mode 100644
index fded84980..000000000
--- a/docs/versioned_docs/version-2.22/getting-started/examples.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# Examples
-
-After you [installed the CLI](install.md) and [created your first cluster](first-steps.md), you're ready to deploy applications. Why not start with one of the following examples?
-* [Emojivoto](examples/emojivoto.md): a simple but fun web application -* [Online Boutique](examples/online-boutique.md): an e-commerce demo application by Google consisting of 11 separate microservices -* [Horizontal Pod Autoscaling](examples/horizontal-scaling.md): an example demonstrating Constellation's autoscaling capabilities diff --git a/docs/versioned_docs/version-2.22/getting-started/examples/emojivoto.md b/docs/versioned_docs/version-2.22/getting-started/examples/emojivoto.md deleted file mode 100644 index 2bbe27917..000000000 --- a/docs/versioned_docs/version-2.22/getting-started/examples/emojivoto.md +++ /dev/null @@ -1,22 +0,0 @@ -# Emojivoto -[Emojivoto](https://github.com/BuoyantIO/emojivoto) is a simple and fun application that's well suited to test the basic functionality of your cluster. - - - -emojivoto - Web UI - - - -1. Deploy the application: - ```bash - kubectl apply -k github.com/BuoyantIO/emojivoto/kustomize/deployment - ``` -2. Wait until it becomes available: - ```bash - kubectl wait --for=condition=available --timeout=60s -n emojivoto --all deployments - ``` -3. Forward the web service to your machine: - ```bash - kubectl -n emojivoto port-forward svc/web-svc 8080:80 - ``` -4. Visit [http://localhost:8080](http://localhost:8080) diff --git a/docs/versioned_docs/version-2.22/getting-started/examples/filestash-s3proxy.md b/docs/versioned_docs/version-2.22/getting-started/examples/filestash-s3proxy.md deleted file mode 100644 index b9a394256..000000000 --- a/docs/versioned_docs/version-2.22/getting-started/examples/filestash-s3proxy.md +++ /dev/null @@ -1,107 +0,0 @@ - -# Deploying Filestash - -Filestash is a web frontend for different storage backends, including S3. -It's a useful application to showcase s3proxy in action. - -1. Deploy s3proxy as described in [Deployment](../../workflows/s3proxy.md#deployment). -2. 
Create a deployment file for Filestash with one pod: - -```sh -cat << EOF > "deployment-filestash.yaml" -apiVersion: apps/v1 -kind: Deployment -metadata: - name: filestash -spec: - replicas: 1 - selector: - matchLabels: - app: filestash - template: - metadata: - labels: - app: filestash - spec: - hostAliases: - - ip: $(kubectl get svc s3proxy-service -o=jsonpath='{.spec.clusterIP}') - hostnames: - - "s3.us-east-1.amazonaws.com" - - "s3.us-east-2.amazonaws.com" - - "s3.us-west-1.amazonaws.com" - - "s3.us-west-2.amazonaws.com" - - "s3.eu-north-1.amazonaws.com" - - "s3.eu-south-1.amazonaws.com" - - "s3.eu-south-2.amazonaws.com" - - "s3.eu-west-1.amazonaws.com" - - "s3.eu-west-2.amazonaws.com" - - "s3.eu-west-3.amazonaws.com" - - "s3.eu-central-1.amazonaws.com" - - "s3.eu-central-2.amazonaws.com" - - "s3.ap-northeast-1.amazonaws.com" - - "s3.ap-northeast-2.amazonaws.com" - - "s3.ap-northeast-3.amazonaws.com" - - "s3.ap-east-1.amazonaws.com" - - "s3.ap-southeast-1.amazonaws.com" - - "s3.ap-southeast-2.amazonaws.com" - - "s3.ap-southeast-3.amazonaws.com" - - "s3.ap-southeast-4.amazonaws.com" - - "s3.ap-south-1.amazonaws.com" - - "s3.ap-south-2.amazonaws.com" - - "s3.me-south-1.amazonaws.com" - - "s3.me-central-1.amazonaws.com" - - "s3.il-central-1.amazonaws.com" - - "s3.af-south-1.amazonaws.com" - - "s3.ca-central-1.amazonaws.com" - - "s3.sa-east-1.amazonaws.com" - containers: - - name: filestash - image: machines/filestash:latest - ports: - - containerPort: 8334 - volumeMounts: - - name: ca-cert - mountPath: /etc/ssl/certs/kube-ca.crt - subPath: kube-ca.crt - volumes: - - name: ca-cert - secret: - secretName: s3proxy-tls - items: - - key: ca.crt - path: kube-ca.crt -EOF -``` - -The pod spec includes the `hostAliases` key, which adds an entry to the pod's `/etc/hosts`. -The entry forwards all requests for any of the currently defined AWS regions to the Kubernetes service `s3proxy-service`. -If you followed the s3proxy [Deployment](../../workflows/s3proxy.md#deployment) guide, this service points to a s3proxy pod. - -The deployment specifies all regions explicitly to prevent accidental data leaks. -If one of your buckets were located in a region that's not part of the `hostAliases` key, traffic towards those buckets would not be redirected to s3proxy. -Similarly, if you want to exclude data for specific regions from going through s3proxy you can remove those regions from the deployment. - -The spec also includes a volume mount for the TLS certificate and adds it to the pod's certificate trust store. -The volume is called `ca-cert`. -The key `ca.crt` of that volume is mounted to `/etc/ssl/certs/kube-ca.crt`, which is the default certificate trust store location for that container's OpenSSL library. -Not adding the CA certificate will result in TLS authentication errors. - -3. Apply the file: `kubectl apply -f deployment-filestash.yaml` - -Afterward, you can use a port forward to access the Filestash pod: -`kubectl port-forward pod/$(kubectl get pod --selector='app=filestash' -o=jsonpath='{.items[*].metadata.name}') 8334:8334` - -4. After browsing to `localhost:8443`, Filestash will ask you to set an administrator password. -After setting it, you can directly leave the admin area by clicking the blue cloud symbol in the top left corner. -Subsequently, you can select S3 as storage backend and enter your credentials. -This will bring you to an overview of your buckets. -If you want to deploy Filestash in production, take a look at its [documentation](https://www.filestash.app/docs/). - -5. 
To see the logs of s3proxy intercepting requests made to S3, run: `kubectl logs -f pod/$(kubectl get pod --selector='app=s3proxy' -o=jsonpath='{.items[*].metadata.name}')` -Look out for log messages labeled `intercepting`. -There is one such log message for each message that's encrypted, decrypted, or blocked. - -6. Once you have uploaded a file with Filestash, you should be able to view the file in Filestash. -However, if you go to the AWS S3 [Web UI](https://s3.console.aws.amazon.com/s3/home) and download the file you just uploaded in Filestash, you won't be able to read it. -Another way to spot encrypted files without downloading them is to click on a file, scroll to the Metadata section, and look for the header named `x-amz-meta-constellation-encryption`. -This header holds the encrypted data encryption key of the object and is only present on objects that are encrypted by s3proxy. diff --git a/docs/versioned_docs/version-2.22/getting-started/examples/horizontal-scaling.md b/docs/versioned_docs/version-2.22/getting-started/examples/horizontal-scaling.md deleted file mode 100644 index dfaf9e742..000000000 --- a/docs/versioned_docs/version-2.22/getting-started/examples/horizontal-scaling.md +++ /dev/null @@ -1,98 +0,0 @@ -# Horizontal Pod Autoscaling -This example demonstrates Constellation's autoscaling capabilities. It's based on the Kubernetes [HorizontalPodAutoscaler Walkthrough](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/). During the following steps, Constellation will spawn new VMs on demand, verify them, add them to the cluster, and delete them again when the load has settled down. - -## Requirements -The cluster needs to be initialized with Kubernetes 1.23 or later. In addition, [autoscaling must be enabled](../../workflows/scale.md) to enable Constellation to assign new nodes dynamically. - -Just for this example specifically, the cluster should have as few worker nodes in the beginning as possible. Start with a small cluster with only *one* low-powered node for the control-plane node and *one* low-powered worker node. - -:::info -We tested the example using instances of types `Standard_DC4as_v5` on Azure and `n2d-standard-4` on GCP. -::: - -## Setup - -1. Install the Kubernetes Metrics Server: - ```bash - kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml - ``` - -2. Deploy the HPA example server that's supposed to be scaled under load. - - This manifest is similar to the one from the Kubernetes HPA walkthrough, but with increased CPU limits and requests to facilitate the triggering of node scaling events. - ```bash - cat < - -Online Boutique - Web UI - - - -1. Create a namespace: - ```bash - kubectl create ns boutique - ``` -2. Deploy the application: - ```bash - kubectl apply -n boutique -f https://github.com/GoogleCloudPlatform/microservices-demo/raw/main/release/kubernetes-manifests.yaml - ``` -3. Wait for all services to become available: - ```bash - kubectl wait --for=condition=available --timeout=300s -n boutique --all deployments - ``` -4. Get the frontend's external IP address: - ```shell-session - $ kubectl get service frontend-external -n boutique | awk '{print $4}' - EXTERNAL-IP - - ``` - (`` is a placeholder for the IP assigned by your CSP.) -5. Enter the IP from the result in your browser to browse the online shop. 
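As an alternative to the `awk` pipeline in step 4, you can query the load balancer address directly with `kubectl`'s JSONPath output. This is just a sketch; depending on the CSP, the field may be `hostname` instead of `ip`.

```bash
# Print the external address of the frontend service (field name may vary by CSP)
kubectl -n boutique get service frontend-external \
  -o jsonpath='{.status.loadBalancer.ingress[0].ip}'
```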
diff --git a/docs/versioned_docs/version-2.22/getting-started/first-steps-local.md b/docs/versioned_docs/version-2.22/getting-started/first-steps-local.md deleted file mode 100644 index 98f0302de..000000000 --- a/docs/versioned_docs/version-2.22/getting-started/first-steps-local.md +++ /dev/null @@ -1,277 +0,0 @@ -# First steps with a local cluster - -A local cluster lets you deploy and test Constellation without a cloud subscription. -You have two options: - -* Use MiniConstellation to automatically deploy a two-node cluster. -* For more fine-grained control, create the cluster using the QEMU provider. - -Both options use virtualization to create a local cluster with control-plane nodes and worker nodes. They **don't** require hardware with Confidential VM (CVM) support. For attestation, they currently use a software-based vTPM provided by KVM/QEMU. - -You need an x64 machine with a Linux OS. -You can use a VM, but it needs nested virtualization. - -## Prerequisites - -* Machine requirements: - * An x86-64 CPU with at least 4 cores (6 cores are recommended) - * At least 4 GB RAM (6 GB are recommended) - * 20 GB of free disk space - * Hardware virtualization enabled in the BIOS/UEFI (often referred to as Intel VT-x or AMD-V/SVM) / nested-virtualization support when using a VM -* Software requirements: - * Linux OS with [KVM kernel module](https://www.linux-kvm.org/page/Main_Page) - * Recommended: Ubuntu 22.04 LTS - * [Docker](https://docs.docker.com/engine/install/) - * [xsltproc](https://gitlab.gnome.org/GNOME/libxslt/-/wikis/home) - * (Optional) [virsh](https://www.libvirt.org/manpages/virsh.html) to observe and access your nodes - -### Software installation on Ubuntu - -```bash -# install Docker -curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg -echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null -sudo apt update -sudo apt install docker-ce -# install other dependencies -sudo apt install xsltproc -sudo snap install kubectl --classic -# install Constellation CLI -curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-linux-amd64 -sudo install constellation-linux-amd64 /usr/local/bin/constellation -# do not drop forwarded packages -sudo iptables -P FORWARD ACCEPT -``` - -## Create a cluster - - - - - -With the `constellation mini` command, you can deploy and test Constellation locally. This mode is called MiniConstellation. Conceptually, MiniConstellation is similar to [MicroK8s](https://microk8s.io/), [K3s](https://k3s.io/), and [minikube](https://minikube.sigs.k8s.io/docs/). - - -:::caution - -MiniConstellation has specific soft- and hardware requirements such as a Linux OS running on an x86-64 CPU. Pay attention to all [prerequisites](#prerequisites) when setting up. - -::: - -:::note - -Since MiniConstellation runs on your local system, cloud features such as load balancing, -attaching persistent storage, or autoscaling aren't available. - -::: - -The following creates your MiniConstellation cluster (may take up to 10 minutes to complete): - -```bash -constellation mini up -``` - -This will configure your current directory as the [workspace](../architecture/orchestration.md#workspaces) for this cluster. -All `constellation` commands concerning this cluster need to be issued from this directory. 
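Once `constellation mini up` has finished, you can point `kubectl` at the new cluster and check that the nodes come up. This sketch assumes the admin kubeconfig was written to the workspace as `constellation-admin.conf`.

```bash
# Assumes `constellation mini up` wrote the kubeconfig to the current workspace
export KUBECONFIG="$PWD/constellation-admin.conf"
kubectl get nodes
```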
- - - - -With the QEMU provider, you can create a local Constellation cluster as if it were in the cloud. The provider uses [QEMU](https://www.qemu.org/) to create multiple VMs for the cluster nodes, which interact with each other. - -:::caution - -Constellation on QEMU has specific soft- and hardware requirements such as a Linux OS running on an x86-64 CPU. Pay attention to all [prerequisites](#prerequisites) when setting up. - -::: - -:::note - -Since Constellation on QEMU runs on your local system, cloud features such as load balancing, -attaching persistent storage, or autoscaling aren't available. - -::: - -1. To set up your local cluster, you need to create a configuration file for Constellation first. - - ```bash - constellation config generate qemu - ``` - - This creates a [configuration file](../workflows/config.md) for QEMU called `constellation-conf.yaml`. After that, your current folder also becomes your [workspace](../architecture/orchestration.md#workspaces). All `constellation` commands for your cluster need to be executed from this directory. - -2. Now you can create your cluster and its nodes. `constellation apply` uses the options set in `constellation-conf.yaml`. - - ```bash - constellation apply -y - ``` - - The Output should look like the following: - - ```shell-session - $ constellation apply -y - Checking for infrastructure changes - The following Constellation cluster will be created: - 3 control-plane nodes of type 2-vCPUs will be created. - 1 worker node of type 2-vCPUs will be created. - Creating - Cloud infrastructure created successfully. - Your Constellation master secret was successfully written to ./constellation-mastersecret.json - Connecting - Initializing cluster - Installing Kubernetes components - Your Constellation cluster was successfully initialized. - - Constellation cluster identifier g6iMP5wRU1b7mpOz2WEISlIYSfdAhB0oNaOg6XEwKFY= - Kubernetes configuration constellation-admin.conf - - You can now connect to your cluster by executing: - export KUBECONFIG="$PWD/constellation-admin.conf" - ``` - - The cluster's identifier will be different in your output. - Keep `constellation-mastersecret.json` somewhere safe. - This will allow you to [recover your cluster](../workflows/recovery.md) in case of a disaster. - - :::info - - Depending on your setup, `constellation apply` may take 10+ minutes to complete. - - ::: - -3. Configure kubectl - - ```bash - export KUBECONFIG="$PWD/constellation-admin.conf" - ``` - - - - -## Connect to the cluster - -Your cluster initially consists of a single control-plane node: - -```shell-session -$ kubectl get nodes -NAME STATUS ROLES AGE VERSION -control-plane-0 Ready control-plane 66s v1.24.6 -``` - -Additional nodes will request to join the cluster shortly. Before each additional node is allowed to join the cluster, its state is verified using remote attestation by the [JoinService](../architecture/microservices.md#joinservice). -If verification passes successfully, the new node receives keys and certificates to join the cluster. - -You can follow this process by viewing the logs of the JoinService: - -```shell-session -$ kubectl logs -n kube-system daemonsets/join-service -f -{"level":"INFO","ts":"2022-10-14T09:32:20Z","caller":"cmd/main.go:48","msg":"Constellation Node Join Service","version":"2.1.0","cloudProvider":"qemu"} -{"level":"INFO","ts":"2022-10-14T09:32:20Z","logger":"validator","caller":"watcher/validator.go:96","msg":"Updating expected measurements"} -... 
-``` - -Once all nodes have joined your cluster, it may take a couple of minutes for all resources to become available. -You can check on the state of your cluster by running the following: - -```shell-session -$ kubectl get nodes -NAME STATUS ROLES AGE VERSION -control-plane-0 Ready control-plane 2m59s v1.24.6 -worker-0 Ready 32s v1.24.6 -``` - -## Deploy a sample application - -1. Deploy the [emojivoto app](https://github.com/BuoyantIO/emojivoto) - - ```bash - kubectl apply -k github.com/BuoyantIO/emojivoto/kustomize/deployment - ``` - -2. Expose the frontend service locally - - ```bash - kubectl wait --for=condition=available --timeout=60s -n emojivoto --all deployments - kubectl -n emojivoto port-forward svc/web-svc 8080:80 & - curl http://localhost:8080 - kill %1 - ``` - -## Terminate your cluster - - - - -Once you are done, you can clean up the created resources using the following command: - -```bash -constellation mini down -``` - -This will destroy your cluster and clean up your workspace. -The VM image and cluster configuration file (`constellation-conf.yaml`) will be kept and may be reused to create new clusters. - - - - -Once you are done, you can clean up the created resources using the following command: - -```bash -constellation terminate -``` - -This should give the following output: - -```shell-session -$ constellation terminate -You are about to terminate a Constellation cluster. -All of its associated resources will be DESTROYED. -This action is irreversible and ALL DATA WILL BE LOST. -Do you want to continue? [y/n]: -``` - -Confirm with `y` to terminate the cluster: - -```shell-session -Terminating ... -Your Constellation cluster was terminated successfully. -``` - -This will destroy your cluster and clean up your workspace. -The VM image and cluster configuration file (`constellation-conf.yaml`) will be kept and may be reused to create new clusters. - - - - -## Troubleshooting - -Make sure to use the [latest release](https://github.com/edgelesssys/constellation/releases/latest) and check out the [known issues](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22). - -### VMs have no internet access / CLI remains in "Initializing cluster" state - -`iptables` rules may prevent your VMs from accessing the internet. -Make sure your rules aren't dropping forwarded packages. - -List your rules: - -```bash -sudo iptables -S -``` - -The output may look similar to the following: - -```shell-session --P INPUT ACCEPT --P FORWARD DROP --P OUTPUT ACCEPT --N DOCKER --N DOCKER-ISOLATION-STAGE-1 --N DOCKER-ISOLATION-STAGE-2 --N DOCKER-USER -``` - -If your `FORWARD` chain is set to `DROP`, you need to update your rules: - -```bash -sudo iptables -P FORWARD ACCEPT -``` diff --git a/docs/versioned_docs/version-2.22/getting-started/first-steps.md b/docs/versioned_docs/version-2.22/getting-started/first-steps.md deleted file mode 100644 index fb8437a06..000000000 --- a/docs/versioned_docs/version-2.22/getting-started/first-steps.md +++ /dev/null @@ -1,235 +0,0 @@ -# First steps with Constellation - -The following steps guide you through the process of creating a cluster and deploying a sample app. This example assumes that you have successfully [installed and set up Constellation](install.md), -and have access to a cloud subscription. - -:::tip -If you don't have a cloud subscription, you can also set up a [local Constellation cluster using virtualization](../getting-started/first-steps-local.md) for testing. 
-::: - -:::note -If you encounter any problem with the following steps, make sure to use the [latest release](https://github.com/edgelesssys/constellation/releases/latest) and check out the [known issues](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22). -::: - -## Create a cluster - -1. Create the [configuration file](../workflows/config.md) and state file for your cloud provider. If you are following the steps of this guide, there is no need to edit the file. - - - - - ```bash - constellation config generate aws - ``` - - - - - ```bash - constellation config generate azure - ``` - - - - - ```bash - constellation config generate gcp - ``` - - - - - ```bash - constellation config generate stackit - ``` - - - - -2. Create your [IAM configuration](../workflows/config.md#creating-an-iam-configuration). - - - - - ```bash - constellation iam create aws --zone=us-east-2a --prefix=constellTest --update-config - ``` - - This command creates IAM configuration for the AWS zone `us-east-2a` using the prefix `constellTest` for all named resources being created. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. - - Depending on the attestation variant selected on config generation, different regions are available. - AMD SEV-SNP machines (requires the default attestation variant `awsSEVSNP`) are currently available in the following regions: - * `eu-west-1` - * `us-east-2` - - You can find a list of regions that support AMD SEV-SNP in [AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/snp-requirements.html). - - NitroTPM machines (requires the attestation variant `awsNitroTPM`) are available in all regions. - Constellation OS images are currently replicated to the following regions: - * `eu-central-1` - * `eu-west-1` - * `eu-west-3` - * `us-east-2` - * `ap-south-1` - - If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+AWS+image+region:+xx-xxxx-x). - - You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). - - - - - ```bash - constellation iam create azure --subscriptionID 00000000-0000-0000-0000-000000000000 --region=westus --resourceGroup=constellTest --servicePrincipal=spTest --update-config - ``` - - This command creates IAM configuration on the Azure region `westus` creating a new resource group `constellTest` and a new service principal `spTest`. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. - - CVMs are available in several Azure regions. Constellation OS images are currently replicated to the following: - - * `germanywestcentral` - * `westus` - * `eastus` - * `northeurope` - * `westeurope` - * `southeastasia` - - If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+Azure+image+region:+xx-xxxx-x). - - You can find a list of all [regions in Azure's documentation](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=virtual-machines®ions=all). 
- - - - - ```bash - constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --prefix=constell-test --update-config - ``` - - This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. - - Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `C2D` or `N2D`. - - - - - To use Constellation on STACKIT, the cluster will use the User Access Token (UAT) that's generated [during the install step](./install.md). - After creating the accounts, fill in the STACKIT details in `constellation-conf.yaml` under `provider.openstack`: - - * `stackitProjectID`: STACKIT project id (can be found after login on the [STACKIT portal](https://portal.stackit.cloud)) - - :::caution - - `stackitProjectID` refers to the ID of your STACKIT project. The STACKIT portal also shows the OpenStack ID that's associated with your project in some places. Make sure you insert the STACKIT project ID in the `constellation-conf.yaml` file. It's of the format `XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX`. - - ::: - - - - - :::tip - To learn about all options you have for managing IAM resources and Constellation configuration, see the [Configuration workflow](../workflows/config.md). - ::: - - - -3. Create the cluster. `constellation apply` uses options set in `constellation-conf.yaml`. - If you want to manually manage your cloud resources, for example by using [Terraform](../reference/terraform.md), follow the corresponding instructions in the [Create workflow](../workflows/create.md). - - :::tip - - On Azure, you may need to wait 15+ minutes at this point for role assignments to propagate. - - ::: - - ```bash - constellation apply -y - ``` - - This should look similar to the following: - - ```shell-session - $ constellation apply -y - Checking for infrastructure changes - The following Constellation cluster will be created: - 3 control-plane nodes of type n2d-standard-4 will be created. - 1 worker node of type n2d-standard-4 will be created. - Creating - Cloud infrastructure created successfully - Your Constellation master secret was successfully written to ./constellation-mastersecret.json - Connecting - Initializing cluster - Installing Kubernetes components - Your Constellation cluster was successfully initialized. - - Constellation cluster identifier g6iMP5wRU1b7mpOz2WEISlIYSfdAhB0oNaOg6XEwKFY= - Kubernetes configuration constellation-admin.conf - - You can now connect to your cluster by executing: - export KUBECONFIG="$PWD/constellation-admin.conf" - ``` - - The cluster's identifier will be different in your output. - Keep `constellation-mastersecret.json` somewhere safe. - This will allow you to [recover your cluster](../workflows/recovery.md) in case of a disaster. - - :::info - - Depending on your CSP and region, `constellation apply` may take 10+ minutes to complete. - - ::: - -4. Configure kubectl. - - ```bash - export KUBECONFIG="$PWD/constellation-admin.conf" - ``` - -## Deploy a sample application - -1. Deploy the [emojivoto app](https://github.com/BuoyantIO/emojivoto) - - ```bash - kubectl apply -k github.com/BuoyantIO/emojivoto/kustomize/deployment - ``` - -2. 
Expose the frontend service locally - - ```bash - kubectl wait --for=condition=available --timeout=60s -n emojivoto --all deployments - kubectl -n emojivoto port-forward svc/web-svc 8080:80 & - curl http://localhost:8080 - kill %1 - ``` - -## Terminate your cluster - -Use the CLI to terminate your cluster. If you manually used [Terraform](../reference/terraform.md) to manage your cloud resources, follow the corresponding instructions in the [Terminate workflow](../workflows/terminate.md). - -```bash -constellation terminate -``` - -This should give the following output: - -```shell-session -$ constellation terminate -You are about to terminate a Constellation cluster. -All of its associated resources will be DESTROYED. -This action is irreversible and ALL DATA WILL BE LOST. -Do you want to continue? [y/n]: -``` - -Confirm with `y` to terminate the cluster: - -```shell-session -Terminating ... -Your Constellation cluster was terminated successfully. -``` - -Optionally, you can also [delete your IAM resources](../workflows/config.md#deleting-an-iam-configuration). diff --git a/docs/versioned_docs/version-2.22/getting-started/install.md b/docs/versioned_docs/version-2.22/getting-started/install.md deleted file mode 100644 index f120b865a..000000000 --- a/docs/versioned_docs/version-2.22/getting-started/install.md +++ /dev/null @@ -1,442 +0,0 @@ -# Installation and setup - -Constellation runs entirely in your cloud environment and can be controlled via a dedicated [command-line interface (CLI)](../reference/cli.md) or a [Terraform provider](../workflows/terraform-provider.md). - -## Prerequisites - -Make sure the following requirements are met: - -* Your machine is running Linux, macOS, or Windows -* You have admin rights on your machine -* [kubectl](https://kubernetes.io/docs/tasks/tools/) is installed -* Your CSP is Amazon Web Services (AWS), Microsoft Azure, Google Cloud Platform (GCP), or STACKIT - -## Install the Constellation CLI - -:::tip - -If you prefer to use Terraform, you can alternatively use the [Terraform provider](../workflows/terraform-provider.md) to manage the cluster's lifecycle. - -::: - -The CLI executable is available at [GitHub](https://github.com/edgelesssys/constellation/releases). -Install it with the following commands: - - - - -1. Download the CLI: - -```bash -curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-linux-amd64 -``` - -2. [Verify the signature](../workflows/verify-cli.md) (optional) - -3. Install the CLI to your PATH: - -```bash -sudo install constellation-linux-amd64 /usr/local/bin/constellation -``` - - - - -1. Download the CLI: - -```bash -curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-linux-arm64 -``` - -2. [Verify the signature](../workflows/verify-cli.md) (optional) - -3. Install the CLI to your PATH: - -```bash -sudo install constellation-linux-arm64 /usr/local/bin/constellation -``` - - - - - -1. Download the CLI: - -```bash -curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-darwin-arm64 -``` - -2. [Verify the signature](../workflows/verify-cli.md) (optional) - -3. Install the CLI to your PATH: - -```bash -sudo install constellation-darwin-arm64 /usr/local/bin/constellation -``` - - - - - -1. Download the CLI: - -```bash -curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-darwin-amd64 -``` - -2. [Verify the signature](../workflows/verify-cli.md) (optional) - -3. 
Install the CLI to your PATH: - -```bash -sudo install constellation-darwin-amd64 /usr/local/bin/constellation -``` - - - - - -1. Download the CLI: - -```bash -Invoke-WebRequest -OutFile ./constellation.exe -Uri 'https://github.com/edgelesssys/constellation/releases/latest/download/constellation-windows-amd64.exe' -``` - -2. [Verify the signature](../workflows/verify-cli.md) (optional) - -3. Install the CLI under `C:\Program Files\Constellation\bin\constellation.exe` - -3. Add the CLI to your PATH: - - 1. Open `Advanced system settings` by searching for the App in the Windows search - 2. Go to the `Advanced` tab - 3. Click `Environment Variables…` - 4. Click variable called `Path` and click `Edit…` - 5. Click `New` - 6. Enter the path to the folder containing the binary you want on your PATH: `C:\Program Files\Constellation\bin` - - - - -:::tip -The CLI supports autocompletion for various shells. To set it up, run `constellation completion` and follow the given steps. -::: - -## Set up cloud credentials - -Constellation makes authenticated calls to the CSP API. Therefore, you need to set up Constellation with the credentials for your CSP. - -:::tip -If you don't have a cloud subscription, you can also set up a [local Constellation cluster using virtualization](../getting-started/first-steps-local.md) for testing. -::: - -### Required permissions - - - - -To set up a Constellation cluster, you need to perform two tasks that require permissions: create the infrastructure and create roles for cluster nodes. Both of these actions can be performed by different users, e.g., an administrator to create roles and a DevOps engineer to create the infrastructure. - -To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "ec2:DescribeAccountAttributes", - "iam:AddRoleToInstanceProfile", - "iam:AttachRolePolicy", - "iam:CreateInstanceProfile", - "iam:CreatePolicy", - "iam:CreateRole", - "iam:DeleteInstanceProfile", - "iam:DeletePolicy", - "iam:DeletePolicyVersion", - "iam:DeleteRole", - "iam:DetachRolePolicy", - "iam:GetInstanceProfile", - "iam:GetPolicy", - "iam:GetPolicyVersion", - "iam:GetRole", - "iam:ListAttachedRolePolicies", - "iam:ListInstanceProfilesForRole", - "iam:ListPolicyVersions", - "iam:ListRolePolicies", - "iam:PassRole", - "iam:RemoveRoleFromInstanceProfile", - "sts:GetCallerIdentity" - ], - "Resource": "*" - } - ] -} -``` - -The built-in `AdministratorAccess` policy is a superset of these permissions. - -To [create a Constellation cluster](../workflows/create.md), see the permissions of [main.tf](https://github.com/edgelesssys/constellation/blob/main/terraform/infrastructure/iam/aws/main.tf). - -The built-in `PowerUserAccess` policy is a superset of these permissions. - -Follow Amazon's guide on [understanding](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) and [managing policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html). 
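If you prefer the AWS CLI, one possible way to turn the JSON document above into a managed policy and attach it to a user is sketched below. The policy name, user name, and account ID are placeholders, and the built-in policies mentioned above work just as well.

```bash
# Save the JSON permissions shown above as constellation-iam-policy.json first.
aws iam create-policy \
  --policy-name ConstellationIAMCreation \
  --policy-document file://constellation-iam-policy.json
# Attach it to the (placeholder) user that will run `constellation iam create aws`.
aws iam attach-user-policy \
  --user-name constellation-admin \
  --policy-arn arn:aws:iam::123456789012:policy/ConstellationIAMCreation
```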
- - - - -The following [resource providers need to be registered](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/resource-providers-and-types#register-resource-provider) in your subscription: - -* `Microsoft.Attestation` -* `Microsoft.Compute` -* `Microsoft.Insights` -* `Microsoft.ManagedIdentity` -* `Microsoft.Network` - -By default, Constellation tries to register these automatically if they haven't been registered before. - -To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: - -* `*/register/action` \[1] -* `Microsoft.Authorization/roleAssignments/*` -* `Microsoft.Authorization/roleDefinitions/*` -* `Microsoft.ManagedIdentity/userAssignedIdentities/*` -* `Microsoft.Resources/subscriptions/resourcegroups/*` - -The built-in `Owner` role is a superset of these permissions. - -To [create a Constellation cluster](../workflows/create.md), you need the following permissions: - -* `Microsoft.Attestation/attestationProviders/*` -* `Microsoft.Compute/virtualMachineScaleSets/*` -* `Microsoft.Insights/components/*` -* `Microsoft.ManagedIdentity/userAssignedIdentities/*` -* `Microsoft.Network/loadBalancers/*` -* `Microsoft.Network/loadBalancers/backendAddressPools/*` -* `Microsoft.Network/networkSecurityGroups/*` -* `Microsoft.Network/publicIPAddresses/*` -* `Microsoft.Network/virtualNetworks/*` -* `Microsoft.Network/virtualNetworks/subnets/*` -* `Microsoft.Network/natGateways/*` - -The built-in `Contributor` role is a superset of these permissions. - -Follow Microsoft's guide on [understanding](https://learn.microsoft.com/en-us/azure/role-based-access-control/role-definitions) and [assigning roles](https://learn.microsoft.com/en-us/azure/role-based-access-control/role-assignments). - -1: You can omit `*/register/Action` if the resource providers mentioned above are already registered and the `ARM_SKIP_PROVIDER_REGISTRATION` environment variable is set to `true` when creating the IAM configuration. - - - - -Create a new project for Constellation or use an existing one. -Enable the [Compute Engine API](https://console.cloud.google.com/apis/library/compute.googleapis.com) on it. - -To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: - -* `iam.roles.create` -* `iam.roles.delete` -* `iam.roles.get` -* `iam.serviceAccountKeys.create` -* `iam.serviceAccountKeys.delete` -* `iam.serviceAccountKeys.get` -* `iam.serviceAccounts.create` -* `iam.serviceAccounts.delete` -* `iam.serviceAccounts.get` -* `resourcemanager.projects.getIamPolicy` -* `resourcemanager.projects.setIamPolicy` - -Together, the built-in roles `roles/editor` and `roles/resourcemanager.projectIamAdmin` form a superset of these permissions. 
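If you want to scope access more tightly than the built-in roles, the permissions listed above could, for example, be bundled into a custom role. The role ID and project below are placeholders.

```bash
# Sketch: custom role with the IAM-configuration permissions listed above
gcloud iam roles create constellationIAMCreation \
  --project=yourproject-12345 \
  --permissions=iam.roles.create,iam.roles.delete,iam.roles.get,iam.serviceAccountKeys.create,iam.serviceAccountKeys.delete,iam.serviceAccountKeys.get,iam.serviceAccounts.create,iam.serviceAccounts.delete,iam.serviceAccounts.get,resourcemanager.projects.getIamPolicy,resourcemanager.projects.setIamPolicy
```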
- -To [create a Constellation cluster](../workflows/create.md), you need the following permissions: - -* `compute.addresses.createInternal` -* `compute.addresses.deleteInternal` -* `compute.addresses.get` -* `compute.addresses.useInternal` -* `compute.backendServices.create` -* `compute.backendServices.delete` -* `compute.backendServices.get` -* `compute.backendServices.use` -* `compute.disks.create` -* `compute.firewalls.create` -* `compute.firewalls.delete` -* `compute.firewalls.get` -* `compute.firewalls.update` -* `compute.globalAddresses.create` -* `compute.globalAddresses.delete` -* `compute.globalAddresses.get` -* `compute.globalAddresses.use` -* `compute.globalForwardingRules.create` -* `compute.globalForwardingRules.delete` -* `compute.globalForwardingRules.get` -* `compute.globalForwardingRules.setLabels` -* `compute.globalOperations.get` -* `compute.healthChecks.create` -* `compute.healthChecks.delete` -* `compute.healthChecks.get` -* `compute.healthChecks.useReadOnly` -* `compute.instanceGroupManagers.create` -* `compute.instanceGroupManagers.delete` -* `compute.instanceGroupManagers.get` -* `compute.instanceGroupManagers.update` -* `compute.instanceGroups.create` -* `compute.instanceGroups.delete` -* `compute.instanceGroups.get` -* `compute.instanceGroups.update` -* `compute.instanceGroups.use` -* `compute.instances.create` -* `compute.instances.setLabels` -* `compute.instances.setMetadata` -* `compute.instances.setTags` -* `compute.instanceTemplates.create` -* `compute.instanceTemplates.delete` -* `compute.instanceTemplates.get` -* `compute.instanceTemplates.useReadOnly` -* `compute.networks.create` -* `compute.networks.delete` -* `compute.networks.get` -* `compute.networks.updatePolicy` -* `compute.routers.create` -* `compute.routers.delete` -* `compute.routers.get` -* `compute.routers.update` -* `compute.subnetworks.create` -* `compute.subnetworks.delete` -* `compute.subnetworks.get` -* `compute.subnetworks.use` -* `compute.targetTcpProxies.create` -* `compute.targetTcpProxies.delete` -* `compute.targetTcpProxies.get` -* `compute.targetTcpProxies.use` -* `iam.serviceAccounts.actAs` - -Together, the built-in roles `roles/editor`, `roles/compute.instanceAdmin` and `roles/resourcemanager.projectIamAdmin` form a superset of these permissions. - -Follow Google's guide on [understanding](https://cloud.google.com/iam/docs/understanding-roles) and [assigning roles](https://cloud.google.com/iam/docs/granting-changing-revoking-access). - - - - -Constellation on STACKIT requires a User Access Token (UAT) for the OpenStack API and a STACKIT service account. -The UAT already has all required permissions by default. -The STACKIT service account needs the `editor` role to create STACKIT LoadBalancers. -Look at the [STACKIT documentation](https://docs.stackit.cloud/stackit/en/getting-started-in-service-accounts-134415831.html) on how to create the service account and assign the role. - - - - -### Authentication - -You need to authenticate with your CSP. The following lists the required steps for *testing* and *production* environments. - -:::note -The steps for a *testing* environment are simpler. However, they may expose secrets to the CSP. If in doubt, follow the *production* steps. -::: - - - - -**Testing** - -You can use the [AWS CloudShell](https://console.aws.amazon.com/cloudshell/home). Make sure you are [authorized to use it](https://docs.aws.amazon.com/cloudshell/latest/userguide/sec-auth-with-identities.html). 
- -**Production** - -Use the latest version of the [AWS CLI](https://aws.amazon.com/cli/) on a trusted machine: - -```bash -aws configure -``` - -Options and first steps are described in the [AWS CLI documentation](https://docs.aws.amazon.com/cli/index.html). - - - - -**Testing** - -Simply open the [Azure Cloud Shell](https://docs.microsoft.com/en-us/azure/cloud-shell/overview). - -**Production** - -Use the latest version of the [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/) on a trusted machine: - -```bash -az login -``` - -Other options are described in Azure's [authentication guide](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli). - - - - -**Testing** - -You can use the [Google Cloud Shell](https://cloud.google.com/shell). Make sure your [session is authorized](https://cloud.google.com/shell/docs/auth). For example, execute `gsutil` and accept the authorization prompt. - -**Production** - -Use one of the following options on a trusted machine: - -* Use the [`gcloud` CLI](https://cloud.google.com/sdk/gcloud) - - ```bash - gcloud auth application-default login - ``` - - This will ask you to log-in to your Google account and create your credentials. - The Constellation CLI will automatically load these credentials when needed. - -* Set up a service account and pass the credentials manually - - Follow [Google's guide](https://cloud.google.com/docs/authentication/production#manually) for setting up your credentials. - - - - -You need to authenticate with the infrastructure API (OpenStack) and create a service account (STACKIT API). - -1. [Follow the STACKIT documentation](https://docs.stackit.cloud/stackit/en/step-1-generating-of-user-access-token-11763726.html) for obtaining a User Access Token (UAT) to use the infrastructure API -2. Create a configuration file with the credentials from the User Access Token under: - * Linux: `~/.config/openstack/clouds.yaml` - * macOS: `/Users//Library/Application Support/openstack/clouds.yaml` or `/etc/openstack/clouds.yaml` - * Windows: `%AppData%\openstack\clouds.yaml` - - - ```yaml - clouds: - stackit: - auth: - auth_url: https://keystone.api.iaas.eu01.stackit.cloud/v3 - username: REPLACE_WITH_UAT_USERNAME - password: REPLACE_WITH_UAT_PASSWORD - project_id: REPLACE_WITH_OPENSTACK_PROJECT_ID - project_name: REPLACE_WITH_STACKIT_PROJECT_NAME - user_domain_name: portal_mvp - project_domain_name: portal_mvp - region_name: RegionOne - identity_api_version: 3 - ``` - -:::caution - -`project_id` refers to the ID of your OpenStack project. The STACKIT portal also shows the STACKIT ID that's associated with your project in some places. Make sure you insert the OpenStack project ID in the `clouds.yaml` file. - -::: - -3. [Follow the STACKIT documentation](https://docs.stackit.cloud/stackit/en/getting-started-in-service-accounts-134415831.html) for creating a service account and an access token -4. Assign the `editor` role to the service account by [following the documentation](https://docs.stackit.cloud/stackit/en/getting-started-in-service-accounts-134415831.html) -5. Create a configuration file under `~/.stackit/credentials.json` (`%USERPROFILE%\.stackit\credentials.json` on Windows) - - ```json - {"STACKIT_SERVICE_ACCOUNT_TOKEN":"REPLACE_WITH_TOKEN"} - ``` - - - - - -## Next steps - -You are now ready to [deploy your first confidential Kubernetes cluster and application](first-steps.md). 
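Before continuing, a quick sanity check that the tooling is installed and on your `PATH` can save time; this is only a sketch.

```bash
constellation version      # prints the installed CLI version
kubectl version --client   # confirms kubectl is installed
```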
diff --git a/docs/versioned_docs/version-2.22/getting-started/marketplaces.md b/docs/versioned_docs/version-2.22/getting-started/marketplaces.md deleted file mode 100644 index a6763a42a..000000000 --- a/docs/versioned_docs/version-2.22/getting-started/marketplaces.md +++ /dev/null @@ -1,56 +0,0 @@ -# Using Constellation via Cloud Marketplaces - -Constellation is available through the Marketplaces of AWS, Azure, GCP, and STACKIT. This allows you to create self-managed Constellation clusters that are billed on a pay-per-use basis (hourly, per vCPU) with your CSP account. You can still get direct support by Edgeless Systems. For more information, please [contact us](https://www.edgeless.systems/enterprise-support/). - -This document explains how to run Constellation with the dynamically billed cloud marketplace images. - - - - -To use Constellation's marketplace images, ensure that you are subscribed to the [marketplace offering](https://aws.amazon.com/marketplace/pp/prodview-2mbn65nv57oys) through the web portal. - -Then, enable the use of marketplace images in your Constellation `constellation-conf.yaml` [config file](../workflows/config.md): - -```bash -yq eval -i ".provider.aws.useMarketplaceImage = true" constellation-conf.yaml -``` - - - - -Constellation has a private marketplace plan. Please [contact us](https://www.edgeless.systems/enterprise-support/) to gain access. - -To use a marketplace image, you need to accept the marketplace image's terms once for your subscription with the [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/vm/image/terms?view=azure-cli-latest): - -```bash -az vm image terms accept --publisher edgelesssystems --offer constellation --plan constellation -``` - -Then, enable the use of marketplace images in your Constellation `constellation-conf.yaml` [config file](../workflows/config.md): - -```bash -yq eval -i ".provider.azure.useMarketplaceImage = true" constellation-conf.yaml -``` - - - - -To use a marketplace image, ensure that the account is entitled to use marketplace images by Edgeless Systems by accepting the terms through the [web portal](https://console.cloud.google.com/marketplace/vm/config/edgeless-systems-public/constellation). - -Then, enable the use of marketplace images in your Constellation `constellation-conf.yaml` [config file](../workflows/config.md): - -```bash -yq eval -i ".provider.gcp.useMarketplaceImage = true" constellation-conf.yaml -``` - - - - -On STACKIT, the selected Constellation image is always a marketplace image. You can find more information on the STACKIT portal. - - - - -Ensure that the cluster uses an official release image version (i.e., `.image=vX.Y.Z` in the `constellation-conf.yaml` file). - -From there, you can proceed with the [cluster creation](../workflows/create.md) as usual. diff --git a/docs/versioned_docs/version-2.22/intro.md b/docs/versioned_docs/version-2.22/intro.md deleted file mode 100644 index 0bfe86da9..000000000 --- a/docs/versioned_docs/version-2.22/intro.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -slug: / -id: intro ---- -# Introduction - -Welcome to the documentation of Constellation! Constellation is a Kubernetes engine that aims to provide the best possible data security. - -![Constellation concept](/img/concept.svg) - - Constellation shields your entire Kubernetes cluster from the underlying cloud infrastructure. Everything inside is always encrypted, including at runtime in memory. 
For this, Constellation leverages a technology called *confidential computing* and more specifically Confidential VMs. - -:::tip -See the 📄[whitepaper](https://content.edgeless.systems/hubfs/Confidential%20Computing%20Whitepaper.pdf) for more information on confidential computing. -::: - -## Goals - -From a security perspective, Constellation is designed to keep all data always encrypted and to prevent any access from the underlying (cloud) infrastructure. This includes access from datacenter employees, privileged cloud admins, and attackers coming through the infrastructure. Such attackers could be malicious co-tenants escalating their privileges or hackers who managed to compromise a cloud server. - -From a DevOps perspective, Constellation is designed to work just like what you would expect from a modern Kubernetes engine. - -## Use cases - -Constellation provides unique security [features](overview/confidential-kubernetes.md) and [benefits](overview/security-benefits.md). The core use cases are: - -* Increasing the overall security of your clusters -* Increasing the trustworthiness of your SaaS offerings -* Moving sensitive workloads from on-prem to the cloud -* Meeting regulatory requirements - -## Next steps - -You can learn more about the concept of Confidential Kubernetes, features, security benefits, and performance of Constellation in the *Basics* section. To jump right into the action head to *Getting started*. diff --git a/docs/versioned_docs/version-2.22/overview/clouds.md b/docs/versioned_docs/version-2.22/overview/clouds.md deleted file mode 100644 index b2695d28e..000000000 --- a/docs/versioned_docs/version-2.22/overview/clouds.md +++ /dev/null @@ -1,66 +0,0 @@ -# Feature status of clouds - -What works on which cloud? Currently, Confidential VMs (CVMs) are available in varying quality on the different clouds and software stacks. - -For Constellation, the ideal environment provides the following: - -1. Ability to run arbitrary software and images inside CVMs -2. CVMs based on AMD SEV-SNP (available in EPYC CPUs since the Milan generation) or Intel TDX (available in Xeon CPUs since the Sapphire Rapids generation) -3. Ability for CVM guests to obtain raw hardware attestation statements -4. Reviewable, open-source firmware inside CVMs -5. Capability of the firmware to attest the integrity of the code it passes control to, e.g., with an embedded virtual TPM (vTPM) - -(1) is a functional must-have. (2)--(5) are required for remote attestation that fully keeps the infrastructure/cloud out. Constellation can work without them or with approximations, but won't protect against certain privileged attackers anymore. - -The following table summarizes the state of features for different infrastructures. - -| **Feature** | **AWS** | **Azure** | **GCP** | **STACKIT** | **OpenStack (Yoga)** | -|-----------------------------------|---------|-----------|---------|--------------|----------------------| -| **1. Custom images** | Yes | Yes | Yes | Yes | Yes | -| **2. SEV-SNP or TDX** | Yes | Yes | Yes | No | Depends on kernel/HV | -| **3. Raw guest attestation** | Yes | Yes | Yes | No | Depends on kernel/HV | -| **4. Reviewable firmware** | Yes | No* | No | No | Depends on kernel/HV | -| **5. Confidential measured boot** | No | Yes | No | No | Depends on kernel/HV | - -## Amazon Web Services (AWS) - -Amazon EC2 [supports AMD SEV-SNP](https://aws.amazon.com/de/about-aws/whats-new/2023/04/amazon-ec2-amd-sev-snp/). -Regarding (3), AWS provides direct access to attestation statements. 
-However, regarding (5), attestation is partially based on the [NitroTPM](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html) for [measured boot](../architecture/attestation.md#measured-boot), which is a vTPM managed by the Nitro hypervisor. -Hence, the hypervisor is currently part of Constellation's TCB. -Regarding (4), the [firmware is open source](https://github.com/aws/uefi) and can be reproducibly built. - -## Microsoft Azure - -With its [CVM offering](https://docs.microsoft.com/en-us/azure/confidential-computing/confidential-vm-overview), Azure provides the best foundations for Constellation. -Regarding (3), Azure provides direct access to attestation statements. -The firmware runs in an isolated domain inside the CVM and exposes a vTPM (5), but it's closed source (4). -On SEV-SNP, Azure uses VM Privilege Level (VMPL) isolation for the separation of firmware and the rest of the VM; on TDX, they use TD partitioning. -This firmware is signed by Azure. -The signature is reflected in the attestation statements of CVMs. -Thus, the Azure closed-source firmware becomes part of Constellation's trusted computing base (TCB). - -\* Recently, [Azure announced the open source paravisor OpenHCL](https://techcommunity.microsoft.com/blog/windowsosplatform/openhcl-the-new-open-source-paravisor/4273172). It's the foundation for fully open source and verifiable CVM firmware. Once Azure provides their CVM firmware with reproducible builds based on OpenHCL, (4) switches from *No* to *Yes*. Constellation will support OpenHCL based firmware on Azure in the future. - -## Google Cloud Platform (GCP) - -The [CVMs Generally Available in GCP](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview#technologies) are based on AMD SEV-ES or SEV-SNP. -Regarding (3), with their SEV-SNP offering Google provides direct access to attestation statements. -However, regarding (5), attestation is partially based on the [Shielded VM vTPM](https://cloud.google.com/compute/shielded-vm/docs/shielded-vm#vtpm) for [measured boot](../architecture/attestation.md#measured-boot), which is a vTPM managed by Google's hypervisor. -Hence, the hypervisor is currently part of Constellation's TCB. -Regarding (4), the CVMs still include closed-source firmware. - -[TDX on Google](https://cloud.google.com/blog/products/identity-security/confidential-vms-on-intel-cpus-your-datas-new-intelligent-defense) is in public preview. -With it, Constellation would have a similar TCB and attestation flow as with the current SEV-SNP offering. - -## STACKIT - -[STACKIT Compute Engine](https://www.stackit.de/en/product/stackit-compute-engine/) supports AMD SEV-ES. A vTPM is used for measured boot, which is a vTPM managed by STACKIT's hypervisor. Hence, the hypervisor is currently part of Constellation's TCB. - -## OpenStack - -OpenStack is an open-source cloud and infrastructure management software. It's used by many smaller CSPs and datacenters. In the latest *Yoga* version, OpenStack has basic support for CVMs. However, much depends on the employed kernel and hypervisor. Features (2)--(4) are likely to be a *Yes* with Linux kernel version 6.2. Thus, going forward, OpenStack on corresponding AMD or Intel hardware will be a viable underpinning for Constellation. - -## Conclusion - -The different clouds and software like the Linux kernel and OpenStack are in the process of building out their support for state-of-the-art CVMs. Azure has already most features in place. 
For Constellation, the status quo means that the TCB has different shapes on different infrastructures. With broad SEV-SNP support coming to the Linux kernel, we soon expect a normalization of features across infrastructures. diff --git a/docs/versioned_docs/version-2.22/overview/confidential-kubernetes.md b/docs/versioned_docs/version-2.22/overview/confidential-kubernetes.md deleted file mode 100644 index bff8c3322..000000000 --- a/docs/versioned_docs/version-2.22/overview/confidential-kubernetes.md +++ /dev/null @@ -1,42 +0,0 @@ -# Confidential Kubernetes - -We use the term *Confidential Kubernetes* to refer to the concept of using confidential-computing technology to shield entire Kubernetes clusters from the infrastructure. The three defining properties of this concept are: - -1. **Workload shielding**: the confidentiality and integrity of all workload-related data and code are enforced. -2. **Control plane shielding**: the confidentiality and integrity of the cluster's control plane, state, and workload configuration are enforced. -3. **Attestation and verifiability**: the two properties above can be verified remotely based on hardware-rooted cryptographic certificates. - -Each of the above properties is equally important. Only with all three in conjunction, an entire cluster can be shielded without gaps. - -## Constellation security features - -Constellation implements the Confidential Kubernetes concept with the following security features. - -* **Runtime encryption**: Constellation runs all Kubernetes nodes inside Confidential VMs (CVMs). This gives runtime encryption for the entire cluster. -* **Network and storage encryption**: Constellation augments this with transparent encryption of the [network](../architecture/networking.md), [persistent storage](../architecture/encrypted-storage.md), and other managed storage like [AWS S3](../architecture/encrypted-storage.md#encrypted-s3-object-storage). Thus, workloads and control plane are truly end-to-end encrypted: at rest, in transit, and at runtime. -* **Transparent key management**: Constellation manages the corresponding [cryptographic keys](../architecture/keys.md) inside CVMs. -* **Node attestation and verification**: Constellation verifies the integrity of each new CVM-based node using [remote attestation](../architecture/attestation.md). Only "good" nodes receive the cryptographic keys required to access the network and storage of a cluster. -* **Confidential computing-optimized images**: A node is "good" if it's running a signed Constellation [node image](../architecture/images.md) inside a CVM and is in the expected state. (Node images are hardware-measured during boot. The measurements are reflected in the attestation statements that are produced by nodes and verified by Constellation.) -* **"Whole cluster" attestation**: Towards the DevOps engineer, Constellation provides a single hardware-rooted certificate from which all of the above can be verified. - -With the above, Constellation wraps an entire cluster into one coherent and verifiable *confidential context*. The concept is depicted in the following. - -![Confidential Kubernetes](../_media/concept-constellation.svg) - -## Comparison: Managed Kubernetes with CVMs - -In comparison, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. 
Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. Consequently, this approach leaves a large attack surface, as is depicted in the following. - -![Concept: Managed Kubernetes plus CVMs](../_media/concept-managed.svg) - -The following table highlights the key differences in terms of features. - -| | Managed Kubernetes with CVMs | Confidential Kubernetes (Constellation✨) | -|-------------------------------------|------------------------------|--------------------------------------------| -| Runtime encryption | Partial (data plane only)| **Yes** | -| Node image verification | No | **Yes** | -| Full cluster attestation | No | **Yes** | -| Transparent network encryption | No | **Yes** | -| Transparent storage encryption | No | **Yes** | -| Confidential key management | No | **Yes** | -| Cloud agnostic / multi-cloud | No | **Yes** | diff --git a/docs/versioned_docs/version-2.22/overview/license.md b/docs/versioned_docs/version-2.22/overview/license.md deleted file mode 100644 index 34122c025..000000000 --- a/docs/versioned_docs/version-2.22/overview/license.md +++ /dev/null @@ -1,33 +0,0 @@ -# License - -## Source code - -Constellation's source code is available on [GitHub](https://github.com/edgelesssys/constellation) under the [GNU Affero General Public License v3.0](https://github.com/edgelesssys/constellation/blob/main/LICENSE). - -## Binaries - -Edgeless Systems provides ready-to-use and [signed](../architecture/attestation.md#chain-of-trust) binaries of Constellation. This includes the CLI and the [node images](../architecture/images.md). - -These binaries may be used free of charge within the bounds of Constellation's [**Community License**](#community-license). An [**Enterprise License**](#enterprise-license) can be purchased from Edgeless Systems. - -The Constellation CLI displays relevant license information when you initialize your cluster. You are responsible for staying within the bounds of your respective license. Constellation doesn't enforce any limits so as not to endanger your cluster's availability. - -## Terraform provider - -Edgeless Systems provides a [Terraform provider](https://github.com/edgelesssys/terraform-provider-constellation/releases), which may be used free of charge within the bounds of Constellation's [**Community License**](#community-license). An [**Enterprise License**](#enterprise-license) can be purchased from Edgeless Systems. - -You are responsible for staying within the bounds of your respective license. Constellation doesn't enforce any limits so as not to endanger your cluster's availability. - -## Community License - -You are free to use the Constellation binaries provided by Edgeless Systems to create services for internal consumption, evaluation purposes, or non-commercial use. You must not use the Constellation binaries to provide commercial hosted services to third parties. Edgeless Systems gives no warranties and offers no support. - -## Enterprise License - -Enterprise Licenses don't have the above limitations and come with support and additional features. 
Find out more at the [product website](https://www.edgeless.systems/products/constellation/). - -Once you have received your Enterprise License file, place it in your [Constellation workspace](../architecture/orchestration.md#workspaces) in a file named `constellation.license`. - -## CSP Marketplaces - -Constellation is available through the Marketplaces of AWS, Azure, GCP, and STACKIT. This allows you to create self-managed Constellation clusters that are billed on a pay-per-use basis (hourly, per vCPU) with your CSP account. You can still get direct support by Edgeless Systems. For more information, please [contact us](https://www.edgeless.systems/enterprise-support/). diff --git a/docs/versioned_docs/version-2.22/overview/performance/application.md b/docs/versioned_docs/version-2.22/overview/performance/application.md deleted file mode 100644 index c67d59644..000000000 --- a/docs/versioned_docs/version-2.22/overview/performance/application.md +++ /dev/null @@ -1,102 +0,0 @@ -# Application benchmarks - -## HashiCorp Vault - -[HashiCorp Vault](https://www.vaultproject.io/) is a distributed secrets management software that can be deployed to Kubernetes. -HashiCorp maintains a benchmarking tool for vault, [vault-benchmark](https://github.com/hashicorp/vault-benchmark/). -Vault-benchmark generates load on a Vault deployment and measures response times. - -This article describes the results from running vault-benchmark on Constellation, AKS, and GKE. -You can find the setup for producing the data discussed in this article in the [vault-benchmarks](https://github.com/edgelesssys/vault-benchmarks) repository. - -The Vault API used during benchmarking is the [transits secret engine](https://developer.hashicorp.com/vault/docs/secrets/transit). -This allows services to send data to Vault for encryption, decryption, signing, and verification. - -## Results - -On each run, vault-benchmark sends requests and measures the latencies. -The measured latencies are aggregated through various statistical features. -After running the benchmark n times, the arithmetic mean over a subset of the reported statistics is calculated. -The selected features are arithmetic mean, 99th percentile, minimum, and maximum. - -Arithmetic mean gives a general sense of the latency on each target. -The 99th percentile shows performance in (most likely) erroneous states. -Minimum and maximum mark the range within which latency varies each run. - -The benchmark was configured with 1300 workers and 10 seconds per run. -Those numbers were chosen empirically. -The latency was stabilizing at 10 seconds runtime, not changing with further increase. -Increasing the number of workers beyond 1300 leads to request failures, marking the limit Vault was able to handle in this setup. -All results are based on 100 runs. - -The following data was generated while running five replicas, one primary, and four standby nodes. -All numbers are in seconds if not indicated otherwise. 
-``` -========== Results AKS ========== -Mean: mean: 1.632200, variance: 0.002057 -P99: mean: 5.480679, variance: 2.263700 -Max: mean: 6.651001, variance: 2.808401 -Min: mean: 0.011415, variance: 0.000133 -========== Results GKE ========== -Mean: mean: 1.656435, variance: 0.003615 -P99: mean: 6.030807, variance: 3.955051 -Max: mean: 7.164843, variance: 3.300004 -Min: mean: 0.010233, variance: 0.000111 -========== Results C11n ========== -Mean: mean: 1.651549, variance: 0.001610 -P99: mean: 5.780422, variance: 3.016106 -Max: mean: 6.942997, variance: 3.075796 -Min: mean: 0.013774, variance: 0.000228 -========== AKS vs C11n ========== -Mean: +1.171577 % (AKS is faster) -P99: +5.185495 % (AKS is faster) -Max: +4.205618 % (AKS is faster) -Min: +17.128781 % (AKS is faster) -========== GKE vs C11n ========== -Mean: -0.295851 % (GKE is slower) -P99: -4.331603 % (GKE is slower) -Max: -3.195248 % (GKE is slower) -Min: +25.710886 % (GKE is faster) -``` - -**Interpretation**: Latencies are all within ~5% of each other. -AKS performs slightly better than GKE and Constellation (C11n) in all cases except minimum latency. -Minimum latency is the lowest for GKE. -Compared to GKE, Constellation had slightly lower peak latencies (99th percentile and maximum), indicating that Constellation could have handled slightly more concurrent accesses than GKE. -Overall, performance is at comparable levels across all three distributions. -Based on these numbers, you can use a similarly sized Constellation cluster to run your existing Vault deployment. - -### Visualization - -The following plots visualize the data presented above as [box plots](https://en.wikipedia.org/wiki/Box_plot). -The whiskers denote the minimum and maximum. -The box stretches from the 25th to the 75th percentile, with the dividing bar marking the 50th percentile. -The circles outside the whiskers denote outliers. - -
-Mean Latency - -![Mean Latency](../../_media/benchmark_vault/5replicas/mean_latency.png) - -
- -
-99th Percentile Latency - -![99th Percentile Latency](../../_media/benchmark_vault/5replicas/p99_latency.png) - -
- -
-Maximum Latency - -![Maximum Latency](../../_media/benchmark_vault/5replicas/max_latency.png) - -
- -
-Minimum Latency - -![Minimum Latency](../../_media/benchmark_vault/5replicas/min_latency.png) - -
diff --git a/docs/versioned_docs/version-2.22/overview/performance/compute.md b/docs/versioned_docs/version-2.22/overview/performance/compute.md deleted file mode 100644 index 88dd4b1b2..000000000 --- a/docs/versioned_docs/version-2.22/overview/performance/compute.md +++ /dev/null @@ -1,11 +0,0 @@ -# Impact of runtime encryption on compute performance - -All nodes in a Constellation cluster are executed inside Confidential VMs (CVMs). Consequently, the performance of Constellation is inherently linked to the performance of these CVMs. - -## AMD and Azure benchmarking - -AMD and Azure have collectively released a [performance benchmark](https://community.amd.com/t5/business/microsoft-azure-confidential-computing-powered-by-3rd-gen-epyc/ba-p/497796) for CVMs that utilize 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. This benchmark, which included a variety of mostly compute-intensive tests such as SPEC CPU 2017 and CoreMark, demonstrated that CVMs experience only minor performance degradation (ranging from 2% to 8%) when compared to standard VMs. Such results are indicative of the performance that can be expected from compute-intensive workloads running with Constellation on Azure. - -## AMD and Google benchmarking - -Similarly, AMD and Google have jointly released a [performance benchmark](https://www.amd.com/system/files/documents/3rd-gen-epyc-gcp-c2d-conf-compute-perf-brief.pdf) for CVMs employing 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. With high-performance computing workloads such as WRF, NAMD, Ansys CFS, and Ansys LS_DYNA, they observed analogous findings, with only minor performance degradation (between 2% and 4%) compared to standard VMs. These outcomes are reflective of the performance that can be expected for compute-intensive workloads running with Constellation on GCP. diff --git a/docs/versioned_docs/version-2.22/overview/performance/io.md b/docs/versioned_docs/version-2.22/overview/performance/io.md deleted file mode 100644 index 3ae796f8a..000000000 --- a/docs/versioned_docs/version-2.22/overview/performance/io.md +++ /dev/null @@ -1,204 +0,0 @@ -# I/O performance benchmarks - -To assess the overall performance of Constellation, this benchmark evaluates Constellation v2.6.0 in terms of storage I/O using [`fio`](https://fio.readthedocs.io/en/latest/fio_doc.html) and network performance using the [Kubernetes Network Benchmark](https://github.com/InfraBuilder/k8s-bench-suite#knb--kubernetes-network-be). - -This benchmark tested Constellation on Azure and GCP and compared the results against the managed Kubernetes offerings AKS and GKE. - -## Configurations - -### Constellation - -The benchmark was conducted with Constellation v2.6.0, Kubernetes v1.25.7, and Cilium v1.12. -It ran on the following infrastructure configurations. - -Constellation on Azure: - -- Nodes: 3 (1 Control-plane, 2 Worker) -- Machines: `DC4as_v5`: 3rd Generation AMD EPYC 7763v (Milan) processor with 4 Cores, 16 GiB memory -- CVM: `true` -- Region: `West US` -- Zone: `2` - -Constellation on GCP: - -- Nodes: 3 (1 Control-plane, 2 Worker) -- Machines: `n2d-standard-4`: 2nd Generation AMD EPYC (Rome) processor with 4 Cores, 16 GiB of memory -- CVM: `true` -- Zone: `europe-west3-b` - -### AKS - -On AKS, the benchmark used Kubernetes `v1.24.9` and nodes with version `AKSUbuntu-1804gen2containerd-2023.02.15`. 
-AKS ran with the [`kubenet`](https://learn.microsoft.com/en-us/azure/aks/concepts-network#kubenet-basic-networking) CNI and the [default CSI driver](https://learn.microsoft.com/en-us/azure/aks/azure-disk-csi) for Azure Disk.
-
-The following infrastructure configuration was used:
-
-- Nodes: 2 (2 Worker)
-- Machines: `D4as_v5`: 3rd Generation AMD EPYC 7763v (Milan) processor with 4 Cores, 16 GiB memory
-- CVM: `false`
-- Region: `West US`
-- Zone: `2`
-
-### GKE
-
-On GKE, the benchmark used Kubernetes `v1.24.9` and nodes with version `1.24.9-gke.3200`.
-GKE ran with the [`kubenet`](https://cloud.google.com/kubernetes-engine/docs/concepts/network-overview) CNI and the [default CSI driver](https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/gce-pd-csi-driver) for Compute Engine persistent disk.
-
-The following infrastructure configuration was used:
-
-- Nodes: 2 (2 Worker)
-- Machines: `n2d-standard-4` 2nd Generation AMD EPYC (Rome) processor with 4 Cores, 16 GiB of memory
-- CVM: `false`
-- Zone: `europe-west3-b`
-
-## Results
-
-### Network
-
-This section gives a thorough analysis of the network performance of Constellation, specifically focusing on measuring TCP and UDP bandwidth.
-The benchmark measured the bandwidth of pod-to-pod and pod-to-service connections between two different nodes using [`iperf`](https://iperf.fr/).
-
-GKE and Constellation on GCP had a maximum network bandwidth of [10 Gbps](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines).
-AKS with `Standard_D4as_v5` machines has a maximum network bandwidth of [12.5 Gbps](https://learn.microsoft.com/en-us/azure/virtual-machines/dasv5-dadsv5-series#dasv5-series).
-The Confidential VM equivalent `Standard_DC4as_v5` currently has a network bandwidth of [1.25 Gbps](https://learn.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series#dcasv5-series-products).
-Therefore, to make the test comparable, both AKS and Constellation on Azure were running with `Standard_DC4as_v5` machines and 1.25 Gbps bandwidth.
-
-Constellation on Azure and AKS used an MTU of 1500.
-Constellation on GCP used an MTU of 8896. GKE used an MTU of 1450.
-
-The difference in network bandwidth can largely be attributed to two factors.
-
-- Constellation's [network encryption](../../architecture/networking.md) via Cilium and WireGuard, which protects data in-transit.
-- [AMD SEV using SWIOTLB bounce buffers](https://lore.kernel.org/all/20200204193500.GA15564@ashkalra_ubuntu_server/T/) for all DMA including network I/O.
-
-#### Pod-to-Pod
-
-In this scenario, the client Pod connects directly to the server pod via its IP address.
-
-```mermaid
-flowchart LR
-    subgraph Node A
-    Client[Client]
-    end
-    subgraph Node B
-    Server[Server]
-    end
-    Client ==>|traffic| Server
-```
-
-The results for "Pod-to-Pod" on Azure are as follows:
-
-![Network Pod2Pod Azure benchmark graph](../../_media/benchmark_net_p2p_azure.png)
-
-The results for "Pod-to-Pod" on GCP are as follows:
-
-![Network Pod2Pod GCP benchmark graph](../../_media/benchmark_net_p2p_gcp.png)
-
-#### Pod-to-Service
-
-In this scenario, the client Pod connects to the server Pod via a ClusterIP service. This is more relevant to real-world use cases.
-
-```mermaid
-flowchart LR
-    subgraph Node A
-    Client[Client] ==>|traffic| Service[Service]
-    end
-    subgraph Node B
-    Server[Server]
-    end
-    Service ==>|traffic| Server
-```
-
-The results for "Pod-to-Service" on Azure are as follows:
-
-![Network Pod2SVC Azure benchmark graph](../../_media/benchmark_net_p2svc_azure.png)
-
-The results for "Pod-to-Service" on GCP are as follows:
-
-![Network Pod2SVC GCP benchmark graph](../../_media/benchmark_net_p2svc_gcp.png)
-
-In our recent comparison of Constellation on GCP with GKE, Constellation has 58% less TCP bandwidth. However, UDP bandwidth was slightly better with Constellation, thanks to its higher MTU.
-
-Similarly, when comparing Constellation on Azure with AKS using CVMs, Constellation achieved approximately 10% less TCP and 40% less UDP bandwidth.
-
-### Storage I/O
-
-Azure and GCP offer persistent storage for their Kubernetes services AKS and GKE via the Container Storage Interface (CSI). CSI storage in Kubernetes is available via `PersistentVolumes` (PV) and consumed via `PersistentVolumeClaims` (PVC).
-Upon requesting persistent storage through a PVC, GKE and AKS will provision a PV as defined by a default [storage class](https://kubernetes.io/docs/concepts/storage/storage-classes/).
-Constellation provides persistent storage on Azure and GCP [that's encrypted on the CSI layer](../../architecture/encrypted-storage.md).
-Similarly, upon a PVC request, Constellation will provision a PV via a default storage class.
-
-For Constellation on Azure and AKS, the benchmark ran with Azure Disk storage [Standard SSD](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#standard-ssds) of 400 GiB size.
-The [DC4as machine type](https://learn.microsoft.com/en-us/azure/virtual-machines/dasv5-dadsv5-series#dasv5-series) with four cores provides the following maximum performance:
-
-- 6400 (20000 burst) IOPS
-- 144 MB/s (600 MB/s burst) throughput
-
-However, the performance is bound by the capabilities of the [512 GiB Standard SSD size](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#standard-ssds) (the size class of 400 GiB volumes):
-
-- 500 (600 burst) IOPS
-- 60 MB/s (150 MB/s burst) throughput
-
-For Constellation on GCP and GKE, the benchmark ran with Compute Engine Persistent Disk Storage [pd-balanced](https://cloud.google.com/compute/docs/disks) of 400 GiB size.
-The N2D machine type with four cores and pd-balanced provides the following [maximum performance](https://cloud.google.com/compute/docs/disks/performance#n2d_vms):
-
-- 3,000 read IOPS
-- 15,000 write IOPS
-- 240 MB/s read throughput
-- 240 MB/s write throughput
-
-However, the performance is bound by the capabilities of a [`Zonal balanced PD`](https://cloud.google.com/compute/docs/disks/performance#zonal-persistent-disks) with 400 GiB size:
-
-- 2400 read IOPS
-- 2400 write IOPS
-- 112 MB/s read throughput
-- 112 MB/s write throughput
-
-The [`fio`](https://fio.readthedocs.io/en/latest/fio_doc.html) benchmark consists of several tests.
-The benchmark used [`Kubestr`](https://github.com/kastenhq/kubestr) to run `fio` in Kubernetes.
-The default test performs randomized access patterns that accurately depict worst-case I/O scenarios for most applications.
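For reference, a comparable run can be reproduced with Kubestr against a cluster's storage class. This is only a sketch: the storage class name, volume size, and job file are illustrative assumptions, and the flag names should be checked against the Kubestr version in use.

```bash
# Sketch: run an fio job through Kubestr against a storage class.
# Storage class name, size, and job file below are illustrative assumptions.
kubectl get storageclass
kubestr fio \
  --storageclass encrypted-rwo \
  --size 400Gi \
  --fiofile fio.ini
```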
-
-The following `fio` settings were used:
-
-- No Cloud caching
-- No OS caching
-- Single CPU
-- 60 seconds runtime
-- 10 seconds ramp-up time
-- 10 GiB file
-- IOPS: 4 KB blocks and 128 iodepth
-- Bandwidth: 1024 KB blocks and 128 iodepth
-
-For more details, see the [`fio` test configuration](https://github.com/edgelesssys/constellation/blob/main/.github/actions/e2e_benchmark/fio.ini).
-
-The results for IOPS on Azure are as follows:
-
-![I/O IOPS Azure benchmark graph](../../_media/benchmark_fio_azure_iops.png)
-
-The results for IOPS on GCP are as follows:
-
-![I/O IOPS GCP benchmark graph](../../_media/benchmark_fio_gcp_iops.png)
-
-The results for bandwidth on Azure are as follows:
-
-![I/O bandwidth Azure benchmark graph](../../_media/benchmark_fio_azure_bw.png)
-
-The results for bandwidth on GCP are as follows:
-
-![I/O bandwidth GCP benchmark graph](../../_media/benchmark_fio_gcp_bw.png)
-
-On GCP, the results exceed the maximum performance guarantees of the chosen disk type. There are two possible explanations for this. The first is that there may be cloud caching in place that isn't configurable. Alternatively, the underlying provisioned disk size may be larger than what was requested, resulting in higher performance boundaries.
-
-When comparing Constellation on GCP with GKE, Constellation has similar bandwidth but about 10% less IOPS performance. On Azure, Constellation has similar IOPS performance compared to AKS, where both likely hit the maximum storage performance. However, Constellation has approximately 15% less read and write bandwidth.
-
-## Conclusion
-
-Despite the added [security benefits](../security-benefits.md) that Constellation provides, it only incurs a slight performance overhead when compared to managed Kubernetes offerings such as AKS and GKE. In most compute benchmarks, Constellation is on par with its alternatives.
-While it may be slightly slower in certain I/O scenarios due to network and storage encryption, there is ongoing work to reduce this overhead to single digits.
-
-For instance, storage encryption only adds between 10% and 15% overhead in terms of bandwidth and IOPS.
-Meanwhile, the biggest performance impact that Constellation currently faces is network encryption, which can incur up to 58% overhead on a 10 Gbps network.
-However, the Cilium team has conducted [benchmarks with Cilium using WireGuard encryption](https://docs.cilium.io/en/latest/operations/performance/benchmark/#encryption-wireguard-ipsec) on a 100 Gbps network that yielded over 15 Gbps.
-We're confident that Constellation will provide a similar level of performance with an upcoming release.
-
-Overall, Constellation strikes a great balance between security and performance, and we're continuously working to improve its performance capabilities while maintaining its high level of security.
diff --git a/docs/versioned_docs/version-2.22/overview/performance/performance.md b/docs/versioned_docs/version-2.22/overview/performance/performance.md
deleted file mode 100644
index 59bf86602..000000000
--- a/docs/versioned_docs/version-2.22/overview/performance/performance.md
+++ /dev/null
@@ -1,17 +0,0 @@
-# Performance analysis of Constellation
-
-This section provides a comprehensive examination of the performance characteristics of Constellation.
-
-## Runtime encryption
-
-Runtime encryption affects compute performance.
[Benchmarks by Azure and Google](compute.md) show that the performance degradation of Confidential VMs (CVMs) is small, ranging from 2% to 8% for compute-intensive workloads. - -## I/O performance benchmarks - -We evaluated the [I/O performance](io.md) of Constellation, utilizing a collection of synthetic benchmarks targeting networking and storage. -We further compared this performance to native managed Kubernetes offerings from various cloud providers, to better understand how Constellation stands in relation to standard practices. - -## Application benchmarking - -To gauge Constellation's applicability to well-known applications, we performed a [benchmark of HashiCorp Vault](application.md) running on Constellation. -The results were then compared to deployments on the managed Kubernetes offerings from different cloud providers, providing a tangible perspective on Constellation's performance in actual deployment scenarios. diff --git a/docs/versioned_docs/version-2.22/overview/product.md b/docs/versioned_docs/version-2.22/overview/product.md deleted file mode 100644 index 4b5d90706..000000000 --- a/docs/versioned_docs/version-2.22/overview/product.md +++ /dev/null @@ -1,12 +0,0 @@ -# Product features - -Constellation is a Kubernetes engine that aims to provide the best possible data security in combination with enterprise-grade scalability and reliability features---and a smooth user experience. - -From a security perspective, Constellation implements the [Confidential Kubernetes](confidential-kubernetes.md) concept and corresponding security features, which shield your entire cluster from the underlying infrastructure. - -From an operational perspective, Constellation provides the following key features: - -* **Native support for different clouds**: Constellation works on Amazon Web Services (AWS), Microsoft Azure, Google Cloud Platform (GCP), and STACKIT. Support for OpenStack-based environments is coming with a future release. Constellation securely interfaces with the cloud infrastructure to provide [cluster autoscaling](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), [dynamic persistent volumes](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/), and [service load balancing](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). -* **High availability**: Constellation uses a [multi-master architecture](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/) with a [stacked etcd topology](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/ha-topology/#stacked-etcd-topology) to ensure high availability. -* **Integrated Day-2 operations**: Constellation lets you securely [upgrade](../workflows/upgrade.md) your cluster to a new release. It also lets you securely [recover](../workflows/recovery.md) a failed cluster. Both with a single command. -* **Support for Terraform**: Constellation includes a [Terraform provider](../workflows/terraform-provider.md) that lets you manage the full lifecycle of your cluster via Terraform. 
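The Day-2 operations mentioned above each map to a single CLI command. A minimal sketch, using commands from Constellation's CLI reference (the recovery endpoint is a placeholder):

```bash
# Check for available image, Kubernetes, and microservice upgrades,
# then apply them to the running cluster.
constellation upgrade check
constellation upgrade apply

# Recover a stopped cluster; replace the endpoint with the IP of a
# rebooting control-plane node.
constellation recover --endpoint 203.0.113.10
```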
diff --git a/docs/versioned_docs/version-2.22/overview/security-benefits.md b/docs/versioned_docs/version-2.22/overview/security-benefits.md deleted file mode 100644 index 51a8b64f5..000000000 --- a/docs/versioned_docs/version-2.22/overview/security-benefits.md +++ /dev/null @@ -1,22 +0,0 @@ -# Security benefits and threat model - -Constellation implements the [Confidential Kubernetes](confidential-kubernetes.md) concept and shields entire Kubernetes deployments from the infrastructure. More concretely, Constellation decreases the size of the trusted computing base (TCB) of a Kubernetes deployment. The TCB is the totality of elements in a computing environment that must be trusted not to be compromised. A smaller TCB results in a smaller attack surface. The following diagram shows how Constellation removes the *cloud & datacenter infrastructure* and the *physical hosts*, including the hypervisor, the host OS, and other components, from the TCB (red). Inside the confidential context (green), Kubernetes remains part of the TCB, but its integrity is attested and can be [verified](../workflows/verify-cluster.md). - -![TCB comparison](../_media/tcb.svg) - -Given this background, the following describes the concrete threat classes that Constellation addresses. - -## Insider access - -Employees and third-party contractors of cloud service providers (CSPs) have access to different layers of the cloud infrastructure. -This opens up a large attack surface where workloads and data can be read, copied, or manipulated. With Constellation, Kubernetes deployments are shielded from the infrastructure and thus such accesses are prevented. - -## Infrastructure-based attacks - -Malicious cloud users ("hackers") may break out of their tenancy and access other tenants' data. Advanced attackers may even be able to establish a permanent foothold within the infrastructure and access data over a longer period. Analogously to the *insider access* scenario, Constellation also prevents access to a deployment's data in this scenario. - -## Supply chain attacks - -Supply chain security is receiving lots of attention recently due to an [increasing number of recorded attacks](https://www.enisa.europa.eu/news/enisa-news/understanding-the-increase-in-supply-chain-security-attacks). For instance, a malicious actor could attempt to tamper Constellation node images (including Kubernetes and other software) before they're loaded in the confidential VMs of a cluster. Constellation uses [remote attestation](../architecture/attestation.md) in conjunction with public [transparency logs](../workflows/verify-cli.md) to prevent this. - -In the future, Constellation will extend this feature to customer workloads. This will enable cluster owners to create auditable policies that precisely define which containers can run in a given deployment. diff --git a/docs/versioned_docs/version-2.22/reference/cli.md b/docs/versioned_docs/version-2.22/reference/cli.md deleted file mode 100644 index 7cbc0be8d..000000000 --- a/docs/versioned_docs/version-2.22/reference/cli.md +++ /dev/null @@ -1,873 +0,0 @@ -# CLI reference - - - -Use the Constellation CLI to create and manage your clusters. 
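As a rough sketch of how the commands below fit together, a typical cluster lifecycle on GCP might look like this; all names, the project ID, and the zone are placeholders, and the exact sequence depends on your provider and configuration.

```bash
# Sketch of a typical lifecycle; all values are placeholders.
constellation config generate gcp
constellation iam create gcp \
  --prefix demo --projectID my-project --zone europe-west3-b \
  --update-config
constellation config fetch-measurements
constellation apply        # creates the infrastructure and initializes the cluster
constellation verify
constellation terminate    # tears the cluster down again
```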
- -Usage: - -``` -constellation [command] -``` -Commands: - -* [config](#constellation-config): Work with the Constellation configuration file - * [generate](#constellation-config-generate): Generate a default configuration and state file - * [fetch-measurements](#constellation-config-fetch-measurements): Fetch measurements for configured cloud provider and image - * [instance-types](#constellation-config-instance-types): Print the supported instance types for all cloud providers - * [kubernetes-versions](#constellation-config-kubernetes-versions): Print the Kubernetes versions supported by this CLI - * [migrate](#constellation-config-migrate): Migrate a configuration file to a new version -* [create](#constellation-create): Create instances on a cloud platform for your Constellation cluster -* [apply](#constellation-apply): Apply a configuration to a Constellation cluster -* [mini](#constellation-mini): Manage MiniConstellation clusters - * [up](#constellation-mini-up): Create and initialize a new MiniConstellation cluster - * [down](#constellation-mini-down): Destroy a MiniConstellation cluster -* [status](#constellation-status): Show status of a Constellation cluster -* [verify](#constellation-verify): Verify the confidential properties of a Constellation cluster -* [upgrade](#constellation-upgrade): Find and apply upgrades to your Constellation cluster - * [check](#constellation-upgrade-check): Check for possible upgrades - * [apply](#constellation-upgrade-apply): Apply an upgrade to a Constellation cluster -* [recover](#constellation-recover): Recover a completely stopped Constellation cluster -* [terminate](#constellation-terminate): Terminate a Constellation cluster -* [iam](#constellation-iam): Work with the IAM configuration on your cloud provider - * [create](#constellation-iam-create): Create IAM configuration on a cloud platform for your Constellation cluster - * [aws](#constellation-iam-create-aws): Create IAM configuration on AWS for your Constellation cluster - * [azure](#constellation-iam-create-azure): Create IAM configuration on Microsoft Azure for your Constellation cluster - * [gcp](#constellation-iam-create-gcp): Create IAM configuration on GCP for your Constellation cluster - * [destroy](#constellation-iam-destroy): Destroy an IAM configuration and delete local Terraform files - * [upgrade](#constellation-iam-upgrade): Find and apply upgrades to your IAM profile - * [apply](#constellation-iam-upgrade-apply): Apply an upgrade to an IAM profile -* [version](#constellation-version): Display version of this CLI -* [init](#constellation-init): Initialize the Constellation cluster -* [ssh](#constellation-ssh): Generate a certificate for emergency SSH access - -## constellation config - -Work with the Constellation configuration file - -### Synopsis - -Work with the Constellation configuration file. - -### Options - -``` - -h, --help help for config -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation config generate - -Generate a default configuration and state file - -### Synopsis - -Generate a default configuration and state file for your selected cloud provider. 
- -``` -constellation config generate {aws|azure|gcp|openstack|qemu|stackit} [flags] -``` - -### Options - -``` - -a, --attestation string attestation variant to use {aws-sev-snp|aws-nitro-tpm|azure-sev-snp|azure-tdx|azure-trustedlaunch|gcp-sev-snp|gcp-sev-es|qemu-vtpm}. If not specified, the default for the cloud provider is used - -h, --help help for generate - -k, --kubernetes string Kubernetes version to use in format MAJOR.MINOR (default "v1.30") - -t, --tags strings additional tags for created resources given a list of key=value -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation config fetch-measurements - -Fetch measurements for configured cloud provider and image - -### Synopsis - -Fetch measurements for configured cloud provider and image. - -A config needs to be generated first. - -``` -constellation config fetch-measurements [flags] -``` - -### Options - -``` - -h, --help help for fetch-measurements - -s, --signature-url string alternative URL to fetch measurements' signature from - -u, --url string alternative URL to fetch measurements from -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation config instance-types - -Print the supported instance types for all cloud providers - -### Synopsis - -Print the supported instance types for all cloud providers. - -``` -constellation config instance-types [flags] -``` - -### Options - -``` - -h, --help help for instance-types -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation config kubernetes-versions - -Print the Kubernetes versions supported by this CLI - -### Synopsis - -Print the Kubernetes versions supported by this CLI. - -``` -constellation config kubernetes-versions [flags] -``` - -### Options - -``` - -h, --help help for kubernetes-versions -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation config migrate - -Migrate a configuration file to a new version - -### Synopsis - -Migrate a configuration file to a new version. - -``` -constellation config migrate [flags] -``` - -### Options - -``` - -h, --help help for migrate -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation create - -Create instances on a cloud platform for your Constellation cluster - -### Synopsis - -Create instances on a cloud platform for your Constellation cluster. 
- -``` -constellation create [flags] -``` - -### Options - -``` - -h, --help help for create - -y, --yes create the cluster without further confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation apply - -Apply a configuration to a Constellation cluster - -### Synopsis - -Apply a configuration to a Constellation cluster to initialize or upgrade the cluster. - -``` -constellation apply [flags] -``` - -### Options - -``` - --conformance enable conformance mode - -h, --help help for apply - --merge-kubeconfig merge Constellation kubeconfig file with default kubeconfig file in $HOME/.kube/config - --skip-helm-wait install helm charts without waiting for deployments to be ready - --skip-phases strings comma-separated list of upgrade phases to skip - one or multiple of { infrastructure | init | attestationconfig | certsans | helm | image | k8s } - -y, --yes run command without further confirmation - WARNING: the command might delete or update existing resources without additional checks. Please read the docs. - -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation mini - -Manage MiniConstellation clusters - -### Synopsis - -Manage MiniConstellation clusters. - -### Options - -``` - -h, --help help for mini -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation mini up - -Create and initialize a new MiniConstellation cluster - -### Synopsis - -Create and initialize a new MiniConstellation cluster. - -A mini cluster consists of a single control-plane and worker node, hosted using QEMU/KVM. - -``` -constellation mini up [flags] -``` - -### Options - -``` - -h, --help help for up - --merge-kubeconfig merge Constellation kubeconfig file with default kubeconfig file in $HOME/.kube/config (default true) -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation mini down - -Destroy a MiniConstellation cluster - -### Synopsis - -Destroy a MiniConstellation cluster. - -``` -constellation mini down [flags] -``` - -### Options - -``` - -h, --help help for down - -y, --yes terminate the cluster without further confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation status - -Show status of a Constellation cluster - -### Synopsis - -Show the status of a constellation cluster. 
- -Shows microservice, image, and Kubernetes versions installed in the cluster. Also shows status of current version upgrades. - -``` -constellation status [flags] -``` - -### Options - -``` - -h, --help help for status -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation verify - -Verify the confidential properties of a Constellation cluster - -### Synopsis - -Verify the confidential properties of a Constellation cluster. -If arguments aren't specified, values are read from `constellation-state.yaml`. - -``` -constellation verify [flags] -``` - -### Options - -``` - --cluster-id string expected cluster identifier - -h, --help help for verify - -e, --node-endpoint string endpoint of the node to verify, passed as HOST[:PORT] - -o, --output string print the attestation document in the output format {json|raw} -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation upgrade - -Find and apply upgrades to your Constellation cluster - -### Synopsis - -Find and apply upgrades to your Constellation cluster. - -### Options - -``` - -h, --help help for upgrade -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation upgrade check - -Check for possible upgrades - -### Synopsis - -Check which upgrades can be applied to your Constellation Cluster. - -``` -constellation upgrade check [flags] -``` - -### Options - -``` - -h, --help help for check - --ref string the reference to use for querying new versions (default "-") - --stream string the stream to use for querying new versions (default "stable") - -u, --update-config update the specified config file with the suggested versions -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation upgrade apply - -Apply an upgrade to a Constellation cluster - -### Synopsis - -Apply an upgrade to a Constellation cluster by applying the chosen configuration. - -``` -constellation upgrade apply [flags] -``` - -### Options - -``` - --conformance enable conformance mode - -h, --help help for apply - --skip-helm-wait install helm charts without waiting for deployments to be ready - --skip-phases strings comma-separated list of upgrade phases to skip - one or multiple of { infrastructure | helm | image | k8s } - -y, --yes run upgrades without further confirmation - WARNING: might delete your resources in case you are using cert-manager in your cluster. Please read the docs. - WARNING: might unintentionally overwrite measurements in the running cluster. 
-``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation recover - -Recover a completely stopped Constellation cluster - -### Synopsis - -Recover a Constellation cluster by sending a recovery key to an instance in the boot stage. - -This is only required if instances restart without other instances available for bootstrapping. - -``` -constellation recover [flags] -``` - -### Options - -``` - -e, --endpoint string endpoint of the instance, passed as HOST[:PORT] - -h, --help help for recover -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation terminate - -Terminate a Constellation cluster - -### Synopsis - -Terminate a Constellation cluster. - -The cluster can't be started again, and all persistent storage will be lost. - -``` -constellation terminate [flags] -``` - -### Options - -``` - -h, --help help for terminate - -y, --yes terminate the cluster without further confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation iam - -Work with the IAM configuration on your cloud provider - -### Synopsis - -Work with the IAM configuration on your cloud provider. - -### Options - -``` - -h, --help help for iam -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation iam create - -Create IAM configuration on a cloud platform for your Constellation cluster - -### Synopsis - -Create IAM configuration on a cloud platform for your Constellation cluster. - -### Options - -``` - -h, --help help for create - --update-config update the config file with the specific IAM information - -y, --yes create the IAM configuration without further confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation iam create aws - -Create IAM configuration on AWS for your Constellation cluster - -### Synopsis - -Create IAM configuration on AWS for your Constellation cluster. - -``` -constellation iam create aws [flags] -``` - -### Options - -``` - -h, --help help for aws - --prefix string name prefix for all resources (required) - --zone string AWS availability zone the resources will be created in, e.g., us-east-2a (required) - See the Constellation docs for a list of currently supported regions. 
-``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - --update-config update the config file with the specific IAM information - -C, --workspace string path to the Constellation workspace - -y, --yes create the IAM configuration without further confirmation -``` - -## constellation iam create azure - -Create IAM configuration on Microsoft Azure for your Constellation cluster - -### Synopsis - -Create IAM configuration on Microsoft Azure for your Constellation cluster. - -``` -constellation iam create azure [flags] -``` - -### Options - -``` - -h, --help help for azure - --region string region the resources will be created in, e.g., westus (required) - --resourceGroup string name prefix of the two resource groups your cluster / IAM resources will be created in (required) - --servicePrincipal string name of the service principal that will be created (required) - --subscriptionID string subscription ID of the Azure account. Required if the 'ARM_SUBSCRIPTION_ID' environment variable is not set -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - --update-config update the config file with the specific IAM information - -C, --workspace string path to the Constellation workspace - -y, --yes create the IAM configuration without further confirmation -``` - -## constellation iam create gcp - -Create IAM configuration on GCP for your Constellation cluster - -### Synopsis - -Create IAM configuration on GCP for your Constellation cluster. - -``` -constellation iam create gcp [flags] -``` - -### Options - -``` - -h, --help help for gcp - --prefix string Prefix for the service account ID and VM ID that will be created (required) - Must be letters, digits, or hyphens. - --projectID string ID of the GCP project the configuration will be created in (required) - Find it on the welcome screen of your project: https://console.cloud.google.com/welcome - --zone string GCP zone the cluster will be deployed in (required) - Find a list of available zones here: https://cloud.google.com/compute/docs/regions-zones#available -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - --update-config update the config file with the specific IAM information - -C, --workspace string path to the Constellation workspace - -y, --yes create the IAM configuration without further confirmation -``` - -## constellation iam destroy - -Destroy an IAM configuration and delete local Terraform files - -### Synopsis - -Destroy an IAM configuration and delete local Terraform files. 
- -``` -constellation iam destroy [flags] -``` - -### Options - -``` - -h, --help help for destroy - -y, --yes destroy the IAM configuration without asking for confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation iam upgrade - -Find and apply upgrades to your IAM profile - -### Synopsis - -Find and apply upgrades to your IAM profile. - -### Options - -``` - -h, --help help for upgrade -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation iam upgrade apply - -Apply an upgrade to an IAM profile - -### Synopsis - -Apply an upgrade to an IAM profile. - -``` -constellation iam upgrade apply [flags] -``` - -### Options - -``` - -h, --help help for apply - -y, --yes run upgrades without further confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation version - -Display version of this CLI - -### Synopsis - -Display version of this CLI. - -``` -constellation version [flags] -``` - -### Options - -``` - -h, --help help for version -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation init - -Initialize the Constellation cluster - -### Synopsis - -Initialize the Constellation cluster. - -Start your confidential Kubernetes. - -``` -constellation init [flags] -``` - -### Options - -``` - --conformance enable conformance mode - -h, --help help for init - --merge-kubeconfig merge Constellation kubeconfig file with default kubeconfig file in $HOME/.kube/config - --skip-helm-wait install helm charts without waiting for deployments to be ready -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation ssh - -Generate a certificate for emergency SSH access - -### Synopsis - -Generate a certificate for emergency SSH access to your SSH-enabled constellation cluster. 
- -``` -constellation ssh [flags] -``` - -### Options - -``` - -h, --help help for ssh - --key string the path to an existing SSH public key -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - diff --git a/docs/versioned_docs/version-2.22/reference/migration.md b/docs/versioned_docs/version-2.22/reference/migration.md deleted file mode 100644 index 36bfb1462..000000000 --- a/docs/versioned_docs/version-2.22/reference/migration.md +++ /dev/null @@ -1,134 +0,0 @@ -# Migrations - -This document describes breaking changes and migrations between Constellation releases. -Use [`constellation config migrate`](./cli.md#constellation-config-migrate) to automatically update an old config file to a new format. - -## Migrations to v2.19.1 - -### Azure - -* During the upgrade, security rules are migrated and the old ones need to be cleaned up manually by the user. The below script shows how to delete them through the Azure CLI: - -```bash -#!/usr/bin/env bash -name="" # the name provided in the config -uid="" # the cluster id can be retrieved via `yq '.infrastructure.uid' constellation-state.yaml` -resource_group="" # the RG can be retrieved via `yq '.provider.azure.resourceGroup' constellation-conf.yaml` - -rules=( - "kubernetes" - "bootstrapper" - "verify" - "recovery" - "join" - "debugd" - "konnectivity" -) - -for rule in "${rules[@]}"; do - echo "Deleting rule: ${rule}" - az network nsg rule delete \ - --resource-group "${resource_group}" \ - --nsg-name "${name}-${uid}" \ - --name "${rule}" -done - -echo "All specified rules have been deleted." -``` - -## Migrating from CLI versions before 2.21.1 - -### AWS - -* AWS clusters that use `LoadBalancer` resources require more IAM permissions. Please upgrade your IAM roles using `constellation iam upgrade apply`. This will show necessary changes and apply them, if desired. - -## Migrating from CLI versions before 2.19.0 - -### Azure - -* To allow seamless upgrades on Azure when Kubernetes services of type `LoadBalancer` are deployed, the target - load balancer in which the `cloud-controller-manager` creates load balancing rules was changed. Instead of using the load balancer - created and maintained by the CLI's Terraform code, the `cloud-controller-manager` now creates its own load balancer in Azure. - If your Constellation has services of type `LoadBalancer`, please remove them before the upgrade and re-apply them - afterward. - -## Migrating from CLI versions before 2.18.0 - -* The `provider.azure.appClientID` and `provider.azure.appClientSecret` fields are no longer supported and should be removed. -* To keep using an existing UAMI, add the `Owner` permission with the scope of your `resourceGroup`. -* Otherwise, simply [create new Constellation IAM credentials](../workflows/config.md#creating-an-iam-configuration) and use the created UAMI. -* To migrate the authentication for an existing cluster on Azure to an UAMI with the necessary permissions: - 1. Remove the `aadClientId` and `aadClientSecret` from the azureconfig secret. - 2. Set `useManagedIdentityExtension` to `true` and use the `userAssignedIdentity` from the Constellation config for the value of `userAssignedIdentityID`. - 3. Restart the CSI driver, cloud controller manager, cluster autoscaler, and Constellation operator pods. 
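-
-As a rough sketch of step 3 above (the resource names below are placeholders, not the actual workload names; list the workloads in your cluster first to find the exact names):
-
-```bash
-# Find the exact names of the affected workloads
-kubectl get deployments,daemonsets -n kube-system
-
-# Restart the affected components (names are illustrative only)
-kubectl rollout restart -n kube-system deployment/cloud-controller-manager
-kubectl rollout restart -n kube-system deployment/constellation-cluster-autoscaler
-kubectl rollout restart -n kube-system deployment/constellation-operator-controller-manager
-kubectl rollout restart -n kube-system daemonset/csi-driver-node
-```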
- -## Migrating from CLI versions before 2.10 - -* AWS cluster upgrades require additional IAM permissions for the newly introduced `aws-load-balancer-controller`. Please upgrade your IAM roles using `iam upgrade apply`. This will show necessary changes and apply them, if desired. -* The global `nodeGroups` field was added. -* The fields `instanceType`, `stateDiskSizeGB`, and `stateDiskType` for each cloud provider are now part of the configuration of individual node groups. -* The `constellation create` command no longer uses the flags `--control-plane-count` and `--worker-count`. Instead, the initial node count is configured per node group in the `nodeGroups` field. - -## Migrating from CLI versions before 2.9 - -* The `provider.azure.appClientID` and `provider.azure.clientSecretValue` fields were removed to enforce migration to managed identity authentication - -## Migrating from CLI versions before 2.8 - -* The `measurements` field for each cloud service provider was replaced with a global `attestation` field. -* The `confidentialVM`, `idKeyDigest`, and `enforceIdKeyDigest` fields for the Azure cloud service provider were removed in favor of using the global `attestation` field. -* The optional global field `attestationVariant` was replaced by the now required `attestation` field. - -## Migrating from CLI versions before 2.3 - -* The `sshUsers` field was deprecated in v2.2 and has been removed from the configuration in v2.3. - As an alternative for SSH, check the workflow section [Connect to nodes](../workflows/troubleshooting.md#node-shell-access). -* The `image` field for each cloud service provider has been replaced with a global `image` field. Use the following mapping to migrate your configuration: -
- Show all - - | CSP | old image | new image | - | ----- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- | - | AWS | `ami-06b8cbf4837a0a57c` | `v2.2.2` | - | AWS | `ami-02e96dc04a9e438cd` | `v2.2.2` | - | AWS | `ami-028ead928a9034b2f` | `v2.2.2` | - | AWS | `ami-032ac10dd8d8266e3` | `v2.2.1` | - | AWS | `ami-032e0d57cc4395088` | `v2.2.1` | - | AWS | `ami-053c3e49e19b96bdd` | `v2.2.1` | - | AWS | `ami-0e27ebcefc38f648b` | `v2.2.0` | - | AWS | `ami-098cd37f66523b7c3` | `v2.2.0` | - | AWS | `ami-04a87d302e2509aad` | `v2.2.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.2.2` | `v2.2.2` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.2.2` | `v2.2.2` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.2.1` | `v2.2.1` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.2.1` | `v2.2.1` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.2.0` | `v2.2.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.2.0` | `v2.2.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.1.0` | `v2.1.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.1.0` | `v2.1.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.0.0` | `v2.0.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.0.0` | `v2.0.0` | - | GCP | `projects/constellation-images/global/images/constellation-v2-2-2` | `v2.2.2` | - | GCP | `projects/constellation-images/global/images/constellation-v2-2-1` | `v2.2.1` | - | GCP | `projects/constellation-images/global/images/constellation-v2-2-0` | `v2.2.0` | - | GCP | `projects/constellation-images/global/images/constellation-v2-1-0` | `v2.1.0` | - | GCP | `projects/constellation-images/global/images/constellation-v2-0-0` | `v2.0.0` | - -
-* The `enforcedMeasurements` field has been removed and merged with the `measurements` field. - * To migrate your config containing a new image (`v2.3` or greater), remove the old `measurements` and `enforcedMeasurements` entries from your config and run `constellation fetch-measurements` - * To migrate your config containing an image older than `v2.3`, remove the `enforcedMeasurements` entry and replace the entries in `measurements` as shown in the example below: - - ```diff - measurements: - - 0: DzXCFGCNk8em5ornNZtKi+Wg6Z7qkQfs5CfE3qTkOc8= - + 0: - + expected: DzXCFGCNk8em5ornNZtKi+Wg6Z7qkQfs5CfE3qTkOc8= - + warnOnly: true - - 8: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= - + 8: - + expected: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= - + warnOnly: false - -enforcedMeasurements: - - - 8 - ``` diff --git a/docs/versioned_docs/version-2.22/reference/slsa.md b/docs/versioned_docs/version-2.22/reference/slsa.md deleted file mode 100644 index 21f4e713c..000000000 --- a/docs/versioned_docs/version-2.22/reference/slsa.md +++ /dev/null @@ -1,73 +0,0 @@ -# Supply chain levels for software artifacts (SLSA) adoption - -[Supply chain Levels for Software Artifacts, or SLSA (salsa)](https://slsa.dev/) is a framework for improving and grading a project's build system and engineering processes. SLSA focuses on security improvements for source code storage as well as build system definition, execution, and observation. SLSA is structured in [four levels](https://slsa.dev/spec/v0.1/levels). This page describes the adoption of SLSA for Constellation. - -:::info -SLSA is still in alpha status. The presented levels and their requirements might change in the future. We will adopt any changes into our engineering processes, as they get defined. -::: - -## Level 1 - Adopted - -**[Build - Scripted](https://slsa.dev/spec/v0.1/requirements#scripted-build)** - -All build steps are automated via [Bazel](https://github.com/edgelesssys/constellation/tree/main/bazel/ci) and [GitHub Actions](https://github.com/edgelesssys/constellation/tree/main/.github). - -**[Provenance - Available](https://slsa.dev/spec/v0.1/requirements#available)** - -Provenance for the CLI is generated using the [slsa-github-generator](https://github.com/slsa-framework/slsa-github-generator). - -## Level 2 - Adopted - -**[Source - Version Controlled](https://slsa.dev/spec/v0.1/requirements#version-controlled)** - -Constellation is hosted on GitHub using git. - -**[Build - Build Service](https://slsa.dev/spec/v0.1/requirements#build-service)** - -All builds are carried out by [GitHub Actions](https://github.com/edgelesssys/constellation/tree/main/.github). - -**[Provenance - Authenticated](https://slsa.dev/spec/v0.1/requirements#authenticated)** - -Provenance for the CLI is signed using the [slsa-github-generator](https://github.com/slsa-framework/slsa-github-generator). Learn [how to verify the CLI](../workflows/verify-cli.md) using the signed provenance, before using it for the first time. - -**[Provenance - Service Generated](https://slsa.dev/spec/v0.1/requirements#service-generated)** - -Provenance for the CLI is generated using the [slsa-github-generator](https://github.com/slsa-framework/slsa-github-generator) in GitHub Actions. 
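-
-As an illustrative, non-official sketch of such a verification (the artifact name, provenance file name, and release tag are placeholders), the generic [slsa-verifier](https://github.com/slsa-framework/slsa-verifier) tool can check a downloaded CLI binary against its signed provenance:
-
-```bash
-# Verify that the binary was built from the expected source repository and tag
-slsa-verifier verify-artifact constellation-linux-amd64 \
-  --provenance-path constellation.intoto.jsonl \
-  --source-uri github.com/edgelesssys/constellation \
-  --source-tag v2.22.0
-```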
-
-## Level 3 - Adopted
-
-**[Source - Verified History](https://slsa.dev/spec/v0.1/requirements#verified-history)**
-
-The [Edgeless Systems](https://github.com/edgelesssys) GitHub organization [requires two-factor authentication](https://docs.github.com/en/organizations/keeping-your-organization-secure/managing-two-factor-authentication-for-your-organization/requiring-two-factor-authentication-in-your-organization) for all members.
-
-**[Source - Retained Indefinitely](https://slsa.dev/spec/v0.1/requirements#retained-indefinitely)**
-
-Since we use GitHub to host the repository, an external person can't modify or delete the history. Before a pull request can be merged, an explicit approval from an [Edgeless Systems](https://github.com/edgelesssys) team member is required.
-
-The same holds true for changes proposed by team members. Each change to `main` needs to be proposed via a pull request and requires at least one approval.
-
-The [Edgeless Systems](https://github.com/edgelesssys) GitHub organization admins control these settings and are able to make changes to the repository's history should legal requirements necessitate it. These changes require two-party approval following the obliterate policy.
-
-**[Build - Build as Code](https://slsa.dev/spec/v0.1/requirements#build-as-code)**
-
-All build files for Constellation are stored in [the same repository](https://github.com/edgelesssys/constellation/tree/main/.github).
-
-**[Build - Ephemeral Environment](https://slsa.dev/spec/v0.1/requirements#ephemeral-environment)**
-
-All GitHub Actions workflows are executed on [GitHub-hosted runners](https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners). These runners are only available for the duration of a workflow run.
-
-We currently don't use [self-hosted runners](https://docs.github.com/en/actions/hosting-your-own-runners/about-self-hosted-runners).
-
-**[Build - Isolated](https://slsa.dev/spec/v0.1/requirements#isolated)**
-
-As outlined in the previous section, we use GitHub-hosted runners, which provide a new, isolated, and ephemeral environment for each build.
-
-Additionally, the [SLSA GitHub generator](https://github.com/slsa-framework/slsa-github-generator#generation-of-provenance) itself is run in an isolated workflow with the artifact hash as a defined input.
-
-**[Provenance - Non-falsifiable](https://slsa.dev/spec/v0.1/requirements#non-falsifiable)**
-
-As outlined by the [SLSA GitHub generator](https://github.com/slsa-framework/slsa-github-generator) documentation, it already fulfills the non-falsifiable requirements for SLSA Level 3. The generated provenance is signed using [sigstore](https://sigstore.dev/) with an OIDC-based proof of identity.
-
-## Level 4 - In Progress
-
-We strive to adopt certain aspects of SLSA Level 4 that support our engineering process. At the same time, SLSA is still in alpha status, and the biggest changes to SLSA are expected around Level 4.
diff --git a/docs/versioned_docs/version-2.22/reference/terraform.md b/docs/versioned_docs/version-2.22/reference/terraform.md
deleted file mode 100644
index 9825a8bb8..000000000
--- a/docs/versioned_docs/version-2.22/reference/terraform.md
+++ /dev/null
@@ -1,37 +0,0 @@
-# Terraform usage
-
-[Terraform](https://www.terraform.io/) is an Infrastructure as Code (IaC) framework to manage cloud resources. This page explains how Constellation uses it internally and how advanced users may use it manually to gain more control over resource creation.
- -:::info -Information on this page is intended for users who are familiar with Terraform. -It's not required for common usage of Constellation. -See the [Terraform documentation](https://developer.hashicorp.com/terraform/docs) if you want to learn more about it. -::: - -## Terraform state files - -Constellation keeps Terraform state files in subdirectories of the workspace together with the corresponding Terraform configuration files and metadata. -The subdirectories are created on the first Constellation CLI action that uses Terraform internally. - -Currently, these subdirectories are: - -* `constellation-terraform` - Terraform state files for the resources of the Constellation cluster -* `constellation-iam-terraform` - Terraform state files for IAM configuration - -As with all commands, commands that work with these files (e.g., `apply`, `terminate`, `iam`) have to be executed from the root of the cluster's [workspace directory](../architecture/orchestration.md#workspaces). You usually don't need and shouldn't manipulate or delete the subdirectories manually. - -## Interacting with Terraform manually - -Manual interaction with Terraform state created by Constellation (i.e., via the Terraform CLI) should only be performed by experienced users. It may lead to unrecoverable loss of cloud resources. For the majority of users and use cases, the interaction done by the [Constellation CLI](cli.md) is sufficient. - -## Terraform debugging - -To debug Terraform issues, the Constellation CLI offers the `tf-log` flag. You can set it to any of [Terraform's log levels](https://developer.hashicorp.com/terraform/internals/debugging): -* `JSON` (JSON-formatted logs at `TRACE` level) -* `TRACE` -* `DEBUG` -* `INFO` -* `WARN` -* `ERROR` - -The log output is written to the `terraform.log` file in the workspace directory. The output is appended to the file on each run. diff --git a/docs/versioned_docs/version-2.22/workflows/cert-manager.md b/docs/versioned_docs/version-2.22/workflows/cert-manager.md deleted file mode 100644 index 1d847e8bf..000000000 --- a/docs/versioned_docs/version-2.22/workflows/cert-manager.md +++ /dev/null @@ -1,13 +0,0 @@ -# Install cert-manager - -:::caution -If you want to use cert-manager with Constellation, pay attention to the following to avoid potential pitfalls. -::: - -Constellation ships with cert-manager preinstalled. -The default installation is part of the `kube-system` namespace, as all other Constellation-managed microservices. -You are free to install more instances of cert-manager into other namespaces. -However, be aware that any new installation needs to use the same version as the one installed with Constellation or rely on the same CRD versions. -Also remember to set the `installCRDs` value to `false` when installing new cert-manager instances. -It will create problems if you have two installations of cert-manager depending on different versions of the installed CRDs. -CRDs are cluster-wide resources and cert-manager depends on specific versions of those CRDs for each release. diff --git a/docs/versioned_docs/version-2.22/workflows/config.md b/docs/versioned_docs/version-2.22/workflows/config.md deleted file mode 100644 index 7868ff1be..000000000 --- a/docs/versioned_docs/version-2.22/workflows/config.md +++ /dev/null @@ -1,353 +0,0 @@ -# Configure your cluster - -:::info -This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. 
-::: - - - ---- - -Before you can create your cluster, you need to configure the identity and access management (IAM) for your cloud service provider (CSP) and choose machine types for the nodes. - -## Creating the configuration file - -You can generate a configuration file for your CSP by using the following CLI command: - - - - -```bash -constellation config generate aws -``` - - - - -```bash -constellation config generate azure -``` - - - - -```bash -constellation config generate gcp -``` - - - - -```bash -constellation config generate stackit -``` - - - - -This creates the file `constellation-conf.yaml` in the current directory. - -## Choosing a VM type - -Constellation supports the following VM types: - - - -By default, Constellation uses `m6a.xlarge` VMs (4 vCPUs, 16 GB RAM) to create your cluster. -Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file. -If you are using the default attestation variant `awsSEVSNP`, you can use the instance types described in [AWS's AMD SEV-SNP docs](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/snp-requirements.html). -Please mind the region restrictions mentioned in the [Getting started](../getting-started/first-steps.md#create-a-cluster) section. - -If you are using the attestation variant `awsNitroTPM`, you can choose any of the [nitroTPM-enabled instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enable-nitrotpm-prerequisites.html). - -The Constellation CLI can also print the supported instance types with: `constellation config instance-types`. - - - - -By default, Constellation uses `Standard_DC4as_v5` CVMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file. For CVMs, any VM type with a minimum of 4 vCPUs from the [DCasv5 & DCadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series) or [ECasv5 & ECadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/ecasv5-ecadsv5-series) families is supported. - -You can also run `constellation config instance-types` to get the list of all supported options. - - - - -By default, Constellation uses `n2d-standard-4` VMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file. Supported are all machines with a minimum of 4 vCPUs from the [C2D](https://cloud.google.com/compute/docs/compute-optimized-machines#c2d_machine_types) or [N2D](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines) family. You can run `constellation config instance-types` to get the list of all supported options. - - - - -By default, Constellation uses `m1a.4cd` VMs (4 vCPUs, 30 GB RAM) to create your cluster. -Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file. - -The following instance types are known to be supported: - -| name | vCPUs | GB RAM | -|----------|-------|--------| -| m1a.4cd | 4 | 30 | -| m1a.8cd | 8 | 60 | -| m1a.16cd | 16 | 120 | -| m1a.30cd | 30 | 230 | - -You can choose any of the SEV-enabled instance types. You can find a list of all supported instance types in the [STACKIT documentation](https://docs.stackit.cloud/stackit/en/virtual-machine-flavors-75137231.html). - -The Constellation CLI can also print the supported instance types with: `constellation config instance-types`. 
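-
-For example, you can list the supported types and then set the type for a node group. This is only a sketch: the `yq` invocation below (yq v4) and the chosen type are illustrative, and you can just as well edit the file in a text editor.
-
-```bash
-# List the instance types supported by this CLI version
-constellation config instance-types
-
-# Set the instance type of the default worker node group (adjust group name and type to your needs)
-yq eval -i '.nodeGroups.worker_default.instanceType = "m1a.8cd"' constellation-conf.yaml
-```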
- - - - -Fill the desired VM type into the `instanceType` fields in the `constellation-conf.yml` file. - -## Creating additional node groups - -By default, Constellation creates the node groups `control_plane_default` and `worker_default` for control-plane nodes and workers, respectively. -If you require additional control-plane or worker groups with different instance types, zone placements, or disk sizes, you can add additional node groups to the `constellation-conf.yml` file. -Each node group can be scaled individually. - -Consider the following example for AWS: - -```yaml -nodeGroups: - control_plane_default: - role: control-plane - instanceType: c6a.xlarge - stateDiskSizeGB: 30 - stateDiskType: gp3 - zone: eu-west-1c - initialCount: 3 - worker_default: - role: worker - instanceType: c6a.xlarge - stateDiskSizeGB: 30 - stateDiskType: gp3 - zone: eu-west-1c - initialCount: 2 - high_cpu: - role: worker - instanceType: c6a.24xlarge - stateDiskSizeGB: 128 - stateDiskType: gp3 - zone: eu-west-1c - initialCount: 1 -``` - -This configuration creates an additional node group `high_cpu` with a larger instance type and disk. - -You can use the field `zone` to specify what availability zone nodes of the group are placed in. -On Azure, this field is empty by default and nodes are automatically spread across availability zones. -STACKIT currently offers SEV-enabled CPUs in the `eu01-1`, `eu01-2`, and `eu01-3` zones. -Consult the documentation of your cloud provider for more information: - -* [AWS](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/) -* [Azure](https://azure.microsoft.com/en-us/explore/global-infrastructure/availability-zones) -* [GCP](https://cloud.google.com/compute/docs/regions-zones) -* [STACKIT](https://docs.stackit.cloud/stackit/en/regions-and-availability-zones-75137212.html) - -## Choosing a Kubernetes version - -To learn which Kubernetes versions can be installed with your current CLI, you can run `constellation config kubernetes-versions`. -See also Constellation's [Kubernetes support policy](../architecture/versions.md#kubernetes-support-policy). - -## Creating an IAM configuration - -You can create an IAM configuration for your cluster automatically using the `constellation iam create` command. -If you already have a Constellation configuration file, you can add the `--update-config` flag to the command. This writes the needed IAM fields into your configuration. Furthermore, the flag updates the zone/region of the configuration if it hasn't been set yet. - - - - -You must be authenticated with the [AWS CLI](https://aws.amazon.com/en/cli/) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). - -```bash -constellation iam create aws --zone=us-east-2a --prefix=constellTest -``` - -This command creates IAM configuration for the AWS zone `us-east-2a` using the prefix `constellTest` for all named resources being created. - -Constellation OS images are currently replicated to the following regions: - -* `eu-central-1` -* `eu-west-1` -* `eu-west-3` -* `us-east-2` -* `ap-south-1` - -If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+AWS+image+region:+xx-xxxx-x). 
- -You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). - -Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - - - -You must be authenticated with the [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). - -```bash -constellation iam create azure --subscriptionID 00000000-0000-0000-0000-000000000000 --region=westus --resourceGroup=constellTest --servicePrincipal=spTest -``` - -This command creates IAM configuration on the Azure region `westus` creating a new resource group `constellTest` and a new service principal `spTest`. - -CVMs are available in several Azure regions. Constellation OS images are currently replicated to the following: - -* `germanywestcentral` -* `westus` -* `eastus` -* `northeurope` -* `westeurope` -* `southeastasia` - -If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+Azure+image+region:+xx-xxxx-x). - -You can find a list of all [regions in Azure's documentation](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=virtual-machines®ions=all). - -Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - - - -You must be authenticated with the [GCP CLI](https://cloud.google.com/sdk/gcloud) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). - -```bash -constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --prefix=constell-test -``` - -This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. - -Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `N2D`. - -Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - - - -STACKIT requires manual creation and configuration of service accounts. Look at the [first steps](../getting-started/first-steps.md) for more information. - - - - -
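-
-Putting the pieces together, a typical flow (shown for AWS as an illustrative sketch; the zone and prefix values are placeholders) is to generate the configuration first and then let `iam create` fill in the IAM fields:
-
-```bash
-# Generate the configuration file for your CSP
-constellation config generate aws
-
-# Create the IAM resources and write the resulting values into constellation-conf.yaml
-constellation iam create aws \
-  --zone=us-east-2a \
-  --prefix=constellTest \
-  --update-config --yes
-```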
-Alternatively, you can manually create the IAM configuration on your CSP. - -The following describes the configuration fields and how you obtain the required information or create the required resources. - - - - -* **region**: The name of your chosen AWS data center region, e.g., `us-east-2`. - - Constellation OS images are currently replicated to the following regions: - * `eu-central-1` - * `eu-west-1` - * `eu-west-3` - * `us-east-2` - * `ap-south-1` - - If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+AWS+image+region:+xx-xxxx-x). - - You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). - -* **zone**: The name of your chosen AWS data center availability zone, e.g., `us-east-2a`. - - Learn more about [availability zones in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-availability-zones). - -* **iamProfileControlPlane**: The name of an IAM instance profile attached to all control-plane nodes. - - You can create the resource with [Terraform](https://www.terraform.io/). For that, use the [provided Terraform script](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam) to generate the necessary profile. The profile name will be provided as Terraform output value: `control_plane_instance_profile_name`. - - Alternatively, you can create the AWS profile with a tool of your choice. Use the JSON policy in [main.tf](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam/main.tf) in the resource `aws_iam_policy.control_plane_policy`. - -* **iamProfileWorkerNodes**: The name of an IAM instance profile attached to all worker nodes. - - You can create the resource with [Terraform](https://www.terraform.io/). For that, use the [provided Terraform script](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam) to generate the necessary profile. The profile name will be provided as Terraform output value: `worker_nodes_instance_profile_name`. - - Alternatively, you can create the AWS profile with a tool of your choice. Use the JSON policy in [main.tf](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam/main.tf) in the resource `aws_iam_policy.worker_node_policy`. - - - - -* **subscription**: The UUID of your Azure subscription, e.g., `8b8bd01f-efd9-4113-9bd1-c82137c32da7`. - - You can view your subscription UUID via `az account show` and read the `id` field. For more information refer to [Azure's documentation](https://docs.microsoft.com/en-us/azure/azure-portal/get-subscription-tenant-id#find-your-azure-subscription). - -* **tenant**: The UUID of your Azure tenant, e.g., `3400e5a2-8fe2-492a-886c-38cb66170f25`. - - You can view your tenant UUID via `az account show` and read the `tenant` field. For more information refer to [Azure's documentation](https://docs.microsoft.com/en-us/azure/azure-portal/get-subscription-tenant-id#find-your-azure-ad-tenant). - -* **location**: The Azure datacenter location you want to deploy your cluster in, e.g., `westus`. - - CVMs are available in several Azure regions. 
Constellation OS images are currently replicated to the following: - - * `germanywestcentral` - * `westus` - * `eastus` - * `northeurope` - * `westeurope` - * `southeastasia` - - If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+Azure+image+region:+xx-xxxx-x). - - You can find a list of all [regions in Azure's documentation](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=virtual-machines®ions=all). - -* **resourceGroup**: [Create a new resource group in Azure](https://learn.microsoft.com/azure/azure-resource-manager/management/manage-resource-groups-portal) for your Constellation cluster. Set this configuration field to the name of the created resource group. - -* **userAssignedIdentity**: [Create a new managed identity in Azure](https://learn.microsoft.com/azure/active-directory/managed-identities-azure-resources/how-manage-user-assigned-managed-identities). You should create the identity in a different resource group as all resources within the cluster resource group will be deleted on cluster termination. - - Add three role assignments to the identity: `Owner`, `Virtual Machine Contributor`, and `Application Insights Component Contributor`. The `scope` of all three should refer to the previously created cluster resource group. - - Set the configuration value to the full ID of the created identity, e.g., `/subscriptions/8b8bd01f-efd9-4113-9bd1-c82137c32da7/resourcegroups/constellation-identity/providers/Microsoft.ManagedIdentity/userAssignedIdentities/constellation-identity`. You can get it by opening the `JSON View` from the `Overview` section of the identity. - - The user-assigned identity is used by instances of the cluster to access other cloud resources. - For more information about managed identities refer to [Azure's documentation](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/how-manage-user-assigned-managed-identities). - - - - -* **project**: The ID of your GCP project, e.g., `constellation-129857`. - - You can find it on the [welcome screen of your GCP project](https://console.cloud.google.com/welcome). For more information refer to [Google's documentation](https://support.google.com/googleapi/answer/7014113). - -* **region**: The GCP region you want to deploy your cluster in, e.g., `us-central1`. - - You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). - -* **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-central1-a`. - - You can find a [list of all zones in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). - -* **serviceAccountKeyPath**: To configure this, you need to create a GCP [service account](https://cloud.google.com/iam/docs/service-accounts) with the following permissions: - - * `Compute Instance Admin (v1) (roles/compute.instanceAdmin.v1)` - * `Compute Network Admin (roles/compute.networkAdmin)` - * `Compute Security Admin (roles/compute.securityAdmin)` - * `Compute Storage Admin (roles/compute.storageAdmin)` - * `Service Account User (roles/iam.serviceAccountUser)` - - Afterward, create and download a new JSON key for this service account. Place the downloaded file in your Constellation workspace, and set the config parameter to the filename, e.g., `constellation-129857-15343dba46cb.json`. 
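-
-  A non-authoritative sketch of this manual setup with the gcloud CLI (the project ID, service account name, and key file name are placeholders) might look like the following:
-
-  ```bash
-  # Create the service account
-  gcloud iam service-accounts create constellation-sa --project=constellation-129857
-
-  # Grant the required roles listed above
-  for role in roles/compute.instanceAdmin.v1 roles/compute.networkAdmin \
-              roles/compute.securityAdmin roles/compute.storageAdmin \
-              roles/iam.serviceAccountUser; do
-    gcloud projects add-iam-policy-binding constellation-129857 \
-      --member="serviceAccount:constellation-sa@constellation-129857.iam.gserviceaccount.com" \
-      --role="${role}"
-  done
-
-  # Create and download a JSON key into the Constellation workspace
-  gcloud iam service-accounts keys create constellation-129857-key.json \
-    --iam-account=constellation-sa@constellation-129857.iam.gserviceaccount.com
-  ```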
- - - - -STACKIT requires manual creation and configuration of service accounts. Look at the [first steps](../getting-started/first-steps.md) for more information. - - - -
- -Now that you've configured your CSP, you can [create your cluster](./create.md). - -## Deleting an IAM configuration - -You can keep a created IAM configuration and reuse it for new clusters. Alternatively, you can also delete it if you don't want to use it anymore. - -Delete the IAM configuration by executing the following command in the same directory where you executed `constellation iam create` (the directory that contains [`constellation-iam-terraform`](../reference/terraform.md) as a subdirectory): - -```bash -constellation iam destroy -``` - -:::caution -For Azure, deleting the IAM configuration by executing `constellation iam destroy` will delete the whole resource group created by `constellation iam create`. -This also includes any additional resources in the resource group that weren't created by Constellation. -::: diff --git a/docs/versioned_docs/version-2.22/workflows/create.md b/docs/versioned_docs/version-2.22/workflows/create.md deleted file mode 100644 index 6074ebb16..000000000 --- a/docs/versioned_docs/version-2.22/workflows/create.md +++ /dev/null @@ -1,93 +0,0 @@ -# Create your cluster - -:::info -This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. -::: - - - ---- - -Creating your cluster happens through multiple phases. -The most significant ones are: - -1. Creating the necessary resources in your cloud environment -2. Bootstrapping the Constellation cluster and setting up a connection -3. Installing the necessary Kubernetes components - -`constellation apply` handles all this in a single command. -You can use the `--skip-phases` flag to skip specific phases of the process. -For example, if you created the infrastructure manually, you can skip the cloud resource creation phase. - -See the [architecture](../architecture/orchestration.md) section for details on the inner workings of this process. - -:::tip -If you don't have a cloud subscription, you can also set up a [local Constellation cluster using virtualization](../getting-started/first-steps-local.md) for testing. -::: - -Before you create the cluster, make sure to have a [valid configuration file](./config.md). - - - - -```bash -constellation apply -``` - -`apply` stores the state of your cluster's cloud resources in a [`constellation-terraform`](../architecture/orchestration.md#cluster-creation-process) directory in your workspace. - - - - -Self-managed infrastructure allows for more flexibility in the setup, by separating the infrastructure setup from the Constellation cluster management. -This provides flexibility in DevOps and can meet potential regulatory requirements. -It's recommended to use Terraform for infrastructure management, but you can use any tool of your choice. - -:::info - - When using Terraform, you can use the [Constellation Terraform provider](./terraform-provider.md) to manage the entire Constellation cluster lifecycle. - -::: - -You can refer to the Terraform files for the selected CSP from the [Constellation GitHub repository](https://github.com/edgelesssys/constellation/tree/main/terraform/infrastructure) for a minimum Constellation cluster configuration. From this base, you can now add, edit, or substitute resources per your own requirements with the infrastructure -management tooling of your choice. You need to keep the essential functionality of the base configuration in order for your cluster to function correctly. - - - -:::info - - On Azure, a manual update to the MAA provider's policy is necessary. 
- You can apply the update with the following command after creating the infrastructure, with `` being the URL of the MAA provider (i.e., `$(terraform output attestation_url | jq -r)`, when using the minimal Terraform configuration).
-
- ```bash
- constellation maa-patch
- ```
-
-:::
-
-
-
-Make sure all necessary resources are created, e.g., by checking your CSP's portal, and retrieve the necessary values, aligned with the outputs (specified in `outputs.tf`) of the base configuration.
-
-Fill these outputs into the corresponding fields of the `Infrastructure` block inside the `constellation-state.yaml` file. For example, fill the IP or DNS name your cluster can be reached at into the `.Infrastructure.ClusterEndpoint` field.
-
-With the required cloud resources set up, continue with initializing your cluster.
-
-```bash
-constellation apply --skip-phases=infrastructure
-```
-
-
-
-
-Finally, configure `kubectl` for your cluster:
-
-```bash
-export KUBECONFIG="$PWD/constellation-admin.conf"
-```
-
-🏁 That's it. You've successfully created a Constellation cluster.
-
-### Troubleshooting
-
-In case `apply` fails, the CLI collects logs from the bootstrapping instance and stores them inside `constellation-cluster.log`.
diff --git a/docs/versioned_docs/version-2.22/workflows/lb.md b/docs/versioned_docs/version-2.22/workflows/lb.md
deleted file mode 100644
index 868e61076..000000000
--- a/docs/versioned_docs/version-2.22/workflows/lb.md
+++ /dev/null
@@ -1,28 +0,0 @@
-# Expose a service
-
-Constellation integrates the native load balancers of each CSP. Therefore, to expose a service, simply [create a service of type `LoadBalancer`](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer).
-
-## Internet-facing LB service on AWS
-
-To expose your application service externally, you might want to use a Kubernetes Service of type `LoadBalancer`. On AWS, load balancing is achieved through the [AWS Load Balancer Controller](https://kubernetes-sigs.github.io/aws-load-balancer-controller), as in managed EKS.
-
-In recent versions, the controller deploys an internal LB by default, so you need to set the annotation `service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing` to get an internet-facing LB. For more details, see the [official docs](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.7/guide/service/nlb/).
-
-For general information on load balancing with AWS, see [Network load balancing on Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/network-load-balancing.html).
-
-:::caution
-Before terminating the cluster, delete all LB-backed services so that the controller can clean up the related resources.
-:::
-
-## Ingress on AWS
-
-The AWS Load Balancer Controller also provisions `Ingress` resources of class `alb`.
-AWS Application Load Balancers (ALBs) can be configured with a [`target-type`](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.7/guide/ingress/annotations/#target-type).
-The target type `ip` requires using the EKS container network solution, which makes it incompatible with Constellation.
-If a service can be exposed on a `NodePort`, the target type `instance` can be used.
-
-See [Application load balancing on Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html) for more information.
-
-:::caution
-Ingress handlers backed by AWS ALBs reside outside the Constellation cluster, so they shouldn't be handling sensitive traffic!
-::: diff --git a/docs/versioned_docs/version-2.22/workflows/recovery.md b/docs/versioned_docs/version-2.22/workflows/recovery.md deleted file mode 100644 index 592ae247b..000000000 --- a/docs/versioned_docs/version-2.22/workflows/recovery.md +++ /dev/null @@ -1,179 +0,0 @@ -# Recover your cluster - -Recovery of a Constellation cluster means getting it back into a healthy state after too many concurrent node failures in the control plane. -Reasons for an unhealthy cluster can vary from a power outage, or planned reboot, to migration of nodes and regions. -Recovery events are rare, because Constellation is built for high availability and automatically and securely replaces failed nodes. When a node is replaced, Constellation's control plane first verifies the new node before it sends the node the cryptographic keys required to decrypt its [state disk](../architecture/images.md#state-disk). - -Constellation provides a recovery mechanism for cases where the control plane has failed and is unable to replace nodes. -The `constellation recover` command securely connects to all nodes in need of recovery using [attested TLS](../architecture/attestation.md#attested-tls-atls) and provides them with the keys to decrypt their state disks and continue booting. - -## Identify unhealthy clusters - -The first step to recovery is identifying when a cluster becomes unhealthy. -Usually, this can be first observed when the Kubernetes API server becomes unresponsive. - -You can check the health status of the nodes via the cloud service provider (CSP). -Constellation provides logging information on the boot process and status via serial console output. -In the following, you'll find detailed descriptions for identifying clusters stuck in recovery for each CSP. - - - - -First, open the AWS console to view all Auto Scaling Groups (ASGs) in the region of your cluster. Select the ASG of the control plane `--control-plane` and check that enough members are in a *Running* state. - -Second, check the boot logs of these *Instances*. In the ASG's *Instance management* view, select each desired instance. In the upper right corner, select **Action > Monitor and troubleshoot > Get system log**. - -In the serial console output, search for `Waiting for decryption key`. -Similar output to the following means your node was restarted and needs to decrypt the [state disk](../architecture/images.md#state-disk): - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","caller":"cmd/main.go:55","msg":"Starting disk-mapper","version":"2.0.0","cloudProvider":"gcp"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"setupManager","caller":"setup/setup.go:72","msg":"Preparing existing state disk"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:65","msg":"Starting RejoinClient"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"recoveryServer","caller":"recoveryserver/server.go:59","msg":"Starting RecoveryServer"} -``` - -The node will then try to connect to the [*JoinService*](../architecture/microservices.md#joinservice) and obtain the decryption key. 
-If this fails due to an unhealthy control plane, you will see log messages similar to the following: - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:77","msg":"Received list with JoinService endpoints","endpoints":["192.168.178.4:30090","192.168.178.2:30090"]} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.4:30090"} -{"level":"WARN","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.4:30090: connect: connection refused\"","endpoint":"192.168.178.4:30090"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.2:30090"} -{"level":"WARN","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.2:30090: i/o timeout\"","endpoint":"192.168.178.2:30090"} -{"level":"ERROR","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:110","msg":"Failed to rejoin on all endpoints"} -``` - -This means that you have to recover the node manually. - - - - -In the Azure portal, find the cluster's resource group. -Inside the resource group, open the control plane *Virtual machine scale set* `constellation-scale-set-controlplanes-`. -On the left, go to **Settings** > **Instances** and check that enough members are in a *Running* state. - -Second, check the boot logs of these *Instances*. -In the scale set's *Instances* view, open the details page of the desired instance. -On the left, go to **Support + troubleshooting** > **Serial console**. - -In the serial console output, search for `Waiting for decryption key`. -Similar output to the following means your node was restarted and needs to decrypt the [state disk](../architecture/images.md#state-disk): - -```json -{"level":"INFO","ts":"2022-09-08T09:56:41Z","caller":"cmd/main.go:55","msg":"Starting disk-mapper","version":"2.0.0","cloudProvider":"azure"} -{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"setupManager","caller":"setup/setup.go:72","msg":"Preparing existing state disk"} -{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"recoveryServer","caller":"recoveryserver/server.go:59","msg":"Starting RecoveryServer"} -{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"rejoinClient","caller":"rejoinclient/client.go:65","msg":"Starting RejoinClient"} -``` - -The node will then try to connect to the [*JoinService*](../architecture/microservices.md#joinservice) and obtain the decryption key. 
-If this fails due to an unhealthy control plane, you will see log messages similar to the following: - -```json -{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"rejoinClient","caller":"rejoinclient/client.go:77","msg":"Received list with JoinService endpoints","endpoints":["10.9.0.5:30090","10.9.0.6:30090"]} -{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"10.9.0.5:30090"} -{"level":"WARN","ts":"2022-09-08T09:57:03Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 10.9.0.5:30090: i/o timeout\"","endpoint":"10.9.0.5:30090"} -{"level":"INFO","ts":"2022-09-08T09:57:03Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"10.9.0.6:30090"} -{"level":"WARN","ts":"2022-09-08T09:57:23Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 10.9.0.6:30090: i/o timeout\"","endpoint":"10.9.0.6:30090"} -{"level":"ERROR","ts":"2022-09-08T09:57:23Z","logger":"rejoinClient","caller":"rejoinclient/client.go:110","msg":"Failed to rejoin on all endpoints"} -``` - -This means that you have to recover the node manually. - - - - -First, check that the control plane *Instance Group* has enough members in a *Ready* state. -In the GCP Console, go to **Instance Groups** and check the group for the cluster's control plane `-control-plane-`. - -Second, check the status of the *VM Instances*. -Go to **VM Instances** and open the details of the desired instance. -Check the serial console output of that instance by opening the **Logs** > **Serial port 1 (console)** page: - -![GCP portal serial console link](../_media/recovery-gcp-serial-console-link.png) - -In the serial console output, search for `Waiting for decryption key`. -Similar output to the following means your node was restarted and needs to decrypt the [state disk](../architecture/images.md#state-disk): - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","caller":"cmd/main.go:55","msg":"Starting disk-mapper","version":"2.0.0","cloudProvider":"gcp"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"setupManager","caller":"setup/setup.go:72","msg":"Preparing existing state disk"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:65","msg":"Starting RejoinClient"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"recoveryServer","caller":"recoveryserver/server.go:59","msg":"Starting RecoveryServer"} -``` - -The node will then try to connect to the [*JoinService*](../architecture/microservices.md#joinservice) and obtain the decryption key. 
-If this fails due to an unhealthy control plane, you will see log messages similar to the following: - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:77","msg":"Received list with JoinService endpoints","endpoints":["192.168.178.4:30090","192.168.178.2:30090"]} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.4:30090"} -{"level":"WARN","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.4:30090: connect: connection refused\"","endpoint":"192.168.178.4:30090"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.2:30090"} -{"level":"WARN","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.2:30090: i/o timeout\"","endpoint":"192.168.178.2:30090"} -{"level":"ERROR","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:110","msg":"Failed to rejoin on all endpoints"} -``` - -This means that you have to recover the node manually. - - - - -First, open the STACKIT portal to view all servers in your project. Select individual control plane nodes `--control-plane--` and check that enough members are in a *Running* state. - -Second, check the boot logs of these servers. Click on a server name and select **Overview**. Find the **Machine Setup** section and click on **Web console** > **Open console**. - -In the serial console output, search for `Waiting for decryption key`. -Similar output to the following means your node was restarted and needs to decrypt the [state disk](../architecture/images.md#state-disk): - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","caller":"cmd/main.go:55","msg":"Starting disk-mapper","version":"2.0.0","cloudProvider":"gcp"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"setupManager","caller":"setup/setup.go:72","msg":"Preparing existing state disk"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:65","msg":"Starting RejoinClient"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"recoveryServer","caller":"recoveryserver/server.go:59","msg":"Starting RecoveryServer"} -``` - -The node will then try to connect to the [*JoinService*](../architecture/microservices.md#joinservice) and obtain the decryption key. 
-If this fails due to an unhealthy control plane, you will see log messages similar to the following: - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:77","msg":"Received list with JoinService endpoints","endpoints":["192.168.178.4:30090","192.168.178.2:30090"]} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.4:30090"} -{"level":"WARN","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.4:30090: connect: connection refused\"","endpoint":"192.168.178.4:30090"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.2:30090"} -{"level":"WARN","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.2:30090: i/o timeout\"","endpoint":"192.168.178.2:30090"} -{"level":"ERROR","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:110","msg":"Failed to rejoin on all endpoints"} -``` - -This means that you have to recover the node manually. - - - - -## Recover a cluster - -Recovering a cluster requires the following parameters: - -* The `constellation-state.yaml` file in your working directory or the cluster's endpoint -* The master secret of the cluster - -A cluster can be recovered like this: - -```bash -$ constellation recover -Pushed recovery key. -Pushed recovery key. -Pushed recovery key. -Recovered 3 control-plane nodes. -``` - -In the serial console output of the node you'll see a similar output to the following: - -```json -{"level":"INFO","ts":"2022-09-08T10:26:59Z","logger":"recoveryServer","caller":"recoveryserver/server.go:93","msg":"Received recover call"} -{"level":"INFO","ts":"2022-09-08T10:26:59Z","logger":"recoveryServer","caller":"recoveryserver/server.go:125","msg":"Received state disk key and measurement secret, shutting down server"} -{"level":"INFO","ts":"2022-09-08T10:26:59Z","logger":"recoveryServer.gRPC","caller":"zap/server_interceptors.go:61","msg":"finished streaming call with code OK","grpc.start_time":"2022-09-08T10:26:59Z","system":"grpc","span.kind":"server","grpc.service":"recoverproto.API","grpc.method":"Recover","peer.address":"192.0.2.3:41752","grpc.code":"OK","grpc.time_ms":15.701} -{"level":"INFO","ts":"2022-09-08T10:27:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:87","msg":"RejoinClient stopped"} -``` diff --git a/docs/versioned_docs/version-2.22/workflows/reproducible-builds.md b/docs/versioned_docs/version-2.22/workflows/reproducible-builds.md deleted file mode 100644 index e3bc46095..000000000 --- a/docs/versioned_docs/version-2.22/workflows/reproducible-builds.md +++ /dev/null @@ -1,63 +0,0 @@ -# Reproduce released artifacts - -Constellation has first-class support for [reproducible builds](https://reproducible-builds.org). -Reproducing the released artifacts is an alternative to [signature verification](verify-cli.md) that doesn't require trusting Edgeless Systems' release process. 
-The following sections describe how to rebuild an artifact and how Constellation ensures that the rebuild reproduces the artifacts bit-by-bit. - -## Build environment prerequisites - -The build systems used by Constellation - [Bazel](https://bazel.build/) and [Nix](https://nixos.org) - are designed for deterministic, reproducible builds. -These two dependencies should be the only prerequisites for a successful build. -However, it can't be ruled out completely that peculiarities of the host affect the build result. -Thus, we recommend the following host setup for best results: - -1. A Linux operating system not older than v5.4. -2. The GNU C library not older than v2.31 (avoid `musl`). -3. GNU `coreutils` not older than v8.30 (avoid `busybox`). -4. An `ext4` filesystem for building. -5. AppArmor turned off. - -This is given, for example, on an Ubuntu 22.04 system, which is also used for reproducibility tests. - -:::note - -To avoid any backwards-compatibility issues, the host software versions should also not be much newer than the Constellation release. - -::: - -## Run the build - -The following instructions outline qualitatively how to reproduce a build. -Constellation implements these instructions in the [Reproducible Builds workflow](https://github.com/edgelesssys/constellation/actions/workflows/reproducible-builds.yml), which continuously tests for reproducibility. -The workflow is a good place to look up specific version numbers and build steps. - -1. Check out the Constellation repository at the tag corresponding to the release. - - ```bash - git clone https://github.com/edgelesssys/constellation.git - cd constellation - git checkout v2.20.0 - ``` - -2. [Install the Bazel release](https://bazel.build/install) specified in `.bazelversion`. -3. [Install Nix](https://nixos.org/download/) (any recent version should do). -4. Run the build with `bazel build $target` for one of the following targets of interest: - - ```data - //cli:cli_enterprise_darwin_amd64 - //cli:cli_enterprise_darwin_arm64 - //cli:cli_enterprise_linux_amd64 - //cli:cli_enterprise_linux_arm64 - //cli:cli_enterprise_windows_amd64 - ``` - -5. Compare the build result with the downloaded release artifact. - - - -## Feedback - -Reproduction failures often indicate a bug in the build system or in the build definitions. -Therefore, we're interested in any reproducibility issues you might encounter. -[Start a bug report](https://github.com/edgelesssys/constellation/issues/new/choose) and describe the details of your build environment. -Make sure to include your result binary or a [`diffoscope`](https://diffoscope.org/) report, if possible. diff --git a/docs/versioned_docs/version-2.22/workflows/s3proxy.md b/docs/versioned_docs/version-2.22/workflows/s3proxy.md deleted file mode 100644 index 121e8a461..000000000 --- a/docs/versioned_docs/version-2.22/workflows/s3proxy.md +++ /dev/null @@ -1,58 +0,0 @@ -# Install s3proxy - -Constellation includes a transparent client-side encryption proxy for [AWS S3](https://aws.amazon.com/de/s3/) and compatible stores. -s3proxy encrypts objects before sending them to S3 and automatically decrypts them on retrieval, without requiring changes to your application. -With s3proxy, you can use S3 for storage in a confidential way without having to trust the storage provider. - -## Limitations - -Currently, s3proxy has the following limitations: -- Only `PutObject` and `GetObject` requests are encrypted/decrypted by s3proxy. 
-By default, s3proxy will block requests that may expose unencrypted data to S3 (e.g. UploadPart). -The `allow-multipart` flag disables request blocking for evaluation purposes. -- Using the [Range](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html#API_GetObject_RequestSyntax) header on `GetObject` is currently not supported and will result in an error. - -These limitations will be removed with future iterations of s3proxy. -If you want to use s3proxy but these limitations stop you from doing so, consider [opening an issue](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&projects=&template=feature_request.yml). - -## Deployment - -You can add the s3proxy to your Constellation cluster as follows: -1. Add the Edgeless Systems chart repository: - ```bash - helm repo add edgeless https://helm.edgeless.systems/stable - helm repo update - ``` -2. Set ACCESS_KEY and ACCESS_SECRET to valid credentials you want s3proxy to use to interact with S3. -3. Deploy s3proxy: - ```bash - helm install s3proxy edgeless/s3proxy --set awsAccessKeyID="$ACCESS_KEY" --set awsSecretAccessKey="$ACCESS_SECRET" - ``` - -If you want to run a demo application, check out the [Filestash with s3proxy](../getting-started/examples/filestash-s3proxy.md) example. - - -## Technical details - -### Encryption - -s3proxy relies on Google's [Tink Cryptographic Library](https://developers.google.com/tink) to implement cryptographic operations securely. -The used cryptographic primitives are [NIST SP 800 38f](https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-38F.pdf) for key wrapping and [AES](https://en.wikipedia.org/wiki/Advanced_Encryption_Standard)-[GCM](https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Galois/counter_(GCM)) with 256 bit keys for data encryption. - -s3proxy uses [envelope encryption](https://cloud.google.com/kms/docs/envelope-encryption) to encrypt objects. -This means s3proxy uses a key encryption key (KEK) issued by the [KeyService](../architecture/microservices.md#keyservice) to encrypt data encryption keys (DEKs). -Each S3 object is encrypted with its own DEK. -The encrypted DEK is then saved as metadata of the encrypted object. -This enables key rotation of the KEK without re-encrypting the data in S3. -The approach also allows access to objects from different locations, as long as each location has access to the KEK. - -### Traffic interception - -To use s3proxy, you have to redirect your outbound S3 traffic to s3proxy. -This can either be done by modifying your client application or by changing the deployment of your application. - -The necessary deployment modifications are to add DNS redirection and a trusted TLS certificate to the client's trust store. -DNS redirection can be defined for each pod, allowing you to use s3proxy for one application without changing other applications in the same cluster. -Adding a trusted TLS certificate is necessary as clients communicate with s3proxy via HTTPS. -To have your client application trust s3proxy's TLS certificate, the certificate has to be added to the client's certificate trust store. -The [Filestash with s3proxy](../getting-started/examples/filestash-s3proxy.md) example shows how to do this. 
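-
-For illustration only, the following sketch shows one way to express the per-pod DNS redirection described above, using Kubernetes' `hostAliases` field. The deployment name, service IP, and S3 endpoint are placeholders and not part of the s3proxy chart; refer to the Filestash example linked above for the supported end-to-end setup, including the certificate trust store.
-
-```bash
-# Hypothetical example: resolve the S3 endpoint used by "my-app" to the
-# cluster IP of the s3proxy service (both values are placeholders).
-kubectl patch deployment my-app --type merge -p \
-  '{"spec":{"template":{"spec":{"hostAliases":[{"ip":"10.96.123.45","hostnames":["s3.eu-central-1.amazonaws.com"]}]}}}}'
-```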
diff --git a/docs/versioned_docs/version-2.22/workflows/sbom.md b/docs/versioned_docs/version-2.22/workflows/sbom.md deleted file mode 100644 index 6c1702dee..000000000 --- a/docs/versioned_docs/version-2.22/workflows/sbom.md +++ /dev/null @@ -1,93 +0,0 @@ -# Consume software bill of materials (SBOMs) - - - ---- - -Constellation builds produce a [software bill of materials (SBOM)](https://www.ntia.gov/SBOM) for each generated [artifact](../architecture/microservices.md). -You can use SBOMs to make informed decisions about dependencies and vulnerabilities in a given application. Enterprises rely on SBOMs to maintain an inventory of used applications, which allows them to take data-driven approaches to managing risks related to vulnerabilities. - -SBOMs for Constellation are generated using [Syft](https://github.com/anchore/syft), signed using [Cosign](https://github.com/sigstore/cosign), and stored with the produced artifact. - -:::note -The public key for Edgeless Systems' long-term code-signing key is: - -``` ------BEGIN PUBLIC KEY----- -MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEf8F1hpmwE+YCFXzjGtaQcrL6XZVT -JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== ------END PUBLIC KEY----- -``` - -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). - -Make sure the key is available in a file named `cosign.pub` to execute the following examples. -::: - -## Verify and download SBOMs - -The following sections detail how to work with each type of artifact to verify and extract the SBOM. - -### Constellation CLI - -The SBOM for Constellation CLI is made available on the [GitHub release page](https://github.com/edgelesssys/constellation/releases). The SBOM (`constellation.spdx.sbom`) and corresponding signature (`constellation.spdx.sbom.sig`) are valid for each Constellation CLI for a given version, regardless of architecture and operating system. - -```bash -curl -LO https://github.com/edgelesssys/constellation/releases/download/v2.2.0/constellation.spdx.sbom -curl -LO https://github.com/edgelesssys/constellation/releases/download/v2.2.0/constellation.spdx.sbom.sig -cosign verify-blob --key cosign.pub --signature constellation.spdx.sbom.sig constellation.spdx.sbom -``` - -### Container Images - -SBOMs for container images are [attached to the image using Cosign](https://docs.sigstore.dev/cosign/signing/other_types/#sboms-software-bill-of-materials) and uploaded to the same registry. 
- -As a consumer, use cosign to download and verify the SBOM: - -```bash -# Verify and download the attestation statement -cosign verify-attestation ghcr.io/edgelesssys/constellation/verification-service@v2.2.0 --type 'https://cyclonedx.org/bom' --key cosign.pub --output-file verification-service.att.json -# Extract SBOM from attestation statement -jq -r .payload verification-service.att.json | base64 -d > verification-service.cyclonedx.sbom -``` - -A successful verification should result in similar output: - -```shell-session -$ cosign verify-attestation ghcr.io/edgelesssys/constellation/verification-service@v2.2.0 --type 'https://cyclonedx.org/bom' --key cosign.pub --output-file verification-service.sbom - -Verification for ghcr.io/edgelesssys/constellation/verification-service@v2.2.0 -- -The following checks were performed on each of these signatures: - - The cosign claims were validated - - The signatures were verified against the specified public key -$ jq -r .payload verification-service.sbom | base64 -d > verification-service.cyclonedx.sbom -``` - -:::note - -This example considers only the `verification-service`. The same approach works for all containers in the [Constellation container registry](https://github.com/orgs/edgelesssys/packages?repo_name=constellation). - -::: - - - -## Vulnerability scanning - -You can use a plethora of tools to consume SBOMs. This section provides suggestions for tools that are popular and known to produce reliable results, but any tool that consumes [SPDX](https://spdx.dev/) or [CycloneDX](https://cyclonedx.org/) files should work. - -Syft is able to [convert between the two formats](https://github.com/anchore/syft#format-conversion-experimental) in case you require a specific type. - -### Grype - -[Grype](https://github.com/anchore/grype) is a CLI tool that lends itself well for integration into CI/CD systems or local developer machines. It's also able to consume the signed attestation statement directly and does the verification in one go. - -```bash -grype att:verification-service.sbom --key cosign.pub --add-cpes-if-none -q -``` - -### Dependency Track - -[Dependency Track](https://dependencytrack.org/) is one of the oldest and most mature solutions when it comes to managing software inventory and vulnerabilities. Once imported, it continuously scans SBOMs for new vulnerabilities. It supports the CycloneDX format and provides direct guidance on how to comply with [U.S. Executive Order 14028](https://docs.dependencytrack.org/usage/executive-order-14028/). diff --git a/docs/versioned_docs/version-2.22/workflows/scale.md b/docs/versioned_docs/version-2.22/workflows/scale.md deleted file mode 100644 index 28f19e3f1..000000000 --- a/docs/versioned_docs/version-2.22/workflows/scale.md +++ /dev/null @@ -1,122 +0,0 @@ -# Scale your cluster - -Constellation provides all features of a Kubernetes cluster including scaling and autoscaling. - -## Worker node scaling - -### Autoscaling - -Constellation comes with autoscaling disabled by default. To enable autoscaling, find the scaling group of -worker nodes: - -```bash -kubectl get scalinggroups -o json | yq '.items | .[] | select(.spec.role == "Worker") | [{"name": .metadata.name, "nodeGoupName": .spec.nodeGroupName}]' -``` - -This will output a list of scaling groups with the corresponding cloud provider name (`name`) and the cloud provider agnostic name of the node group (`nodeGroupName`). 
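-
-For a cluster with a single worker group, the output might look similar to the following; the names shown here are purely illustrative and differ per cloud provider:
-
-```shell-session
-$ kubectl get scalinggroups -o json | yq '.items | .[] | select(.spec.role == "Worker") | [{"name": .metadata.name, "nodeGoupName": .spec.nodeGroupName}]'
-- name: constell-worker-xksa0
-  nodeGoupName: worker_default
-```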
-
-Then, for the scaling group with the desired `name`, patch its `autoscaling` field to `true`:
-
-```bash
-# Replace with the name of the scaling group you want to enable autoscaling for
-worker_group=
-kubectl patch scalinggroups $worker_group --patch '{"spec":{"autoscaling": true}}' --type='merge'
-kubectl get scalinggroup $worker_group -o jsonpath='{.spec}' | yq -P
-```
-
-The cluster autoscaler now automatically provisions additional worker nodes so that all pods have a place to run.
-You can configure the minimum and maximum number of worker nodes in the scaling group by patching the `min` or
-`max` fields of the scaling group resource:
-
-```bash
-kubectl patch scalinggroups $worker_group --patch '{"spec":{"max": 5}}' --type='merge'
-kubectl get scalinggroup $worker_group -o jsonpath='{.spec}' | yq -P
-```
-
-The cluster autoscaler will now never provision more than 5 worker nodes.
-
-If you want to see the autoscaling in action, try to add a deployment with a lot of replicas, like the
-following Nginx deployment. The number of replicas needed to trigger the autoscaling depends on the size
-and count of your worker nodes. Wait for the rollout of the deployment to finish and compare the number of
-worker nodes before and after the deployment:
-
-```bash
-kubectl create deployment nginx --image=nginx --replicas 150
-kubectl -n kube-system get nodes
-kubectl rollout status deployment nginx
-kubectl -n kube-system get nodes
-```
-
-### Manual scaling
-
-Alternatively, you can manually scale your cluster up or down:
-
-
-
-
-1. Go to Auto Scaling Groups and select the worker ASG to scale up.
-2. Click **Edit**.
-3. Set the new (increased) **Desired capacity** and **Update**. For a CLI-based alternative, see the sketch at the end of this page.
-
-
-
-
-1. Find your Constellation resource group.
-2. Select the `scale-set-workers`.
-3. Go to **settings** and **scaling**.
-4. Set the new **instance count** and **save**.
-
-
-
-
-1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/).
-2. **Edit** the **worker** instance group.
-3. Set the new **number of instances** and **save**.
-
-
-
-
-Dynamic cluster scaling isn't yet supported for STACKIT.
-Support will be introduced in one of the upcoming releases.
-
-
-
-
-## Control-plane node scaling
-
-Control-plane nodes can **only be scaled manually and only scaled up**!
-
-To increase the number of control-plane nodes, follow these steps:
-
-
-
-
-1. Go to Auto Scaling Groups and select the control-plane ASG to scale up.
-2. Click **Edit**.
-3. Set the new (increased) **Desired capacity** and **Update**.
-
-
-
-
-1. Find your Constellation resource group.
-2. Select the `scale-set-controlplanes`.
-3. Go to **settings** and **scaling**.
-4. Set the new (increased) **instance count** and **save**.
-
-
-
-
-1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/).
-2. **Edit** the **control-plane** instance group.
-3. Set the new (increased) **number of instances** and **save**.
-
-
-
-
-Dynamic cluster scaling isn't yet supported for STACKIT.
-Support will be introduced in one of the upcoming releases.
-
-
-
-
-If you scale down the number of control-plane nodes, the removed nodes won't be able to exit the `etcd` cluster correctly. This will endanger the quorum that's required to run a stable Kubernetes control plane.
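-
-The console steps above can also be scripted. As a sketch, on AWS the desired capacity of a worker Auto Scaling Group could be set with the AWS CLI; the group name below is a placeholder, and this isn't a Constellation command:
-
-```bash
-# Hypothetical example: set the desired capacity of a Constellation worker ASG.
-aws autoscaling set-desired-capacity \
-  --auto-scaling-group-name constell-worker-asg \
-  --desired-capacity 4
-```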
diff --git a/docs/versioned_docs/version-2.22/workflows/storage.md b/docs/versioned_docs/version-2.22/workflows/storage.md deleted file mode 100644 index a5c52be90..000000000 --- a/docs/versioned_docs/version-2.22/workflows/storage.md +++ /dev/null @@ -1,281 +0,0 @@ -# Use persistent storage - -Persistent storage in Kubernetes requires cloud-specific configuration. -For abstraction of container storage, Kubernetes offers [volumes](https://kubernetes.io/docs/concepts/storage/volumes/), -allowing users to mount storage solutions directly into containers. -The [Container Storage Interface (CSI)](https://kubernetes-csi.github.io/docs/) is the standard interface for exposing arbitrary block and file storage systems into containers in Kubernetes. -Cloud service providers (CSPs) offer their own CSI-based solutions for cloud storage. - -## Confidential storage - -Most cloud storage solutions support encryption, such as [GCE Persistent Disks (PD)](https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek). -Constellation supports the available CSI-based storage options for Kubernetes engines in AWS, Azure, GCP, and STACKIT. -However, their encryption takes place in the storage backend and is managed by the CSP. -Thus, using the default CSI drivers for these storage types means trusting the CSP with your persistent data. - -To address this, Constellation provides CSI drivers for AWS EBS, Azure Disk, GCE PD, and OpenStack Cinder, offering [encryption on the node level](../architecture/keys.md#storage-encryption). They enable transparent encryption for persistent volumes without needing to trust the cloud backend. Plaintext data never leaves the confidential VM context, offering you confidential storage. - -For more details see [encrypted persistent storage](../architecture/encrypted-storage.md). - -## CSI drivers - -Constellation supports the following drivers, which offer node-level encryption and optional integrity protection. - - - - -**Constellation CSI driver for AWS Elastic Block Store** -Mount [Elastic Block Store](https://aws.amazon.com/ebs/) storage volumes into your Constellation cluster. -Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-aws-ebs-csi-driver) for more information. - - - - -**Constellation CSI driver for Azure Disk**: -Mount Azure [Disk Storage](https://azure.microsoft.com/en-us/services/storage/disks/#overview) into your Constellation cluster. -See the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-azuredisk-csi-driver) for more information. -Since Azure Disks are mounted as `ReadWriteOnce`, they're only available to a single pod. - - - - -**Constellation CSI driver for GCP Persistent Disk**: -Mount [Persistent Disk](https://cloud.google.com/persistent-disk) block storage into your Constellation cluster. -Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-gcp-compute-persistent-disk-csi-driver) for more information. - - - - -**Constellation CSI driver for STACKIT / OpenStack Cinder** -Mount [Cinder](https://docs.openstack.org/cinder/latest/) block storage volumes into your Constellation cluster. 
-Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-cloud-provider-openstack) for more information. - - - - -Note that in case the options above aren't a suitable solution for you, Constellation is compatible with all other CSI-based storage options. For example, you can use [AWS EFS](https://docs.aws.amazon.com/en_en/eks/latest/userguide/efs-csi.html), [Azure Files](https://docs.microsoft.com/en-us/azure/storage/files/storage-files-introduction), or [GCP Filestore](https://cloud.google.com/filestore) with Constellation out of the box. Constellation is just not providing transparent encryption on the node level for these storage types yet. - -## Installation - -The Constellation CLI automatically installs Constellation's CSI driver for the selected CSP in your cluster. -If you don't need a CSI driver or wish to deploy your own, you can disable the automatic installation by setting `deployCSIDriver` to `false` in your Constellation config file. - - - - -AWS comes with two storage classes by default. - -* `encrypted-rwo` - * Uses [SSDs of `gp3` type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html) - * ext-4 filesystem - * Encryption of all data written to disk -* `integrity-encrypted-rwo` - * Uses [SSDs of `gp3` type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html) - * ext-4 filesystem - * Encryption of all data written to disk - * Integrity protection of data written to disk - -For more information on encryption algorithms and key sizes, refer to [cryptographic algorithms](../architecture/encrypted-storage.md#cryptographic-algorithms). - -:::info - -The default storage class is set to `encrypted-rwo` for performance reasons. -If you want integrity-protected storage, set the `storageClassName` parameter of your persistent volume claim to `integrity-encrypted-rwo`. - -Alternatively, you can create your own storage class with integrity protection enabled by adding `csi.storage.k8s.io/fstype: ext4-integrity` to the class `parameters`. -Or use another filesystem by specifying another file system type with the suffix `-integrity`, e.g., `csi.storage.k8s.io/fstype: xfs-integrity`. - -Note that volume expansion isn't supported for integrity-protected disks. - -::: - - - - -Azure comes with two storage classes by default. - -* `encrypted-rwo` - * Uses [Standard SSDs](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#standard-ssds) - * ext-4 filesystem - * Encryption of all data written to disk -* `integrity-encrypted-rwo` - * Uses [Premium SSDs](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#premium-ssds) - * ext-4 filesystem - * Encryption of all data written to disk - * Integrity protection of data written to disk - -For more information on encryption algorithms and key sizes, refer to [cryptographic algorithms](../architecture/encrypted-storage.md#cryptographic-algorithms). - -:::info - -The default storage class is set to `encrypted-rwo` for performance reasons. -If you want integrity-protected storage, set the `storageClassName` parameter of your persistent volume claim to `integrity-encrypted-rwo`. - -Alternatively, you can create your own storage class with integrity protection enabled by adding `csi.storage.k8s.io/fstype: ext4-integrity` to the class `parameters`. 
-Or use another filesystem by specifying another file system type with the suffix `-integrity`, e.g., `csi.storage.k8s.io/fstype: xfs-integrity`. - -Note that volume expansion isn't supported for integrity-protected disks. - -::: - - - - -GCP comes with two storage classes by default. - -* `encrypted-rwo` - * Uses [standard persistent disks](https://cloud.google.com/compute/docs/disks#pdspecs) - * ext-4 filesystem - * Encryption of all data written to disk -* `integrity-encrypted-rwo` - * Uses [performance (SSD) persistent disks](https://cloud.google.com/compute/docs/disks#pdspecs) - * ext-4 filesystem - * Encryption of all data written to disk - * Integrity protection of data written to disk - -For more information on encryption algorithms and key sizes, refer to [cryptographic algorithms](../architecture/encrypted-storage.md#cryptographic-algorithms). - -:::info - -The default storage class is set to `encrypted-rwo` for performance reasons. -If you want integrity-protected storage, set the `storageClassName` parameter of your persistent volume claim to `integrity-encrypted-rwo`. - -Alternatively, you can create your own storage class with integrity protection enabled by adding `csi.storage.k8s.io/fstype: ext4-integrity` to the class `parameters`. -Or use another filesystem by specifying another file system type with the suffix `-integrity`, e.g., `csi.storage.k8s.io/fstype: xfs-integrity`. - -Note that volume expansion isn't supported for integrity-protected disks. - -::: - - - - -STACKIT comes with two storage classes by default. - -* `encrypted-rwo` - * Uses [disks of `storage_premium_perf1` type](https://docs.stackit.cloud/stackit/en/service-plans-blockstorage-75137974.html) - * ext-4 filesystem - * Encryption of all data written to disk -* `integrity-encrypted-rwo` - * Uses [disks of `storage_premium_perf1` type](https://docs.stackit.cloud/stackit/en/service-plans-blockstorage-75137974.html) - * ext-4 filesystem - * Encryption of all data written to disk - * Integrity protection of data written to disk - -For more information on encryption algorithms and key sizes, refer to [cryptographic algorithms](../architecture/encrypted-storage.md#cryptographic-algorithms). - -:::info - -The default storage class is set to `encrypted-rwo` for performance reasons. -If you want integrity-protected storage, set the `storageClassName` parameter of your persistent volume claim to `integrity-encrypted-rwo`. - -Alternatively, you can create your own storage class with integrity protection enabled by adding `csi.storage.k8s.io/fstype: ext4-integrity` to the class `parameters`. -Or use another filesystem by specifying another file system type with the suffix `-integrity`, e.g., `csi.storage.k8s.io/fstype: xfs-integrity`. - -Note that volume expansion isn't supported for integrity-protected disks. - -::: - - - - -1. Create a [persistent volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) - - A [persistent volume claim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) is a request for storage with certain properties. - It can refer to a storage class. - The following creates a persistent volume claim, requesting 20 GB of storage via the `encrypted-rwo` storage class: - - ```bash - cat < - ---- - -You can terminate your cluster using the CLI. For this, you need the Terraform state directory named [`constellation-terraform`](../reference/terraform.md) in the current directory. 
-
-:::danger
-
-All ephemeral storage and state of your cluster will be lost. Make sure any data is safely stored in persistent storage. Constellation can recreate your cluster and the associated encryption keys, but won't back up your application data automatically.
-
-:::
-
-
-
-Terminate the cluster by running:
-
-```bash
-constellation terminate
-```
-
-Or without confirmation (e.g., for automation purposes):
-
-```bash
-constellation terminate --yes
-```
-
-This deletes all resources created by Constellation in your cloud environment.
-All local files created by the `apply` command are deleted as well, except for `constellation-mastersecret.json` and the configuration file.
-
-:::caution
-
-Termination can fail if additional resources have been created that depend on the ones managed by Constellation. In this case, you need to delete these additional
-resources manually. Just run the `terminate` command again afterward to continue the termination process of the cluster.
-
-:::
-
-
-
-Terminate the cluster by running:
-
-```bash
-terraform destroy
-```
-
-Delete all files that are no longer needed:
-
-```bash
-rm constellation-state.yaml constellation-admin.conf
-```
-
-Only the `constellation-mastersecret.json` and the configuration file remain.
-
-
-
diff --git a/docs/versioned_docs/version-2.22/workflows/terraform-provider.md b/docs/versioned_docs/version-2.22/workflows/terraform-provider.md
deleted file mode 100644
index c7a795d3f..000000000
--- a/docs/versioned_docs/version-2.22/workflows/terraform-provider.md
+++ /dev/null
@@ -1,140 +0,0 @@
-# Use the Terraform provider
-
-The Constellation Terraform provider allows you to manage the full lifecycle of a Constellation cluster (namely creation, upgrades, and deletion) via Terraform.
-The provider is available through the [Terraform registry](https://registry.terraform.io/providers/edgelesssys/constellation/latest) and is released in lock-step with Constellation releases.
-
-## Prerequisites
-
-- a Linux / Mac operating system (ARM64/AMD64)
-- a Terraform installation of version `v1.4.4` or above
-
-## Quick setup
-
-This example shows how to set up a Constellation cluster with the reference IAM and infrastructure setup. This setup is also used when creating a Constellation cluster through the Constellation CLI. You can either consume the IAM / infrastructure modules through a remote source (recommended) or local files. The latter requires downloading the infrastructure and IAM modules for the corresponding CSP from `terraform-modules.zip` on the [Constellation release page](https://github.com/edgelesssys/constellation/releases/latest) and placing them in the Terraform workspace directory.
-
-1. Create a directory (workspace) for your Constellation cluster.
-
-   ```bash
-   mkdir constellation-workspace
-   cd constellation-workspace
-   ```
-
-2. Use one of the [example configurations for using the Constellation Terraform provider](https://github.com/edgelesssys/constellation/tree/main/terraform-provider-constellation/examples/full) or create a `main.tf` file and fill it with the resources you want to create. The [Constellation Terraform provider documentation](https://registry.terraform.io/providers/edgelesssys/constellation/latest) offers thorough documentation on the resources and their attributes.
-3. Initialize and apply the Terraform configuration.
-
-
-
-   Initialize the providers and apply the configuration.
- - ```bash - terraform init - terraform apply - ``` - - Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. - - - -:::info -On SEV-SNP, you need to manually patch the policy of the MAA provider before creating the Constellation cluster, as this feature isn't available in Azure's Terraform provider yet. The Constellation CLI provides a utility for patching, but you can also do it manually. - - ```bash - terraform init - terraform apply -target module.azure_iam # adjust resource path if not using the example configuration - terraform apply -target module.azure_infrastructure # adjust resource path if not using the example configuration - constellation maa-patch $(terraform output -raw maa_url) # adjust output path / input if not using the example configuration or manually patch the resource - terraform apply -target constellation_cluster.azure_example # adjust resource path if not using the example configuration - ``` - - Use the following policy if manually performing the patch. - - ``` - version= 1.0; - authorizationrules - { - [type=="x-ms-azurevm-default-securebootkeysvalidated", value==false] => deny(); - [type=="x-ms-azurevm-debuggersdisabled", value==false] => deny(); - // The line below was edited to use the MAA provider within Constellation. Do not edit manually. - //[type=="secureboot", value==false] => deny(); - [type=="x-ms-azurevm-signingdisabled", value==false] => deny(); - [type=="x-ms-azurevm-dbvalidated", value==false] => deny(); - [type=="x-ms-azurevm-dbxvalidated", value==false] => deny(); - => permit(); - }; - issuancerules - { - }; - ``` - -::: - - Initialize the providers and apply the configuration. - - ```bash - terraform init - terraform apply - ``` - - Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. - - - - - Initialize the providers and apply the configuration. - - ```bash - terraform init - terraform apply - ``` - - Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. - - - Initialize the providers and apply the configuration. - - ```bash - terraform init - terraform apply - ``` - - Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. - - - -4. Connect to the cluster. - - ```bash - terraform output -raw kubeconfig > constellation-admin.conf - export KUBECONFIG=$(realpath constellation-admin.conf) - ``` - -## Bringing your own infrastructure - -Instead of using the example infrastructure used in the [quick setup](#quick-setup), you can also provide your own infrastructure. -If you need a starting point for a custom infrastructure setup, you can download the infrastructure / IAM Terraform modules for the respective CSP from the Constellation [GitHub releases](https://github.com/edgelesssys/constellation/releases). 
You can modify and extend the modules per your requirements, while keeping the basic functionality intact. -The module contains: - -- `{csp}`: cloud resources the cluster runs on -- `iam/{csp}`: IAM resources used within the cluster - -When upgrading your cluster, make sure to check the Constellation release notes for potential breaking changes in the reference infrastructure / IAM modules that need to be considered. - -## Cluster upgrades - -:::tip -Also see the [general documentation on cluster upgrades](./upgrade.md). -::: - -The steps for applying the upgrade are as follows: - -1. Update the version constraint of the Constellation Terraform provider in the `required_providers` block in your Terraform configuration. -2. If you explicitly set any of the version attributes of the provider's resources and data sources (e.g. `image_version` or `constellation_microservice_version`), make sure to update them too. Refer to Constellation's [version support policy](https://github.com/edgelesssys/constellation/blob/main/dev-docs/workflows/versions-support.md) for more information on how each Constellation version and its dependencies are supported. -3. Update the IAM / infrastructure configuration. - - For [remote addresses as module sources](https://developer.hashicorp.com/terraform/language/modules/sources#fetching-archives-over-http), update the version number inside the address of the `source` field of the infrastructure / IAM module to the target version. - - For [local paths as module sources](https://developer.hashicorp.com/terraform/language/modules/sources#local-paths) or when [providing your own infrastructure](#bringing-your-own-infrastructure), see the changes made in the reference modules since the upgrade's origin version and adjust your infrastructure / IAM configuration accordingly. -4. Upgrade the Terraform module and provider dependencies and apply the targeted configuration. - -```bash - terraform init -upgrade - terraform apply -``` diff --git a/docs/versioned_docs/version-2.22/workflows/troubleshooting.md b/docs/versioned_docs/version-2.22/workflows/troubleshooting.md deleted file mode 100644 index 903c829e0..000000000 --- a/docs/versioned_docs/version-2.22/workflows/troubleshooting.md +++ /dev/null @@ -1,200 +0,0 @@ -# Troubleshooting - -This section aids you in finding problems when working with Constellation. - -## Common issues - -### Issues with creating new clusters - -When you create a new cluster, you should always use the [latest release](https://github.com/edgelesssys/constellation/releases/latest). -If something doesn't work, check out the [known issues](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22). - -### Azure: Resource Providers can't be registered - -On Azure, you may receive the following error when running `apply` or `terminate` with limited IAM permissions: - -```shell-session -Error: Error ensuring Resource Providers are registered. - -Terraform automatically attempts to register the Resource Providers it supports to -ensure it's able to provision resources. - -If you don't have permission to register Resource Providers you may wish to use the -"skip_provider_registration" flag in the Provider block to disable this functionality. - -[...] -``` - -To continue, please ensure that the [required resource providers](../getting-started/install.md#required-permissions) have been registered in your subscription by your administrator. 
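-
-As an illustration, an administrator could register a missing resource provider with the Azure CLI as follows. The provider namespace below is only an example; the linked page lists all required providers.
-
-```bash
-# Hypothetical example: register one required resource provider and check its state.
-az provider register --namespace Microsoft.Attestation
-az provider show --namespace Microsoft.Attestation --query registrationState
-```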
- -Afterward, set `ARM_SKIP_PROVIDER_REGISTRATION=true` as an environment variable and either run `apply` or `terminate` again. -For example: - -```bash -ARM_SKIP_PROVIDER_REGISTRATION=true constellation apply -``` - -Or alternatively, for `terminate`: - -```bash -ARM_SKIP_PROVIDER_REGISTRATION=true constellation terminate -``` - -### Azure: Can't update attestation policy - -On Azure, you may receive the following error when running `apply` from within an Azure environment, e.g., an Azure VM: - -```shell-session -An error occurred: patching policies: updating attestation policy: unexpected status code: 403 Forbidden -``` - -The problem occurs because the Azure SDK we use internally attempts to [authenticate towards the Azure API with the managed identity of your current environment instead of the Azure CLI token](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DefaultAzureCredential). - -We decided not to deviate from this behavior and comply with the ordering of credentials. - -A solution is to add the [required permissions](../getting-started/install.md#required-permissions) to the managed identity of your environment. For example, the managed identity of your Azure VM, instead of the account that you've authenticated with in the Azure CLI. - -If your setup requires a change in the ordering of credentials, please open an issue and explain your desired behavior. - - - -### Nodes fail to join with error `untrusted measurement value` - -This error indicates that a node's [attestation statement](../architecture/attestation.md) contains measurements that don't match the trusted values expected by the [JoinService](../architecture/microservices.md#joinservice). -This may for example happen if the cloud provider updates the VM's firmware such that it influences the [runtime measurements](../architecture/attestation.md#runtime-measurements) in an unforeseen way. -A failed upgrade due to an erroneous attestation config can also cause this error. -You can change the expected measurements to resolve the failure. - -:::caution - -Attestation and trusted measurements are crucial for the security of your cluster. -Be extra careful when manually changing these settings. -When in doubt, check if the encountered [issue is known](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22) or [contact support](https://github.com/edgelesssys/constellation#support). - -::: - -:::tip - -During an upgrade with modified attestation config, a backup of the current configuration is stored in the `join-config` config map in the `kube-system` namespace under the `attestationConfig_backup` key. To restore the old attestation config after a failed upgrade, replace the value of `attestationConfig` with the value from `attestationConfig_backup`: - -```bash -kubectl patch configmaps -n kube-system join-config -p "{\"data\":{\"attestationConfig\":\"$(kubectl get configmaps -n kube-system join-config -o "jsonpath={.data.attestationConfig_backup}")\"}}" -``` - -::: - -You can use the `apply` command to change measurements of a running cluster: - -1. Modify the `measurements` key in your local `constellation-conf.yaml` to the expected values. -2. Run `constellation apply`. - -Keep in mind that running `apply` also applies any version changes from your config to the cluster. 
- -You can run these commands to learn about the versions currently configured in the cluster: - -- Kubernetes API server version: `kubectl get nodeversion constellation-version -o json -n kube-system | jq .spec.kubernetesClusterVersion` -- image version: `kubectl get nodeversion constellation-version -o json -n kube-system | jq .spec.imageVersion` -- microservices versions: `helm list --filter 'constellation-services' -n kube-system` - -### Upgrading Kubernetes resources fails - -Constellation manages its Kubernetes resources using Helm. -When applying an upgrade, the charts that are about to be installed, and a values override file `overrides.yaml`, -are saved to disk in your current workspace under `constellation-upgrade/upgrade-/helm-charts/`. -If upgrading the charts using the Constellation CLI fails, you can review these charts and try to manually apply the upgrade. - -:::caution - -Changing and manually applying the charts may destroy cluster resources and can lead to broken Constellation deployments. -Proceed with caution and when in doubt, -check if the encountered [issue is known](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22) or [contact support](https://github.com/edgelesssys/constellation#support). - -::: - -## Diagnosing issues - -### Logs - -To get started on diagnosing issues with Constellation, it's often helpful to collect logs from nodes, pods, or other resources in the cluster. Most logs are available through Kubernetes' standard -[logging interfaces](https://kubernetes.io/docs/concepts/cluster-administration/logging/). - -To debug issues occurring at boot time of the nodes, you can use the serial console interface of the CSP while the machine boots to get a read-only view of the boot logs. - -Apart from that, Constellation also offers further [observability integrations](../architecture/observability.md). - -### Node shell access - -Debugging via a shell on a node is [directly supported by Kubernetes](https://kubernetes.io/docs/tasks/debug/debug-application/debug-running-pod/#node-shell-session). - -1. Figure out which node to connect to: - - ```bash - kubectl get nodes - # or to see more information, such as IPs: - kubectl get nodes -o wide - ``` - -2. Connect to the node: - - ```bash - kubectl debug node/constell-worker-xksa0-000000 -it --image=busybox - ``` - - You will be presented with a prompt. - - The nodes file system is mounted at `/host`. - -3. Once finished, clean up the debug pod: - - ```bash - kubectl delete pod node-debugger-constell-worker-xksa0-000000-bjthj - ``` - -### Emergency SSH access - -Emergency SSH access to nodes can be useful to diagnose issues or download important data even if the Kubernetes API isn't reachable anymore. - -1. Enter the `constellation-terraform` directory in your Constellation workspace and enable emergency SSH access to the cluster: - - ```bash - cd constellation-terraform - echo "emergency_ssh = true" >> ./terraform.tfvars - terraform apply - ``` - -2. Sign an existing SSH key with your master secret: - - ```bash - cd ../ # go back to your Constellation workspace - constellation ssh --key your_public_key.pub - ``` - - A certificate is written to `constellation_cert.pub`. - - The certificate is valid for 24 hours and enables you to access your Constellation nodes using - [certificate based authentication](https://en.wikibooks.org/wiki/OpenSSH/Cookbook/Certificate-based_Authentication). - -3. 
Now you can connect to any Constellation node using your certificate and your private key. - - ```bash - ssh -o CertificateFile=constellation_cert.pub -i root@ - ``` - - Normally, you don't have access to the Constellation nodes since they reside in a private network. - To access those nodes anyways, you can use your Constellation load balancer as a proxy jump host. - For this, use something along the following SSH client configuration: - - ```text - Host - ProxyJump none - - Host * - IdentityFile - PreferredAuthentications publickey - CertificateFile=constellation_cert.pub - User root - ProxyJump - ``` - - With this configuration you can connect to a Constellation node using `ssh -F `. - You can obtain the private node IP and the domain name of the load balancer using your CSP's web UI. diff --git a/docs/versioned_docs/version-2.22/workflows/trusted-launch.md b/docs/versioned_docs/version-2.22/workflows/trusted-launch.md deleted file mode 100644 index d6d01d8eb..000000000 --- a/docs/versioned_docs/version-2.22/workflows/trusted-launch.md +++ /dev/null @@ -1,54 +0,0 @@ -# Use Azure trusted launch VMs - -Constellation also supports [trusted launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch) on Microsoft Azure. Trusted launch VMs don't offer the same level of security as Confidential VMs, but are available in more regions and in larger quantities. The main difference between trusted launch VMs and normal VMs is that the former offer vTPM-based remote attestation. When used with trusted launch VMs, Constellation relies on vTPM-based remote attestation to verify nodes. - -:::caution - -Trusted launch VMs don't provide runtime encryption and don't keep the cloud service provider (CSP) out of your trusted computing base. - -::: - -Constellation supports trusted launch VMs with instance types `Standard_D*_v4` and `Standard_E*_v4`. Run `constellation config instance-types` for a list of all supported instance types. - -## VM images - -Azure currently doesn't support [community galleries for trusted launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/share-gallery-community). Thus, you need to manually import the Constellation node image into your cloud subscription. - -The latest image is available at `https://cdn.confidential.cloud/constellation/images/azure/trusted-launch/v2.2.0/constellation.img`. Simply adjust the version number to download a newer version. - -After you've downloaded the image, create a resource group `constellation-images` in your Azure subscription and import the image. -You can use a script to do this: - -```bash -wget https://raw.githubusercontent.com/edgelesssys/constellation/main/hack/importAzure.sh -chmod +x importAzure.sh -AZURE_IMAGE_VERSION=2.2.0 AZURE_RESOURCE_GROUP_NAME=constellation-images AZURE_IMAGE_FILE=./constellation.img ./importAzure.sh -``` - -The script creates the following resources: - -1. A new image gallery with the default name `constellation-import` -2. A new image definition with the default name `constellation` -3. The actual image with the provided version. In this case `2.2.0` - -Once the import is completed, use the `ID` of the image version in your `constellation-conf.yaml` for the `image` field. Set `confidentialVM` to `false`. 
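-
-As a sketch, the two fields could be set with `yq` (v4 syntax), assuming `confidentialVM` sits under the Azure provider section of your config as in the generated default:
-
-```bash
-# "<image-version-id>" is a placeholder for the ID from the import step.
-yq -i '.image = "<image-version-id>"' constellation-conf.yaml
-# Assumed location of the field; double-check your config file.
-yq -i '.provider.azure.confidentialVM = false' constellation-conf.yaml
-```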
- -Fetch the image measurements: - -```bash -IMAGE_VERSION=2.2.0 -URL=https://public-edgeless-constellation.s3.us-east-2.amazonaws.com//communitygalleries/constellationcvm-b3782fa0-0df7-4f2f-963e-fc7fc42663df/images/constellation/versions/$IMAGE_VERSION/measurements.yaml -constellation config fetch-measurements -u$URL -s$URL.sig -``` - -:::info - -The [`constellation apply`](create.md) command will issue a warning because manually imported images aren't recognized as production grade images: - -```shell-session -Configured image doesn't look like a released production image. Double check image before deploying to production. -``` - -Please ignore this warning. - -::: diff --git a/docs/versioned_docs/version-2.22/workflows/upgrade.md b/docs/versioned_docs/version-2.22/workflows/upgrade.md deleted file mode 100644 index 3db2ecad6..000000000 --- a/docs/versioned_docs/version-2.22/workflows/upgrade.md +++ /dev/null @@ -1,110 +0,0 @@ -# Upgrade your cluster - -Constellation provides an easy way to upgrade all components of your cluster, without disrupting its availability. -Specifically, you can upgrade the Kubernetes version, the nodes' image, and the Constellation microservices. -You configure the desired versions in your local Constellation configuration and trigger upgrades with the `apply` command. -To learn about available versions you use the `upgrade check` command. -Which versions are available depends on the CLI version you are using. - -## Update the CLI - -Each CLI comes with a set of supported microservice and Kubernetes versions. -Most importantly, a given CLI version can only upgrade a cluster of the previous minor version, but not older ones. -This means that you have to upgrade your CLI and cluster one minor version at a time. - -For example, if you are currently on CLI version v2.6 and the latest version is v2.8, you should - -* upgrade the CLI to v2.7, -* upgrade the cluster to v2.7, -* and only then continue upgrading the CLI (and the cluster) to v2.8 after. - -Also note that if your current Kubernetes version isn't supported by the next CLI version, use your current CLI to upgrade to a newer Kubernetes version first. - -To learn which Kubernetes versions are supported by a particular CLI, run [constellation config kubernetes-versions](../reference/cli.md#constellation-config-kubernetes-versions). - -## Migrate the configuration - -The Constellation configuration file is located in the file `constellation-conf.yaml` in your workspace. -Refer to the [migration reference](../reference/migration.md) to check if you need to update fields in your configuration file. -Use [`constellation config migrate`](../reference/cli.md#constellation-config-migrate) to automatically update an old config file to a new format. - -## Check for upgrades - -To learn which versions the current CLI can upgrade to and what's installed in your cluster, run: - -```bash -# Show possible upgrades -constellation upgrade check - -# Show possible upgrades and write them to config file -constellation upgrade check --update-config -``` - -You can either enter the reported target versions into your config manually or run the above command with the `--update-config` flag. -When using this flag, the `kubernetesVersion`, `image`, `microserviceVersion`, and `attestation` fields are overwritten with the smallest available upgrade. 
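-
-If you want to review what the flag wrote before applying, you can, for example, print the affected fields with `yq` (v4 syntax):
-
-```bash
-yq '.kubernetesVersion, .image, .microserviceVersion, .attestation' constellation-conf.yaml
-```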
- -## Apply the upgrade - -Once you updated your config with the desired versions, you can trigger the upgrade with this command: - -```bash -constellation apply -``` - -Microservice upgrades will be finished within a few minutes, depending on the cluster size. -If you are interested, you can monitor pods restarting in the `kube-system` namespace with your tool of choice. - -Image and Kubernetes upgrades take longer. -For each node in your cluster, a new node has to be created and joined. -The process usually takes up to ten minutes per node. - -When applying an upgrade, the Helm charts for the upgrade as well as backup files of Constellation-managed Custom Resource Definitions, Custom Resources, and Terraform state are created. -You can use the Terraform state backup to restore previous resources in case an upgrade misconfigured or erroneously deleted a resource. -You can use the Custom Resource (Definition) backup files to restore Custom Resources and Definitions manually (e.g., via `kubectl apply`) if the automatic migration of those resources fails. -You can use the Helm charts to manually apply upgrades to the Kubernetes resources, should an upgrade fail. - -:::note - -For advanced users: the upgrade consists of several phases that can be individually skipped through the `--skip-phases` flag. -The phases are `infrastracture` for the cloud resource management through Terraform, `helm` for the chart management of the microservices, `image` for OS image upgrades, and `k8s` for Kubernetes version upgrades. - -::: - -## Check the status - -Upgrades are asynchronous operations. -After you run `apply`, it will take a while until the upgrade has completed. -To understand if an upgrade is finished, you can run: - -```bash -constellation status -``` - -This command displays the following information: - -* The installed services and their versions -* The image and Kubernetes version the cluster is expecting on each node -* How many nodes are up to date - -Here's an example output: - -```shell-session -Target versions: - Image: v2.6.0 - Kubernetes: v1.25.8 -Service versions: - Cilium: v1.12.1 - cert-manager: v1.10.0 - constellation-operators: v2.6.0 - constellation-services: v2.6.0 -Cluster status: Some node versions are out of date - Image: 23/25 - Kubernetes: 25/25 -``` - -This output indicates that the cluster is running Kubernetes version `1.25.8`, and all nodes have the appropriate binaries installed. -23 out of 25 nodes have already upgraded to the targeted image version of `2.6.0`, while two are still in progress. - -## Apply further upgrades - -After the upgrade is finished, you can run `constellation upgrade check` again to see if there are more upgrades available. If so, repeat the process. diff --git a/docs/versioned_docs/version-2.22/workflows/verify-cli.md b/docs/versioned_docs/version-2.22/workflows/verify-cli.md deleted file mode 100644 index e33569d37..000000000 --- a/docs/versioned_docs/version-2.22/workflows/verify-cli.md +++ /dev/null @@ -1,129 +0,0 @@ -# Verify the CLI - -:::info -This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. -::: - - - ---- - -Edgeless Systems uses [sigstore](https://www.sigstore.dev/) and [SLSA](https://slsa.dev) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/cosign/signing/overview/), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. 
Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at `https://rekor.sigstore.dev`. - -:::note -The public key for Edgeless Systems' long-term code-signing key is: - -``` ------BEGIN PUBLIC KEY----- -MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEf8F1hpmwE+YCFXzjGtaQcrL6XZVT -JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== ------END PUBLIC KEY----- -``` - -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). -::: - -The Rekor transparency log is a public append-only ledger that verifies and records signatures and associated metadata. The Rekor transparency log enables everyone to observe the sequence of (software) signatures issued by Edgeless Systems and many other parties. The transparency log allows for the public identification of dubious or malicious signatures. - -You should always ensure that (1) your CLI executable was signed with the private key corresponding to the above public key and that (2) there is a corresponding entry in the Rekor transparency log. Both can be done as described in the following. - -:::info -You don't need to verify the Constellation node images. This is done automatically by your CLI and the rest of Constellation. -::: - -## Verify the signature - -:::info -This guide assumes Linux on an amd64 processor. The exact steps for other platforms differ slightly. -::: - -First, [install the Cosign CLI](https://docs.sigstore.dev/cosign/system_config/installation/). Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example: - -```shell-session -$ cosign verify-blob --key https://edgeless.systems/es.pub --signature constellation-linux-amd64.sig constellation-linux-amd64 - -Verified OK -``` - -The above performs an offline verification of the provided public key, signature, and executable. To also verify that a corresponding entry exists in the public Rekor transparency log, add the variable `COSIGN_EXPERIMENTAL=1`: - -```shell-session -$ COSIGN_EXPERIMENTAL=1 cosign verify-blob --key https://edgeless.systems/es.pub --signature constellation-linux-amd64.sig constellation-linux-amd64 - -tlog entry verified with uuid: afaba7f6635b3e058888692841848e5514357315be9528474b23f5dcccb82b13 index: 3477047 -Verified OK -``` - -🏁 You now know that your CLI executable was officially released and signed by Edgeless Systems. - -### Optional: Manually inspect the transparency log - -To further inspect the public Rekor transparency log, [install the Rekor CLI](https://docs.sigstore.dev/logging/installation). A search for the CLI executable should give a single UUID. (Note that this UUID contains the UUID from the previous `cosign` command.) 
- -```shell-session -$ rekor-cli search --artifact constellation-linux-amd64 - -Found matching entries (listed by UUID): -362f8ecba72f4326afaba7f6635b3e058888692841848e5514357315be9528474b23f5dcccb82b13 -``` - -With this UUID you can get the full entry from the transparency log: - -```shell-session -$ rekor-cli get --uuid=362f8ecba72f4326afaba7f6635b3e058888692841848e5514357315be9528474b23f5dcccb82b13 - -LogID: c0d23d6ad406973f9559f3ba2d1ca01f84147d8ffc5b8445c224f98b9591801d -Index: 3477047 -IntegratedTime: 2022-09-12T22:28:16Z -UUID: afaba7f6635b3e058888692841848e5514357315be9528474b23f5dcccb82b13 -Body: { - "HashedRekordObj": { - "data": { - "hash": { - "algorithm": "sha256", - "value": "40e137b9b9b8204d672642fd1e181c6d5ccb50cfc5cc7fcbb06a8c2c78f44aff" - } - }, - "signature": { - "content": "MEUCIQCSER3mGj+j5Pr2kOXTlCIHQC3gT30I7qkLr9Awt6eUUQIgcLUKRIlY50UN8JGwVeNgkBZyYD8HMxwC/LFRWoMn180=", - "publicKey": { - "content": "LS0tLS1CRUdJTiBQVUJMSUMgS0VZLS0tLS0KTUZrd0V3WUhLb1pJemowQ0FRWUlLb1pJemowREFRY0RRZ0FFZjhGMWhwbXdFK1lDRlh6akd0YVFjckw2WFpWVApKbUVlNWlTTHZHMVN5UVNBZXc3V2RNS0Y2bzl0OGUyVEZ1Q2t6bE9oaGx3czJPSFdiaUZabkZXQ0Z3PT0KLS0tLS1FTkQgUFVCTElDIEtFWS0tLS0tCg==" - } - } - } -} -``` - -The field `publicKey` should contain Edgeless Systems' public key in Base64 encoding. - -You can get an exhaustive list of artifact signatures issued by Edgeless Systems via the following command: - -```bash -rekor-cli search --public-key https://edgeless.systems/es.pub --pki-format x509 -``` - -Edgeless Systems monitors this list to detect potential unauthorized use of its private key. - -## Verify the provenance - -Provenance attests that a software artifact was produced by a specific repository and build system invocation. For more information on provenance visit [slsa.dev](https://slsa.dev/provenance/v0.2) and learn about the [adoption of SLSA for Constellation](../reference/slsa.md). - -Just as checking its signature proves that the CLI hasn't been manipulated, checking the provenance proves that the artifact was produced by the expected build process and hasn't been tampered with. - -To verify the provenance, first install the [slsa-verifier](https://github.com/slsa-framework/slsa-verifier). Then make sure you have the provenance file (`constellation.intoto.jsonl`) and Constellation CLI downloaded. Both are available on the [GitHub release page](https://github.com/edgelesssys/constellation/releases). - -:::info -The same provenance file is valid for all Constellation CLI executables of a given version independent of the target platform. -::: - -Use the verifier to perform the check: - -```shell-session -$ slsa-verifier verify-artifact constellation-linux-amd64 \ - --provenance-path constellation.intoto.jsonl \ - --source-uri github.com/edgelesssys/constellation - -Verified signature against tlog entry index 7771317 at URL: https://rekor.sigstore.dev/api/v1/log/entries/24296fb24b8ad77af2c04c8b4ae0d5bc5... 
-Verified build using builder https://github.com/slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@refs/tags/v1.2.2 at commit 18e9924b416323c37b9cdfd6cc728de8a947424a -PASSED: Verified SLSA provenance -``` diff --git a/docs/versioned_docs/version-2.22/workflows/verify-cluster.md b/docs/versioned_docs/version-2.22/workflows/verify-cluster.md deleted file mode 100644 index b6595ebf2..000000000 --- a/docs/versioned_docs/version-2.22/workflows/verify-cluster.md +++ /dev/null @@ -1,97 +0,0 @@ -# Verify your cluster - -Constellation's [attestation feature](../architecture/attestation.md) allows you, or a third party, to verify the integrity and confidentiality of your Constellation cluster. - -## Fetch measurements - -To verify the integrity of Constellation you need trusted measurements to verify against. For each node image released by Edgeless Systems, there are signed measurements, which you can download using the CLI: - -```bash -constellation config fetch-measurements -``` - -This command performs the following steps: - -1. Download the signed measurements for the configured image. By default, this will use Edgeless Systems' public measurement registry. -2. Verify the signature of the measurements. This will use Edgeless Systems' [public key](https://edgeless.systems/es.pub). -3. Write measurements into configuration file. - -The configuration file then contains a list of `measurements` similar to the following: - -```yaml -# ... -measurements: - 0: - expected: "0f35c214608d93c7a6e68ae7359b4a8be5a0e99eea9107ece427c4dea4e439cf" - warnOnly: false - 4: - expected: "02c7a67c01ec70ffaf23d73a12f749ab150a8ac6dc529bda2fe1096a98bf42ea" - warnOnly: false - 5: - expected: "e6949026b72e5045706cd1318889b3874480f7a3f7c5c590912391a2d15e6975" - warnOnly: true - 8: - expected: "0000000000000000000000000000000000000000000000000000000000000000" - warnOnly: false - 9: - expected: "f0a6e8601b00e2fdc57195686cd4ef45eb43a556ac1209b8e25d993213d68384" - warnOnly: false - 11: - expected: "0000000000000000000000000000000000000000000000000000000000000000" - warnOnly: false - 12: - expected: "da99eb6cf7c7fbb692067c87fd5ca0b7117dc293578e4fea41f95d3d3d6af5e2" - warnOnly: false - 13: - expected: "0000000000000000000000000000000000000000000000000000000000000000" - warnOnly: false - 14: - expected: "d7c4cc7ff7933022f013e03bdee875b91720b5b86cf1753cad830f95e791926f" - warnOnly: true - 15: - expected: "0000000000000000000000000000000000000000000000000000000000000000" - warnOnly: false -# ... -``` - -Each entry specifies the expected value of the Constellation node, and whether the measurement should be enforced (`warnOnly: false`), or only a warning should be logged (`warnOnly: true`). -By default, the subset of the [available measurements](../architecture/attestation.md#runtime-measurements) that can be locally reproduced and verified is enforced. - -During attestation, the validating side (CLI or [join service](../architecture/microservices.md#joinservice)) compares each measurement reported by the issuing side (first node or joining node) individually. -For mismatching measurements that have set `warnOnly` to `true` only a warning is emitted. -For mismatching measurements that have set `warnOnly` to `false` an error is emitted and attestation fails. -If attestation fails for a new node, it isn't permitted to join the cluster. - -## The *verify* command - -:::note -The steps below are purely optional. They're automatically executed by `constellation apply` when you initialize your cluster. 
The `constellation verify` command mostly has an illustrative purpose. -::: - -The `verify` command obtains and verifies an attestation statement from a running Constellation cluster. - -```bash -constellation verify [--cluster-id ...] -``` - -From the attestation statement, the command verifies the following properties: - -* The cluster is using the correct Confidential VM (CVM) type. -* Inside the CVMs, the correct node images are running. The node images are identified through the measurements obtained in the previous step. -* The unique ID of the cluster matches the one from your `constellation-state.yaml` file or passed in via `--cluster-id`. - -Once the above properties are verified, you know that you are talking to the right Constellation cluster and it's in a good and trustworthy shape. - -### Custom arguments - -The `verify` command also allows you to verify any Constellation deployment that you have network access to. For this you need the following: - -* The IP address of a running Constellation cluster's [VerificationService](../architecture/microservices.md#verificationservice). The `VerificationService` is exposed via a `NodePort` service using the external IP address of your cluster. Run `kubectl get nodes -o wide` and look for `EXTERNAL-IP`. -* The cluster's *clusterID*. See [cluster identity](../architecture/keys.md#cluster-identity) for more details. -* A `constellation-conf.yaml` file with the expected measurements of the cluster in your working directory. - -For example: - -```shell-session -constellation verify -e 192.0.2.1 --cluster-id Q29uc3RlbGxhdGlvbkRvY3VtZW50YXRpb25TZWNyZXQ= -``` diff --git a/docs/versioned_docs/version-2.23/_media/SLSA-Badge-full-level3.svg b/docs/versioned_docs/version-2.23/_media/SLSA-Badge-full-level3.svg deleted file mode 100644 index 7154d4a13..000000000 --- a/docs/versioned_docs/version-2.23/_media/SLSA-Badge-full-level3.svg +++ /dev/null @@ -1,47 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/docs/versioned_docs/version-2.23/_media/benchmark_fio_azure_bw.png b/docs/versioned_docs/version-2.23/_media/benchmark_fio_azure_bw.png deleted file mode 100644 index a82ebe2d0..000000000 Binary files a/docs/versioned_docs/version-2.23/_media/benchmark_fio_azure_bw.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.23/_media/benchmark_fio_azure_iops.png b/docs/versioned_docs/version-2.23/_media/benchmark_fio_azure_iops.png deleted file mode 100644 index 1723257a8..000000000 Binary files a/docs/versioned_docs/version-2.23/_media/benchmark_fio_azure_iops.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.23/_media/benchmark_fio_gcp_bw.png b/docs/versioned_docs/version-2.23/_media/benchmark_fio_gcp_bw.png deleted file mode 100644 index 4f0ecc94b..000000000 Binary files a/docs/versioned_docs/version-2.23/_media/benchmark_fio_gcp_bw.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.23/_media/benchmark_fio_gcp_iops.png b/docs/versioned_docs/version-2.23/_media/benchmark_fio_gcp_iops.png deleted file mode 100644 index 571086da2..000000000 Binary files a/docs/versioned_docs/version-2.23/_media/benchmark_fio_gcp_iops.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.23/_media/benchmark_net_p2p_azure.png b/docs/versioned_docs/version-2.23/_media/benchmark_net_p2p_azure.png deleted file mode 100644 index 9130349c7..000000000 Binary files a/docs/versioned_docs/version-2.23/_media/benchmark_net_p2p_azure.png and /dev/null 
differ diff --git a/docs/versioned_docs/version-2.23/_media/benchmark_net_p2p_gcp.png b/docs/versioned_docs/version-2.23/_media/benchmark_net_p2p_gcp.png deleted file mode 100644 index a41557e96..000000000 Binary files a/docs/versioned_docs/version-2.23/_media/benchmark_net_p2p_gcp.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.23/_media/benchmark_net_p2svc_azure.png b/docs/versioned_docs/version-2.23/_media/benchmark_net_p2svc_azure.png deleted file mode 100644 index d83e17f5a..000000000 Binary files a/docs/versioned_docs/version-2.23/_media/benchmark_net_p2svc_azure.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.23/_media/benchmark_net_p2svc_gcp.png b/docs/versioned_docs/version-2.23/_media/benchmark_net_p2svc_gcp.png deleted file mode 100644 index 55916a1de..000000000 Binary files a/docs/versioned_docs/version-2.23/_media/benchmark_net_p2svc_gcp.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.23/_media/benchmark_vault/5replicas/max_latency.png b/docs/versioned_docs/version-2.23/_media/benchmark_vault/5replicas/max_latency.png deleted file mode 100644 index 696250181..000000000 Binary files a/docs/versioned_docs/version-2.23/_media/benchmark_vault/5replicas/max_latency.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.23/_media/benchmark_vault/5replicas/mean_latency.png b/docs/versioned_docs/version-2.23/_media/benchmark_vault/5replicas/mean_latency.png deleted file mode 100644 index 3b43298ac..000000000 Binary files a/docs/versioned_docs/version-2.23/_media/benchmark_vault/5replicas/mean_latency.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.23/_media/benchmark_vault/5replicas/min_latency.png b/docs/versioned_docs/version-2.23/_media/benchmark_vault/5replicas/min_latency.png deleted file mode 100644 index 1046df67e..000000000 Binary files a/docs/versioned_docs/version-2.23/_media/benchmark_vault/5replicas/min_latency.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.23/_media/benchmark_vault/5replicas/p99_latency.png b/docs/versioned_docs/version-2.23/_media/benchmark_vault/5replicas/p99_latency.png deleted file mode 100644 index 0190118b2..000000000 Binary files a/docs/versioned_docs/version-2.23/_media/benchmark_vault/5replicas/p99_latency.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.23/_media/concept-constellation.svg b/docs/versioned_docs/version-2.23/_media/concept-constellation.svg deleted file mode 100644 index 30d32bf6d..000000000 --- a/docs/versioned_docs/version-2.23/_media/concept-constellation.svg +++ /dev/null @@ -1,460 +0,0 @@ - - diff --git a/docs/versioned_docs/version-2.23/_media/concept-managed.svg b/docs/versioned_docs/version-2.23/_media/concept-managed.svg deleted file mode 100644 index 5645a608f..000000000 --- a/docs/versioned_docs/version-2.23/_media/concept-managed.svg +++ /dev/null @@ -1,591 +0,0 @@ - - diff --git a/docs/versioned_docs/version-2.23/_media/constellation_oneline.svg b/docs/versioned_docs/version-2.23/_media/constellation_oneline.svg deleted file mode 100644 index 4e354958a..000000000 --- a/docs/versioned_docs/version-2.23/_media/constellation_oneline.svg +++ /dev/null @@ -1,52 +0,0 @@ - - - - - - - - diff --git a/docs/versioned_docs/version-2.23/_media/example-emojivoto.jpg b/docs/versioned_docs/version-2.23/_media/example-emojivoto.jpg deleted file mode 100644 index 4be0d5b26..000000000 Binary files a/docs/versioned_docs/version-2.23/_media/example-emojivoto.jpg and /dev/null differ diff --git 
a/docs/versioned_docs/version-2.23/_media/example-online-boutique.jpg b/docs/versioned_docs/version-2.23/_media/example-online-boutique.jpg deleted file mode 100644 index 026f0d865..000000000 Binary files a/docs/versioned_docs/version-2.23/_media/example-online-boutique.jpg and /dev/null differ diff --git a/docs/versioned_docs/version-2.23/_media/recovery-gcp-serial-console-link.png b/docs/versioned_docs/version-2.23/_media/recovery-gcp-serial-console-link.png deleted file mode 100644 index eb67f0e99..000000000 Binary files a/docs/versioned_docs/version-2.23/_media/recovery-gcp-serial-console-link.png and /dev/null differ diff --git a/docs/versioned_docs/version-2.23/_media/tcb.svg b/docs/versioned_docs/version-2.23/_media/tcb.svg deleted file mode 100644 index e5bcb5b95..000000000 --- a/docs/versioned_docs/version-2.23/_media/tcb.svg +++ /dev/null @@ -1,535 +0,0 @@ - - diff --git a/docs/versioned_docs/version-2.23/architecture/attestation.md b/docs/versioned_docs/version-2.23/architecture/attestation.md deleted file mode 100644 index 9bd157460..000000000 --- a/docs/versioned_docs/version-2.23/architecture/attestation.md +++ /dev/null @@ -1,409 +0,0 @@ -# Attestation - -This page explains Constellation's attestation process and highlights the cornerstones of its trust model. - -## Terms - -The following lists terms and concepts that help to understand the attestation concept of Constellation. - -### Trusted Platform Module (TPM) - -A TPM chip is a dedicated tamper-resistant crypto-processor. -It can securely store artifacts such as passwords, certificates, encryption keys, or *runtime measurements* (more on this below). -When a TPM is implemented in software, it's typically called a *virtual* TPM (vTPM). - -### Runtime measurement - -A runtime measurement is a cryptographic hash of the memory pages of a so called *runtime component*. Runtime components of interest typically include a system's bootloader or OS kernel. - -### Platform Configuration Register (PCR) - -A Platform Configuration Register (PCR) is a memory location in the TPM that has some unique properties. -To store a new value in a PCR, the existing value is extended with a new value as follows: - -``` -PCR[N] = HASHalg( PCR[N] || ArgumentOfExtend ) -``` - -The PCRs are typically used to store runtime measurements. -The new value of a PCR is always an extension of the existing value. -Thus, storing the measurements of multiple components into the same PCR irreversibly links them together. - -### Measured boot - -Measured boot builds on the concept of chained runtime measurements. -Each component in the boot chain loads and measures the next component into the PCR before executing it. -By comparing the resulting PCR values against trusted reference values, the integrity of the entire boot chain and thereby the running system can be ensured. - -### Remote attestation (RA) - -Remote attestation is the process of verifying certain properties of an application or platform, such as integrity and confidentiality, from a remote location. -In the case of a measured boot, the goal is to obtain a signed attestation statement on the PCR values of the boot measurements. -The statement can then be verified and compared to a set of trusted reference values. -This way, the integrity of the platform can be ensured before sharing secrets with it. - -### Confidential virtual machine (CVM) - -Confidential computing (CC) is the protection of data in-use with hardware-based trusted execution environments (TEEs). 
-With CVMs, TEEs encapsulate entire virtual machines and isolate them against the hypervisor, other VMs, and direct memory access. -After loading the initial VM image into encrypted memory, the hypervisor calls for a secure processor to measure these initial memory pages. -The secure processor locks these pages and generates an attestation report on the initial page measurements. -CVM memory pages are encrypted with a key that resides inside the secure processor, which makes sure only the guest VM can access them. -The attestation report is signed by the secure processor and can be verified using remote attestation via the certificate authority of the hardware vendor. -Such an attestation statement guarantees the confidentiality and integrity of a CVM. - -### Attested TLS (aTLS) - -In a CC environment, attested TLS (aTLS) can be used to establish secure connections between two parties using the remote attestation features of the CC components. - -aTLS modifies the TLS handshake by embedding an attestation statement into the TLS certificate. -Instead of relying on a certificate authority, aTLS uses this attestation statement to establish trust in the certificate. - -The protocol can be used by clients to verify a server certificate, by a server to verify a client certificate, or for mutual verification (mutual aTLS). - -## Overview - -The challenge for Constellation is to lift a CVM's attestation statement to the Kubernetes software layer and make it end-to-end verifiable. -From there, Constellation needs to expand the attestation from a single CVM to the entire cluster. - -The [*JoinService*](microservices.md#joinservice) and [*VerificationService*](microservices.md#verificationservice) are where all runs together. -Internally, the *JoinService* uses remote attestation to securely join CVM nodes to the cluster. -Externally, the *VerificationService* provides an attestation statement for the cluster's CVMs and configuration. - -The following explains the details of both steps. - -## Node attestation - -The idea is that Constellation nodes should have verifiable integrity from the CVM hardware measurement up to the Kubernetes software layer. -The solution is a verifiable boot chain and an integrity-protected runtime environment. - -Constellation uses measured boot within CVMs, measuring each component in the boot process before executing it. -Outside of CC, this is usually implemented via TPMs. -CVM technologies differ in how they implement runtime measurements, but the general concepts are similar to those of a TPM. -For simplicity, TPM terminology like *PCR* is used in the following. - -When a Constellation node image boots inside a CVM, measured boot is used for all stages and components of the boot chain. -This process goes up to the root filesystem. -The root filesystem is mounted read-only with integrity protection. -For the details on the image and boot stages see the [image architecture](../architecture/images.md) documentation. -Any changes to the image will inevitably also change the corresponding PCR values. -To create a node attestation statement, the Constellation image obtains a CVM attestation statement from the hardware. -This includes the runtime measurements and thereby binds the measured boot results to the CVM hardware measurement. - -In addition to the image measurements, Constellation extends a PCR during the [initialization phase](../workflows/create.md) that irrevocably marks the node as initialized. 
-The measurement is created using the [*clusterID*](../architecture/keys.md#cluster-identity), tying all future attestation statements to this ID. -Thereby, an attestation statement is unique for every cluster and a node can be identified unambiguously as being initialized. - -To verify an attestation, the hardware's signature and a statement are verified first to establish trust in the contained runtime measurements. -If successful, the measurements are verified against the trusted values of the particular Constellation release version. -Finally, the measurement of the *clusterID* can be compared by calculating it with the [master secret](keys.md#master-secret). - -### Runtime measurements - -Constellation uses runtime measurements to implement the measured boot approach. -As stated above, the underlying hardware technology and guest firmware differ in their implementations of runtime measurements. -The following gives a detailed description of the available measurements in the different cloud environments. - -The runtime measurements consist of two types of values: - -* **Measurements produced by the cloud infrastructure and firmware of the CVM**: -These are measurements of closed-source firmware and other values controlled by the cloud provider. -While not being reproducible for the user, some of them can be compared against previously observed values. -Others may change frequently and aren't suitable for verification. -The [signed image measurements](#chain-of-trust) include measurements that are known, previously observed values. - -* **Measurements produced by the Constellation bootloader and boot chain**: -The Constellation Bootloader takes over from the CVM firmware and [measures the rest of the boot chain](images.md). -The Constellation [Bootstrapper](microservices.md#bootstrapper) is the first user mode component that runs in a Constellation image. -It extends PCR registers with the [IDs](keys.md#cluster-identity) of the cluster marking a node as initialized. - -Constellation allows to specify in the config which measurements should be enforced during the attestation process. -Enforcing non-reproducible measurements controlled by the cloud provider means that changes in these values require manual updates to the cluster's config. -By default, Constellation only enforces measurements that are stable values produced by the infrastructure or by Constellation directly. - - - - -Constellation uses the [vTPM](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html) (NitroTPM) feature of the [AWS Nitro System](http://aws.amazon.com/ec2/nitro/) on AWS for runtime measurements. - -The vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. -The VMs are attested by obtaining signed PCR values over the VM's boot configuration from the TPM and comparing them to a known, good state (measured boot). - -The following table lists all PCR values of the vTPM and the measured components. -It also lists what components of the boot chain did the measurements and if the value is reproducible and verifiable. -The latter means that the value can be generated offline and compared to the one in the vTPM. 
- -| PCR | Components | Measured by | Reproducible and verifiable | -| ----------- | ---------------------------------------------------------------- | -------------------------------------- | --------------------------- | -| 0 | Firmware | AWS | No | -| 1 | Firmware | AWS | No | -| 2 | Firmware | AWS | No | -| 3 | Firmware | AWS | No | -| 4 | Constellation Bootloader, Kernel, initramfs, Kernel command line | AWS, Constellation Bootloader | Yes | -| 5 | Firmware | AWS | No | -| 6 | Firmware | AWS | No | -| 7 | Secure Boot Policy | AWS, Constellation Bootloader | No | -| 8 | - | - | - | -| 9 | initramfs, Kernel command line | Linux Kernel | Yes | -| 10 | User space | Linux IMA | No[^1] | -| 11 | Unified Kernel Image components | Constellation Bootloader | Yes | -| 12 | Reserved | (User space, Constellation Bootloader) | Yes | -| 13 | Reserved | (Constellation Bootloader) | Yes | -| 14 | Secure Boot State | Constellation Bootloader | No | -| 15 | ClusterID | Constellation Bootstrapper | Yes | -| 16–23 | Unused | - | - | - - - - -Constellation uses the [vTPM](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch#vtpm) feature of Azure CVMs for runtime measurements. -This vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. -It provides a [measured boot](https://docs.microsoft.com/en-us/azure/security/fundamentals/measured-boot-host-attestation#measured-boot) verification that's based on the trusted launch feature of [Trusted Launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch). - -The following table lists all PCR values of the vTPM and the measured components. -It also lists what components of the boot chain did the measurements and if the value is reproducible and verifiable. -The latter means that the value can be generated offline and compared to the one in the vTPM. - -| PCR | Components | Measured by | Reproducible and verifiable | -| ----------- | ---------------------------------------------------------------- | -------------------------------------- | --------------------------- | -| 0 | Firmware | Azure | No | -| 1 | Firmware | Azure | No | -| 2 | Firmware | Azure | No | -| 3 | Firmware | Azure | No | -| 4 | Constellation Bootloader, Kernel, initramfs, Kernel command line | Azure, Constellation Bootloader | Yes | -| 5 | Reserved | Azure | No | -| 6 | VM Unique ID | Azure | No | -| 7 | Secure Boot State | Azure, Constellation Bootloader | No | -| 8 | - | - | - | -| 9 | initramfs, Kernel command line | Linux Kernel | Yes | -| 10 | User space | Linux IMA | No[^1] | -| 11 | Unified Kernel Image components | Constellation Bootloader | Yes | -| 12 | Reserved | (User space, Constellation Bootloader) | Yes | -| 13 | Reserved | (Constellation Bootloader) | Yes | -| 14 | Secure Boot State | Constellation Bootloader | No | -| 15 | ClusterID | Constellation Bootstrapper | Yes | -| 16–23 | Unused | - | - | - - - - -Constellation uses the [vTPM](https://cloud.google.com/compute/confidential-vm/docs/about-cvm) feature of CVMs on GCP for runtime measurements. -Note that this vTPM doesn't run inside the hardware-protected CVM context, but is emulated by the hypervisor. - -The vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. 
-It provides a [launch attestation report](https://cloud.google.com/compute/confidential-vm/docs/monitoring#about_launch_attestation_report_events) that's based on the measured boot feature of [Shielded VMs](https://cloud.google.com/compute/shielded-vm/docs/shielded-vm#measured-boot). - -The following table lists all PCR values of the vTPM and the measured components. -It also lists what components of the boot chain did the measurements and if the value is reproducible and verifiable. -The latter means that the value can be generated offline and compared to the one in the vTPM. - -| PCR | Components | Measured by | Reproducible and verifiable | -| ----------- | ---------------------------------------------------------------- | -------------------------------------- | --------------------------- | -| 0 | CVM version and technology | GCP | No | -| 1 | Firmware | GCP | No | -| 2 | Firmware | GCP | No | -| 3 | Firmware | GCP | No | -| 4 | Constellation Bootloader, Kernel, initramfs, Kernel command line | GCP, Constellation Bootloader | Yes | -| 5 | Disk GUID partition table | GCP | No | -| 6 | Disk GUID partition table | GCP | No | -| 7 | GCP Secure Boot Policy | GCP, Constellation Bootloader | No | -| 8 | - | - | - | -| 9 | initramfs, Kernel command line | Linux Kernel | Yes | -| 10 | User space | Linux IMA | No[^1] | -| 11 | Unified Kernel Image components | Constellation Bootloader | Yes | -| 12 | Reserved | (User space, Constellation Bootloader) | Yes | -| 13 | Reserved | (Constellation Bootloader) | Yes | -| 14 | Secure Boot State | Constellation Bootloader | No | -| 15 | ClusterID | Constellation Bootstrapper | Yes | -| 16–23 | Unused | - | - | - - - - -Constellation uses a hypervisor-based vTPM for runtime measurements. - -The vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. -The VMs are attested by obtaining signed PCR values over the VM's boot configuration from the TPM and comparing them to a known, good state (measured boot). - -The following table lists all PCR values of the vTPM and the measured components. -It also lists what components of the boot chain did the measurements and if the value is reproducible and verifiable. -The latter means that the value can be generated offline and compared to the one in the vTPM. 
- -| PCR | Components | Measured by | Reproducible and verifiable | -| ----------- | ---------------------------------------------------------------- | -------------------------------------- | --------------------------- | -| 0 | Firmware | STACKIT | No | -| 1 | Firmware | STACKIT | No | -| 2 | Firmware | STACKIT | No | -| 3 | Firmware | STACKIT | No | -| 4 | Constellation Bootloader, Kernel, initramfs, Kernel command line | STACKIT, Constellation Bootloader | Yes | -| 5 | Firmware | STACKIT | No | -| 6 | Firmware | STACKIT | No | -| 7 | Secure Boot Policy | STACKIT, Constellation Bootloader | No | -| 8 | - | - | - | -| 9 | initramfs, Kernel command line | Linux Kernel | Yes | -| 10 | User space | Linux IMA | No[^1] | -| 11 | Unified Kernel Image components | Constellation Bootloader | Yes | -| 12 | Reserved | (User space, Constellation Bootloader) | Yes | -| 13 | Reserved | (Constellation Bootloader) | Yes | -| 14 | Secure Boot State | Constellation Bootloader | No | -| 15 | ClusterID | Constellation Bootstrapper | Yes | -| 16–23 | Unused | - | - | - - - - -### CVM verification - -To verify the integrity of the received attestation statement, a chain of trust from the CVM technology to the interface providing the statement has to be established. -For verification of the CVM technology, Constellation may expose additional options in its config file. - - - - -On AWS, AMD SEV-SNP is used to provide runtime encryption to the VMs. -An SEV-SNP attestation report is used to establish trust in the VM. -You may customize certain parameters for verification of the attestation statement using the Constellation config file. - -* TCB versions - - You can set the minimum version numbers of components in the SEV-SNP TCB. - Use the latest versions to enforce that only machines with the most recent firmware updates are allowed to join the cluster. - Alternatively, you can set a lower minimum version to allow slightly out-of-date machines to still be able to join the cluster. - -* AMD Root Key Certificate - - This certificate is the root of trust for verifying the SEV-SNP certificate chain. - -* AMD Signing Key Certificate - - This is the intermediate certificate for verifying the SEV-SNP report's signature. - If it's not specified, the CLI fetches it from the AMD key distribution server. - - - - -On Azure, AMD SEV-SNP is used to provide runtime encryption to the VMs. -An SEV-SNP attestation report is used to establish trust in the vTPM running inside the VM. -You may customize certain parameters for verification of the attestation statement using the Constellation config file. - -* TCB versions - - You can set the minimum version numbers of components in the SEV-SNP TCB. - Use the latest versions to enforce that only machines with the most recent firmware updates are allowed to join the cluster. - Alternatively, you can set a lower minimum version to allow slightly out-of-date machines to still be able to join the cluster. - -* AMD Root Key Certificate - - This certificate is the root of trust for verifying the SEV-SNP certificate chain. - -* Firmware Signer - - This config option allows you to specify how the firmware signer should be verified. - More explicitly, it controls the verification of the `IDKeyDigest` value in the SEV-SNP attestation report. - You can provide a list of accepted key digests and specify a policy on how this list is compared against the reported `IDKeyDigest`. - - - - -On GCP, AMD SEV-SNP is used to provide runtime encryption to the VMs. 
-An SEV-SNP attestation report is used to establish trust in the VM. -You may customize certain parameters for verification of the attestation statement using the Constellation config file. - -* TCB versions - - You can set the minimum version numbers of components in the SEV-SNP TCB. - Use the latest versions to enforce that only machines with the most recent firmware updates are allowed to join the cluster. - Alternatively, you can set a lower minimum version to allow slightly out-of-date machines to still be able to join the cluster. - -* AMD Root Key Certificate - - This certificate is the root of trust for verifying the SEV-SNP certificate chain. - -* AMD Signing Key Certificate - - This is the intermediate certificate for verifying the SEV-SNP report's signature. - If it's not specified, the CLI fetches it from the AMD key distribution server. - - - - -On STACKIT, AMD SEV-ES is used to provide runtime encryption to the VMs. -The hypervisor-based vTPM is used to establish trust in the VM via [runtime measurements](#runtime-measurements). -There is no additional configuration available for STACKIT. - - - - -## Cluster attestation - -Cluster-facing, Constellation's [*JoinService*](microservices.md#joinservice) verifies each node joining the cluster given the configured ground truth runtime measurements. -User-facing, the [*VerificationService*](microservices.md#verificationservice) provides an interface to verify a node using remote attestation. -By verifying the first node during the [initialization](microservices.md#bootstrapper) and configuring the ground truth measurements that are subsequently enforced by the *JoinService*, the whole cluster is verified in a transitive way. - -### Cluster-facing attestation - -The *JoinService* is provided with the runtime measurements of the whitelisted Constellation image version as the ground truth. -During the initialization and the cluster bootstrapping, each node connects to the *JoinService* using [aTLS](#attested-tls-atls). -During the handshake, the node transmits an attestation statement including its runtime measurements. -The *JoinService* verifies that statement and compares the measurements against the ground truth. -For details of the initialization process check the [microservice descriptions](microservices.md). - -After the initialization, every node updates its runtime measurements with the *clusterID* value, marking it irreversibly as initialized. -When an initialized node tries to join another cluster, its measurements inevitably mismatch the measurements of an uninitialized node and it will be declined. - -### User-facing attestation - -The [*VerificationService*](microservices.md#verificationservice) provides an endpoint for obtaining its hardware-based remote attestation statement, which includes the runtime measurements. -A user can [verify](../workflows/verify-cluster.md) this statement and compare the measurements against the configured ground truth and, thus, verify the identity and integrity of all Constellation components and the cluster configuration. Subsequently, the user knows that the entire cluster is in the expected state and is trustworthy. - -## Putting it all together - -This section puts the aforementioned concepts together and illustrate how trust into a Constellation cluster is established and maintained. - -### CLI and node images - -It all starts with the CLI executable. The CLI is signed by Edgeless Systems. 
To ensure non-repudiability for CLI releases, Edgeless Systems publishes corresponding signatures to the public ledger of the [sigstore project](https://www.sigstore.dev/). There's a [step-by-step guide](../workflows/verify-cli.md) on how to verify CLI signatures based on sigstore. - -The CLI contains the latest runtime measurements of the Constellation node image for all supported cloud platforms. In case a different version of the node image is to be used, the corresponding runtime measurements can be fetched using the CLI's [fetch-measurements command](../reference/cli.md#constellation-config-fetch-measurements). This command downloads the runtime measurements and the corresponding signature from cdn.confidential.cloud. See for example the following files corresponding to node image v2.16.3: - -* [Measurements](https://cdn.confidential.cloud/constellation/v2/ref/-/stream/stable/v2.16.3/image/measurements.json) -* [Signature](https://cdn.confidential.cloud/constellation/v2/ref/-/stream/stable/v2.16.3/image/measurements.json.sig) - -The CLI contains the long-term public key of Edgeless Systems to verify the signature of downloaded runtime measurements. - -### Cluster creation - -When a cluster is [created](../workflows/create.md), the CLI automatically verifies the runtime measurements of the *first node* using remote attestation. Based on this, the CLI and the first node set up a temporary TLS connection. This [aTLS](#attested-tls-atls) connection is used for two things: - -1. The CLI sends the [master secret](../architecture/keys.md#master-secret) of the to-be-created cluster to the first node. The master secret is generated by the CLI. -2. The first node sends a [kubeconfig file](https://www.redhat.com/sysadmin/kubeconfig) with Kubernetes credentials to the CLI. - -After this, the aTLS connection is closed and the first node bootstraps the Kubernetes cluster. All subsequent interactions between the CLI and the cluster go via the [Kubernetes API](https://kubernetes.io/docs/concepts/overview/kubernetes-api/) server running inside the cluster. The CLI (and other tools like kubectl) use the credentials referenced by the kubeconfig file to authenticate themselves towards the Kubernetes API server and to establish an mTLS connection. - -The CLI connects to the Kubernetes API to write the runtime measurements for the applicable node image to etcd. The JoinService uses these runtime measurements to verify all nodes that join the cluster subsequently. - -### Chain of trust - -In summary, there's a chain of trust based on cryptographic signatures that goes from the user to the cluster via the CLI. This is illustrated in the following diagram. - -```mermaid -flowchart LR - A[User]-- "verifies" -->B[CLI] - B[CLI]-- "verifies" -->C([Runtime measurements]) - D[Edgeless Systems]-- "signs" -->B[CLI] - D[Edgeless Systems]-- "signs" -->C([Runtime measurements]) - B[CLI]-- "verifies (remote attestation)" -->E[First node] - E[First node]-- "verifies (remote attestation)" -->F[Other nodes] - C([Runtime measurements]) -.-> E[First node] - C([Runtime measurements]) -.-> F[Other nodes] -``` - -### Upgrades - -Whenever a cluster is [upgraded](../workflows/upgrade.md) to a new version of the node image, the CLI sends the corresponding runtime measurements via the Kubernetes API server. The new runtime measurements are stored in etcd within the cluster and replace any previous runtime measurements. The new runtime measurements are then used automatically by the JoinService for the verification of new nodes.
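As a rough illustration of this chain of trust, the check performed by the CLI's `fetch-measurements` command can be reproduced manually with Cosign. This is a hedged sketch: it assumes the v2.16.3 measurement files linked above and that the signature verifies against the long-term public key at `https://edgeless.systems/es.pub`; the CLI may use additional checks internally.

```shell-session
$ curl -sLO https://cdn.confidential.cloud/constellation/v2/ref/-/stream/stable/v2.16.3/image/measurements.json
$ curl -sLO https://cdn.confidential.cloud/constellation/v2/ref/-/stream/stable/v2.16.3/image/measurements.json.sig
$ cosign verify-blob --key https://edgeless.systems/es.pub \
    --signature measurements.json.sig measurements.json

Verified OK
```

If the verification succeeds, the downloaded measurements can be trusted to the same extent as the measurements embedded in the CLI.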
- -## References - -[^1]: Linux IMA produces runtime measurements of user-space binaries. -However, these measurements aren't deterministic and thus, PCR\[10] can't be compared to a constant value. -Instead, a policy engine must be used to verify the TPM event log against a policy. diff --git a/docs/versioned_docs/version-2.23/architecture/encrypted-storage.md b/docs/versioned_docs/version-2.23/architecture/encrypted-storage.md deleted file mode 100644 index f047fa4a9..000000000 --- a/docs/versioned_docs/version-2.23/architecture/encrypted-storage.md +++ /dev/null @@ -1,62 +0,0 @@ -# Encrypted persistent storage - -Confidential VMs provide runtime memory encryption to protect data in use. -In the context of Kubernetes, this is sufficient for the confidentiality and integrity of stateless services. -Consider a front-end web server, for example, that keeps all connection information cached in main memory. -No sensitive data is ever written to an insecure medium. -However, many real-world applications need some form of state or data-lake service that's connected to a persistent storage device and requires encryption at rest. -As described in [Use persistent storage](../workflows/storage.md), cloud service providers (CSPs) use the container storage interface (CSI) to make their storage solutions available to Kubernetes workloads. -These CSI storage solutions often support some sort of encryption. -For example, Google Cloud [encrypts data at rest by default](https://cloud.google.com/security/encryption/default-encryption), without any action required by the customer. - -## Cloud provider-managed encryption - -CSP-managed storage solutions encrypt the data in the cloud backend before writing it physically to disk. -In the context of confidential computing and Constellation, the CSP and its managed services aren't trusted. -Hence, cloud provider-managed encryption protects your data from offline hardware access to physical storage devices. -It doesn't protect it from anyone with infrastructure-level access to the storage backend or a malicious insider in the cloud platform. -Even with "bring your own key" or similar concepts, the CSP performs the encryption process with access to the keys and plaintext data. - -In the security model of Constellation, securing persistent storage and thereby data at rest requires that all cryptographic operations are performed inside a trusted execution environment. -Consequently, using CSP-managed encryption of persistent storage usually isn't an option. - -## Constellation-managed encryption - -Constellation provides CSI drivers for storage solutions in all major clouds with built-in encryption support. -Block storage provisioned by the CSP is [mapped](https://guix.gnu.org/manual/en/html_node/Mapped-Devices.html) using the [dm-crypt](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/dm-crypt.html), and optionally the [dm-integrity](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/dm-integrity.html), kernel modules, before it's formatted and accessed by the Kubernetes workloads. -All cryptographic operations happen inside the trusted environment of the confidential Constellation node. - -Note that for integrity-protected disks, [volume expansion](https://kubernetes.io/blog/2018/07/12/resizing-persistent-volumes-using-kubernetes/) isn't supported. - -By default the driver uses data encryption keys (DEKs) issued by the Constellation [*KeyService*](microservices.md#keyservice). 
-The DEKs are in turn derived from the Constellation's key encryption key (KEK), which is directly derived from the [master secret](keys.md#master-secret). -This is the recommended mode of operation, and also requires the least amount of setup by the cluster administrator. - -Alternatively, the driver can be configured to use a key management system to store and access KEKs and DEKs. - -Refer to [keys and cryptography](keys.md) for more details on key management in Constellation. - -Once deployed and configured, the CSI driver ensures transparent encryption and integrity of all persistent volumes provisioned via its storage class. -Data at rest is secured without any additional actions required by the developer. - -## Cryptographic algorithms - -This section gives an overview of the libraries, cryptographic algorithms, and their configurations, used in Constellation's CSI drivers. - -### dm-crypt - -To interact with the dm-crypt kernel module, Constellation uses [libcryptsetup](https://gitlab.com/cryptsetup/cryptsetup/). -New devices are formatted as [LUKS2](https://gitlab.com/cryptsetup/LUKS2-docs/-/tree/master) partitions with a sector size of 4096 bytes. -The used key derivation function is [Argon2id](https://datatracker.ietf.org/doc/html/rfc9106) with the [recommended parameters for memory-constrained environments](https://datatracker.ietf.org/doc/html/rfc9106#section-7.4) of 3 iterations and 64 MiB of memory, utilizing 4 parallel threads. -For encryption Constellation uses AES in XTS-Plain64. The key size is 512 bit. - -### dm-integrity - -To interact with the dm-integrity kernel module, Constellation uses [libcryptsetup](https://gitlab.com/cryptsetup/cryptsetup/). -When enabled, the used data integrity algorithm is [HMAC](https://datatracker.ietf.org/doc/html/rfc2104) with SHA256 as the hash function. -The tag size is 32 Bytes. - -## Encrypted S3 object storage - -Constellation comes with a service that you can use to transparently retrofit client-side encryption to existing applications that use S3 (AWS or compatible) for storage. -To learn more, check out the [s3proxy documentation](../workflows/s3proxy.md). diff --git a/docs/versioned_docs/version-2.23/architecture/images.md b/docs/versioned_docs/version-2.23/architecture/images.md deleted file mode 100644 index 8a9c51d36..000000000 --- a/docs/versioned_docs/version-2.23/architecture/images.md +++ /dev/null @@ -1,49 +0,0 @@ -# Constellation images - -Constellation uses a minimal version of Fedora as the operating system running inside confidential VMs. This Linux distribution is optimized for containers and designed to be stateless. -The Constellation images provide measured boot and an immutable filesystem. - -## Measured boot - -```mermaid -flowchart LR - Firmware --> Bootloader - Bootloader --> uki - subgraph uki[Unified Kernel Image] - Kernel[Kernel] - initramfs[Initramfs] - cmdline[Kernel Command Line] - end - uki --> rootfs[Root Filesystem] -``` - -Measured boot uses a Trusted Platform Module (TPM) to measure every part of the boot process. This allows for verification of the integrity of a running system at any point in time. To ensure correct measurements of every stage, each stage is responsible to measure the next stage before transitioning. - -### Firmware - -With confidential VMs, the firmware is the root of trust and is measured automatically at boot. After initialization, the firmware will load and measure the bootloader before executing it. 
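For a quick, hedged look at how these measurements surface on a node, the PCR values recorded during measured boot can be read from the (v)TPM with standard tooling. This assumes `tpm2-tools` is available in the environment you're inspecting from; the register layout is described in the attestation documentation above.

```bash
# Read the SHA-256 bank of all PCRs; the values reflect the measured boot chain.
tpm2_pcrread sha256
```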
- -### Bootloader - -The bootloader is the first modifiable part of the boot chain. The bootloader is tasked with loading the kernel, initramfs and setting the kernel command line. The Constellation bootloader measures these components before starting the kernel. - -### initramfs - -The initramfs is a small filesystem loaded to prepare the actual root filesystem. The Constellation initramfs maps the block device containing the root filesystem with [dm-verity](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/verity.html). The initramfs then mounts the root filesystem from the mapped block device. - -dm-verity provides integrity checking using a cryptographic hash tree. When a block is read, its integrity is checked by verifying the tree against a trusted root hash. The initramfs reads this root hash from the previously measured kernel command line. Thus, if any block of the root filesystem's device is modified on disk, trying to read the modified block will result in a kernel panic at runtime. - -After mounting the root filesystem, the initramfs will switch over and start the `init` process of the integrity-protected root filesystem. - -## State disk - -In addition to the read-only root filesystem, each Constellation node has a disk for storing state data. -This disk is mounted readable and writable by the initramfs and contains data that should persist across reboots. -Such data can contain sensitive information and, therefore, must be stored securely. -To that end, the state disk is protected by authenticated encryption. -See the section on [keys and encryption](keys.md#storage-encryption) for more information on the cryptographic primitives in use. - -## Kubernetes components - -During initialization, the [*Bootstrapper*](microservices.md#bootstrapper) downloads and verifies the [Kubernetes components](https://kubernetes.io/docs/concepts/overview/components/) as configured by the user. -They're stored on the state partition and can be updated once new releases need to be installed. diff --git a/docs/versioned_docs/version-2.23/architecture/keys.md b/docs/versioned_docs/version-2.23/architecture/keys.md deleted file mode 100644 index 49821cd0b..000000000 --- a/docs/versioned_docs/version-2.23/architecture/keys.md +++ /dev/null @@ -1,130 +0,0 @@ -# Key management and cryptographic primitives - -Constellation protects and isolates your cluster and workloads. -To that end, cryptography is the foundation that ensures the confidentiality and integrity of all components. -Evaluating the security and compliance of Constellation requires a precise understanding of the cryptographic primitives and keys used. -The following gives an overview of the architecture and explains the technical details. - -## Confidential VMs - -Confidential VM (CVM) technology comes with hardware and software components for memory encryption, isolation, and remote attestation. -For details on the implementations and cryptographic soundness, refer to the hardware vendors' documentation and advisories. - -## Master secret - -The master secret is the cryptographic material used for deriving the [*clusterID*](#cluster-identity) and the *key encryption key (KEK)* for [storage encryption](#storage-encryption). -It's generated during the bootstrapping of a Constellation cluster. -It can either be managed by [Constellation](#constellation-managed-key-management) or an [external key management system](#user-managed-key-management). 
-In case of [recovery](#recovery-and-migration), the master secret allows to decrypt the state and recover a Constellation cluster. - -## Cluster identity - -The identity of a Constellation cluster is represented by cryptographic [measurements](attestation.md#runtime-measurements): - -The **base measurements** represent the identity of a valid, uninitialized Constellation node. -They depend on the node image, but are otherwise the same for every Constellation cluster. -On node boot, they're determined using the CVM's attestation mechanism and [measured boot up to the read-only root filesystem](images.md). - -The **clusterID** represents the identity of a single initialized Constellation cluster. -It's derived from the master secret and a cryptographically random salt and unique for every Constellation cluster. -The [Bootstrapper](microservices.md#bootstrapper) measures the *clusterID* into its own PCR before executing any code not measured as part of the *base measurements*. -See [Node attestation](attestation.md#node-attestation) for details. - -The remote attestation statement of a Constellation cluster combines the *base measurements* and the *clusterID* for a verifiable, unspoofable, unique identity. - -## Network encryption - -Constellation encrypts all cluster network communication using the [container network interface (CNI)](https://github.com/containernetworking/cni). -See [network encryption](networking.md) for more details. - -The Cilium agent running on each node establishes a secure [WireGuard](https://www.wireguard.com/) tunnel between it and all other known nodes in the cluster. -Each node creates its own [Curve25519](http://cr.yp.to/ecdh.html) encryption key pair and distributes its public key via Kubernetes. -A node uses another node's public key to decrypt and encrypt traffic from and to Cilium-managed endpoints running on that node. -Connections are always encrypted peer-to-peer using [ChaCha20](http://cr.yp.to/chacha.html) with [Poly1305](http://cr.yp.to/mac.html). -WireGuard implements [forward secrecy with key rotation every 2 minutes](https://lists.zx2c4.com/pipermail/wireguard/2017-December/002141.html). - -## Storage encryption - -Constellation supports transparent encryption of persistent storage. -The Linux kernel's device mapper-based encryption features are used to encrypt the data on the block storage level. -Currently, the following primitives are used for block storage encryption: - -* [dm-crypt](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/dm-crypt.html) -* [dm-integrity](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/dm-integrity.html) - -Adding primitives for integrity protection in the CVM attacker model are under active development and will be available in a future version of Constellation. -See [encrypted storage](encrypted-storage.md) for more details. - -As a cluster administrator, when creating a cluster, you can use the Constellation [installation program](orchestration.md) to select one of the following methods for key management: - -* Constellation-managed key management -* User-managed key management - -### Constellation-managed key management - -#### Key material and key derivation - -During the creation of a Constellation cluster, the cluster's master secret is used to derive a KEK. -This means creating two clusters with the same master secret will yield the same KEK. -Any data encryption key (DEK) is derived from the KEK via HKDF. 
-Note that the master secret is recommended to be unique for every cluster and shouldn't be reused (except in case of [recovering](../workflows/recovery.md) a cluster). - -#### State and storage - -The KEK is derived from the master secret during the initialization. -Subsequently, all other key material is derived from the KEK. -Given the same KEK, any DEK can be derived deterministically from a given identifier. -Hence, there is no need to store DEKs. They can be derived on demand. -After the KEK was derived, it's stored in memory only and never leaves the CVM context. - -#### Availability - -Constellation-managed key management has the same availability as the underlying Kubernetes cluster. -Therefore, the KEK is stored in the [distributed Kubernetes etcd storage](https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/) to allow for unexpected but non-fatal (control-plane) node failure. -The etcd storage is backed by the encrypted and integrity protected [state disk](images.md#state-disk) of the nodes. - -#### Recovery - -Constellation clusters can be recovered in the event of a disaster, even when all node machines have been stopped and need to be rebooted. -For details on the process see the [recovery workflow](../workflows/recovery.md). - -### User-managed key management - -User-managed key management is under active development and will be available soon. -In scenarios where constellation-managed key management isn't an option, this mode allows you to keep full control of your keys. -For example, compliance requirements may force you to keep your KEKs in an on-prem key management system (KMS). - -During the creation of a Constellation cluster, you specify a KEK present in a remote KMS. -This follows the common scheme of "bring your own key" (BYOK). -Constellation will support several KMSs for managing the storage and access of your KEK. -Initially, it will support the following KMSs: - -* [AWS KMS](https://aws.amazon.com/kms/) -* [GCP KMS](https://cloud.google.com/security-key-management) -* [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/#product-overview) -* [KMIP-compatible KMS](https://www.oasis-open.org/committees/tc_home.php?wg_abbrev=kmip) - -Storing the keys in Cloud KMS of AWS, Azure, or GCP binds the key usage to the particular cloud identity access management (IAM). -In the future, Constellation will support remote attestation-based access policies for Cloud KMS once available. -Note that using a Cloud KMS limits the isolation and protection to the guarantees of the particular offering. - -KMIP support allows you to use your KMIP-compatible on-prem KMS and keep full control over your keys. -This follows the common scheme of "hold your own key" (HYOK). - -The KEK is used to encrypt per-data "data encryption keys" (DEKs). -DEKs are generated to encrypt your data before storing it on persistent storage. -After being encrypted by the KEK, the DEKs are stored on dedicated cloud storage for persistence. -Currently, Constellation supports the following cloud storage options: - -* [AWS S3](https://aws.amazon.com/s3/) -* [GCP Cloud Storage](https://cloud.google.com/storage) -* [Azure Blob Storage](https://azure.microsoft.com/en-us/services/storage/blobs/#overview) - -The DEKs are only present in plaintext form in the encrypted main memory of the CVMs. -Similarly, the cryptographic operations for encrypting data before writing it to persistent storage are performed in the context of the CVMs. 
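To make the deterministic derivation described under Constellation-managed key management above more tangible, here's a minimal sketch using OpenSSL 3's `kdf` subcommand. The hex KEK and the `info` label standing in for a volume identifier are purely hypothetical, and the KeyService's actual parameters may differ.

```shell-session
$ # Same KEK and same identifier always yield the same DEK, so DEKs never need to be stored.
$ openssl kdf -keylen 32 -kdfopt digest:SHA256 \
    -kdfopt hexkey:8d0c8ff8e22eaa4e \
    -kdfopt info:pvc-4f2a HKDF
```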
- -#### Recovery and migration - -In the case of a disaster, the KEK can be used to decrypt the DEKs locally and subsequently use them to decrypt and retrieve the data. -In case of migration, configuring the same KEK will provide seamless migration of data. -Thus, only the DEK storage needs to be transferred to the new cluster alongside the encrypted data for seamless migration. diff --git a/docs/versioned_docs/version-2.23/architecture/microservices.md b/docs/versioned_docs/version-2.23/architecture/microservices.md deleted file mode 100644 index 90bae783b..000000000 --- a/docs/versioned_docs/version-2.23/architecture/microservices.md +++ /dev/null @@ -1,73 +0,0 @@ -# Microservices - -Constellation takes care of bootstrapping and initializing a Confidential Kubernetes cluster. -During the lifetime of the cluster, it handles day 2 operations such as key management, remote attestation, and updates. -These features are provided by several microservices: - -* The [Bootstrapper](microservices.md#bootstrapper) initializes a Constellation node and bootstraps the cluster -* The [JoinService](microservices.md#joinservice) joins new nodes to an existing cluster -* The [VerificationService](microservices.md#verificationservice) provides remote attestation functionality -* The [KeyService](microservices.md#keyservice) manages Constellation-internal keys - -The relations between microservices are shown in the following diagram: - -```mermaid -flowchart LR - subgraph admin [Admin's machine] - A[Constellation CLI] - end - subgraph img [Constellation OS image] - B[Constellation OS] - C[Bootstrapper] - end - subgraph Kubernetes - D[JoinService] - E[KeyService] - F[VerificationService] - end - A -- deploys --> - B -- starts --> C - C -- deploys --> D - C -- deploys --> E - C -- deploys --> F -``` - -## Bootstrapper - -The *Bootstrapper* is the first microservice launched after booting a Constellation node image. -It sets up that machine as a Kubernetes node and integrates that node into the Kubernetes cluster. -To this end, the *Bootstrapper* first downloads and verifies the [Kubernetes components](https://kubernetes.io/docs/concepts/overview/components/) at the configured versions. -The *Bootstrapper* tries to find an existing cluster and if successful, communicates with the [JoinService](microservices.md#joinservice) to join the node. -Otherwise, it waits for an initialization request to create a new Kubernetes cluster. - -## JoinService - -The *JoinService* runs as [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) on each control-plane node. -New nodes (at cluster start, or later through autoscaling) send a request to the service over [attested TLS (aTLS)](attestation.md#attested-tls-atls). -The *JoinService* verifies the new node's certificate and attestation statement. -If attestation is successful, the new node is supplied with an encryption key from the [*KeyService*](microservices.md#keyservice) for its state disk, and a Kubernetes bootstrap token. - - -```mermaid -sequenceDiagram - participant New node - participant JoinService - New node->>JoinService: aTLS handshake (server side verification) - JoinService-->>New node: # - New node->>+JoinService: IssueJoinTicket(DiskUUID, NodeName, IsControlPlane) - JoinService->>+KeyService: GetDataKey(DiskUUID) - KeyService-->>-JoinService: DiskEncryptionKey - JoinService-->>-New node: DiskEncryptionKey, KubernetesJoinToken, ... -``` - -## VerificationService - -The *VerificationService* runs as DaemonSet on each node. 
-It provides user-facing functionality for remote attestation during the cluster's lifetime via an endpoint for [verifying the cluster](attestation.md#cluster-attestation).
-Read more about the hardware-based [attestation feature](attestation.md) of Constellation and how to [verify](../workflows/verify-cluster.md) a cluster on the client side.
-
-## KeyService
-
-The *KeyService* runs as DaemonSet on each control-plane node.
-It implements the key management for the [storage encryption keys](keys.md#storage-encryption) in Constellation. These keys are used for the [state disk](images.md#state-disk) of each node and the [transparently encrypted storage](encrypted-storage.md) for Kubernetes.
-Depending on whether the [constellation-managed](keys.md#constellation-managed-key-management) or [user-managed](keys.md#user-managed-key-management) mode is used, the *KeyService* holds the key encryption key (KEK) directly or calls an external key management service (KMS) for key derivation, respectively. diff --git a/docs/versioned_docs/version-2.23/architecture/networking.md b/docs/versioned_docs/version-2.23/architecture/networking.md deleted file mode 100644 index e9cbdf029..000000000 --- a/docs/versioned_docs/version-2.23/architecture/networking.md +++ /dev/null @@ -1,22 +0,0 @@ -# Network encryption
-
-Constellation encrypts all pod communication using the [container network interface (CNI)](https://github.com/containernetworking/cni).
-To that end, Constellation deploys, configures, and operates the [Cilium](https://cilium.io/) CNI plugin.
-Cilium provides [transparent encryption](https://docs.cilium.io/en/stable/security/network/encryption) for all cluster traffic using either IPSec or [WireGuard](https://www.wireguard.com/).
-Currently, Constellation only supports WireGuard as the encryption engine.
-You can read more about the cryptographic soundness of WireGuard [in their white paper](https://www.wireguard.com/papers/wireguard.pdf).
-
-Cilium is actively working on implementing a feature called [`host-to-host`](https://github.com/cilium/cilium/pull/19401) encryption mode for WireGuard.
-With `host-to-host`, all traffic between nodes will be tunneled via WireGuard (host-to-host, host-to-pod, pod-to-host, pod-to-pod).
-Until the `host-to-host` feature is released, Constellation enables `pod-to-pod` encryption.
-This mode encrypts all traffic between Kubernetes pods using WireGuard tunnels.
-
-When using Cilium in the default setup but with encryption enabled, there is a [known issue](https://docs.cilium.io/en/v1.12/gettingstarted/encryption/#egress-traffic-to-not-yet-discovered-remote-endpoints-may-be-unencrypted)
-that can cause pod-to-pod traffic to be unencrypted.
-To mitigate this issue, Constellation adds a *strict* mode to Cilium's `pod-to-pod` encryption.
-This mode changes the default behavior so that traffic destined for an unknown endpoint isn't sent out in plaintext but dropped instead.
-The strict mode distinguishes traffic that's sent to a pod from traffic that's destined for a cluster-external endpoint by considering the pod CIDR range.
-
-Traffic originating from hosts isn't encrypted yet.
-This mainly includes health checks from the Kubernetes API server.
-Also, traffic proxied over the API server via, e.g., `kubectl port-forward` isn't encrypted.
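-
-To check that WireGuard encryption is active in a running cluster, you can query the Cilium agent directly. This is a generic Cilium check rather than a Constellation-specific command, and the pod layout and output format may differ slightly depending on the deployed Cilium version:
-
-```bash
-# Query the encryption status from one of the Cilium agent pods.
-# The output should report WireGuard as the active encryption mode.
-kubectl -n kube-system exec ds/cilium -- cilium status | grep -i encryption
-```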
diff --git a/docs/versioned_docs/version-2.23/architecture/observability.md b/docs/versioned_docs/version-2.23/architecture/observability.md deleted file mode 100644 index 0f4daffd4..000000000 --- a/docs/versioned_docs/version-2.23/architecture/observability.md +++ /dev/null @@ -1,74 +0,0 @@ -# Observability - -In Kubernetes, observability is the ability to gain insight into the behavior and performance of applications. -It helps identify and resolve issues more effectively, ensuring stability and performance of Kubernetes workloads, reducing downtime and outages, and improving efficiency. -The "three pillars of observability" are logs, metrics, and traces. - -In the context of Confidential Computing, observability is a delicate subject and needs to be applied such that it doesn't leak any sensitive information. -The following gives an overview of where and how you can apply standard observability tools in Constellation. - -## Cloud resource monitoring - -While inaccessible, Constellation's nodes are still visible as black box VMs to the hypervisor. -Resource consumption, such as memory and CPU utilization, can be monitored from the outside and observed via the cloud platforms directly. -Similarly, other resources, such as storage and network and their respective metrics, are visible via the cloud platform. - -## Metrics - -Metrics are numeric representations of data measured over intervals of time. They're essential for understanding system health and gaining insights using telemetry signals. - -By default, Constellation exposes the [metrics for Kubernetes system components](https://kubernetes.io/docs/concepts/cluster-administration/system-metrics/) inside the cluster. -Similarly, the [etcd metrics](https://etcd.io/docs/v3.5/metrics/) endpoints are exposed inside the cluster. -These [metrics endpoints can be disabled](https://kubernetes.io/docs/concepts/cluster-administration/system-metrics/#disabling-metrics). - -You can collect these cluster-internal metrics via tools such as [Prometheus](https://prometheus.io/) or the [Elastic Stack](https://www.elastic.co/de/elastic-stack/). - -Constellation's CNI Cilium also supports [metrics via Prometheus endpoints](https://docs.cilium.io/en/latest/observability/metrics/). -However, in Constellation, they're disabled by default and must be enabled first. - -## Logs - -Logs represent discrete events that usually describe what's happening with your service. -The payload is an actual message emitted from your system along with a metadata section containing a timestamp, labels, and tracking identifiers. - -### System logs - -Detailed system-level logs are accessible via `/var/log` and [journald](https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html) on the nodes directly. -They can be collected from there, for example, via [Filebeat and Logstash](https://www.elastic.co/guide/en/beats/filebeat/current/logstash-output.html), which are tools of the [Elastic Stack](https://www.elastic.co/de/elastic-stack/). - -In case of an error during the initialization, the CLI automatically collects the [Bootstrapper](./microservices.md#bootstrapper) logs and returns these as a file for [troubleshooting](../workflows/troubleshooting.md). Here is an example of such an event: - -```shell-session -Cluster initialization failed. This error is not recoverable. -Terminate your cluster and try again. 
-Fetched bootstrapper logs are stored in "constellation-cluster.log"
-```
-
-### Kubernetes logs
-
-Constellation supports the [Kubernetes logging architecture](https://kubernetes.io/docs/concepts/cluster-administration/logging/).
-By default, logs are written to the nodes' encrypted state disks.
-These include the Pod and container logs and the [system component logs](https://kubernetes.io/docs/concepts/cluster-administration/logging/#system-component-logs).
-
-[Constellation services](microservices.md) run as Pods inside the `kube-system` namespace and use the standard container logging mechanism.
-The same applies to the [Cilium Pods](https://docs.cilium.io/en/latest/operations/troubleshooting/#logs).
-
-You can collect logs from within the cluster via tools such as [Fluentd](https://github.com/fluent/fluentd), [Loki](https://github.com/grafana/loki), or the [Elastic Stack](https://www.elastic.co/de/elastic-stack/).
-
-## Traces
-
-Modern systems are implemented as complex, interconnected, and distributed microservices. Understanding request flows and system communications is challenging, mainly because all systems in a chain need to be modified to propagate tracing information. Distributed tracing is a newer approach to increasing observability and understanding performance bottlenecks. A trace represents consecutive events that reflect an end-to-end request path in a distributed system.
-
-Constellation supports [traces for Kubernetes system components](https://kubernetes.io/docs/concepts/cluster-administration/system-traces/).
-By default, they're disabled and need to be enabled first.
-
-Similarly, Cilium can be configured to [export traces](https://cilium.io/use-cases/metrics-export/).
-
-You can collect these traces via tools such as [Jaeger](https://www.jaegertracing.io/) or [Zipkin](https://zipkin.io/).
-
-## Integrations
-
-SaaS platforms such as Datadog, logz.io, Dynatrace, or New Relic address the observability challenge for Kubernetes with all-in-one solutions.
-They install agents into the cluster that collect metrics, logs, and tracing information and upload them to the platform's data lake.
-Technically, the agent-based approach is compatible with Constellation, and attaching these platforms is straightforward.
-However, you need to evaluate whether uploading the exported data to a third-party platform violates Constellation's privacy guarantees or your compliance requirements. diff --git a/docs/versioned_docs/version-2.23/architecture/orchestration.md b/docs/versioned_docs/version-2.23/architecture/orchestration.md deleted file mode 100644 index 3c8d529e7..000000000 --- a/docs/versioned_docs/version-2.23/architecture/orchestration.md +++ /dev/null @@ -1,83 +0,0 @@ -# Orchestrating Constellation clusters
-
-You can use the CLI to create a cluster on the supported cloud platforms.
-The CLI provisions the resources in your cloud environment and triggers the initialization of your cluster.
-It uses a set of parameters and an optional configuration file to manage your cluster installation.
-The CLI is also used for updating your cluster.
-
-## Workspaces
-
-Each Constellation cluster has an associated *workspace*.
-The workspace is where data such as the Constellation state and config files are stored.
-Each workspace is associated with a single cluster and configuration.
-The CLI stores state in the local filesystem, making the current directory the active workspace.
-Multiple clusters require multiple workspaces and, hence, multiple directories.
-Note that every operation on a cluster always has to be performed from the directory associated with its workspace. - -You may copy files from the workspace to other locations, -but you shouldn't move or delete them while the cluster is still being used. -The Constellation CLI takes care of managing the workspace. -Only when a cluster was terminated, and you are sure the files aren't needed anymore, should you remove a workspace. - -## Cluster creation process - -To allow for fine-grained configuration of your cluster and cloud environment, Constellation supports an extensive configuration file with strong defaults. [Generating the configuration file](../workflows/config.md) is typically the first thing you do in the workspace. - -Altogether, the following files are generated during the creation of a Constellation cluster and stored in the current workspace: - -* a configuration file -* a state file -* a Base64-encoded master secret -* [Terraform artifacts](../reference/terraform.md), stored in subdirectories -* a Kubernetes `kubeconfig` file. - -After the initialization of your cluster, the CLI will provide you with a Kubernetes `kubeconfig` file. -This file grants you access to your Kubernetes cluster and configures the [kubectl](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) tool. -In addition, the cluster's [identifier](orchestration.md#post-installation-configuration) is returned and stored in the state file. - -### Creation process details - -1. The CLI `apply` command first creates the confidential VM (CVM) resources in your cloud environment and configures the network -2. Each CVM boots the Constellation node image and measures every component in the boot chain -3. The first microservice launched in each node is the [*Bootstrapper*](microservices.md#bootstrapper) -4. The *Bootstrapper* waits until it either receives an initialization request or discovers an initialized cluster -5. The CLI then connects to the *Bootstrapper* of a selected node, sends the configuration, and initiates the initialization of the cluster -6. The *Bootstrapper* of **that** node [initializes the Kubernetes cluster](microservices.md#bootstrapper) and deploys the other Constellation [microservices](microservices.md) including the [*JoinService*](microservices.md#joinservice) -7. Subsequently, the *Bootstrappers* of the other nodes discover the initialized cluster and send join requests to the *JoinService* -8. As part of the join request each node includes an attestation statement of its boot measurements as authentication -9. The *JoinService* verifies the attestation statements and joins the nodes to the Kubernetes cluster -10. This process is repeated for every node joining the cluster later (e.g., through autoscaling) - -## Post-installation configuration - -Post-installation the CLI provides a configuration for [accessing the cluster using the Kubernetes API](https://kubernetes.io/docs/tasks/administer-cluster/access-cluster-api/). -The `kubeconfig` file provides the credentials and configuration for connecting and authenticating to the API server. -Once configured, orchestrate the Kubernetes cluster via `kubectl`. 
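-
-For example, assuming the kubeconfig was written to the default `constellation-admin.conf` in your workspace, the following makes it available to `kubectl` and verifies the connection:
-
-```bash
-# Point kubectl at the cluster created in this workspace.
-export KUBECONFIG="$PWD/constellation-admin.conf"
-
-# A quick check that the API server is reachable and the nodes have joined.
-kubectl get nodes
-```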
- -After the initialization, the CLI will present you with a couple of tokens: - -* The [*master secret*](keys.md#master-secret) (stored in the `constellation-mastersecret.json` file by default) -* The [*clusterID*](keys.md#cluster-identity) of your cluster in Base64 encoding - -You can read more about these values and their meaning in the guide on [cluster identity](keys.md#cluster-identity). - -The *master secret* must be kept secret and can be used to [recover your cluster](../workflows/recovery.md). -Instead of managing this secret manually, you can [use your key management solution of choice](keys.md#user-managed-key-management) with Constellation. - -The *clusterID* uniquely identifies a cluster and can be used to [verify your cluster](../workflows/verify-cluster.md). - -## Upgrades - -Constellation images and microservices may need to be upgraded to new versions during the lifetime of a cluster. -Constellation implements a rolling update mechanism ensuring no downtime of the control or data plane. -You can upgrade a Constellation cluster with a single operation by using the CLI. -For step-by-step instructions on how to do this, refer to [Upgrade your cluster](../workflows/upgrade.md). - -### Attestation of upgrades - -With every new image, corresponding measurements are released. -During an update procedure, the CLI provides new measurements to the [JoinService](microservices.md#joinservice) securely. -New measurements for an updated image are automatically pulled and verified by the CLI following the [supply chain security concept](attestation.md#chain-of-trust) of Constellation. -The [attestation section](attestation.md#cluster-facing-attestation) describes in detail how these measurements are then used by the JoinService for the attestation of nodes. - - diff --git a/docs/versioned_docs/version-2.23/architecture/overview.md b/docs/versioned_docs/version-2.23/architecture/overview.md deleted file mode 100644 index 386f93b2f..000000000 --- a/docs/versioned_docs/version-2.23/architecture/overview.md +++ /dev/null @@ -1,30 +0,0 @@ -# Overview - -Constellation is a cloud-based confidential orchestration platform. -The foundation of Constellation is Kubernetes and therefore shares the same technology stack and architecture principles. -To learn more about Constellation and Kubernetes, see [product overview](../overview/product.md). - -## About orchestration and updates - -As a cluster administrator, you can use the [Constellation CLI](orchestration.md) to install and deploy a cluster. -Updates are provided in accordance with the [support policy](versions.md). - -## About microservices and attestation - -Constellation manages the nodes and network in your cluster. All nodes are bootstrapped by the [*Bootstrapper*](microservices.md#bootstrapper). They're verified and authenticated by the [*JoinService*](microservices.md#joinservice) before being added to the cluster and the network. Finally, the entire cluster can be verified via the [*VerificationService*](microservices.md#verificationservice) using [remote attestation](attestation.md). - -## About node images and verified boot - -Constellation comes with operating system images for Kubernetes control-plane and worker nodes. -They're highly optimized for running containerized workloads and specifically prepared for running inside confidential VMs. -You can learn more about [the images](images.md) and how verified boot ensures their integrity during boot and beyond. 
-
-## About key management and cryptographic primitives
-
-Encryption of data at-rest, in-transit, and in-use is the fundamental building block for confidential computing and Constellation. Learn more about the [keys and cryptographic primitives](keys.md) used in Constellation, [encrypted persistent storage](encrypted-storage.md), and [network encryption](networking.md).
-
-## About observability
-
-Observability in Kubernetes refers to the capability to troubleshoot issues using telemetry signals such as logs, metrics, and traces.
-In the realm of Confidential Computing, it's crucial that observability aligns with confidentiality, necessitating careful implementation.
-Learn more about the [observability capabilities in Constellation](./observability.md). diff --git a/docs/versioned_docs/version-2.23/architecture/versions.md b/docs/versioned_docs/version-2.23/architecture/versions.md deleted file mode 100644 index d5cbc987b..000000000 --- a/docs/versioned_docs/version-2.23/architecture/versions.md +++ /dev/null @@ -1,21 +0,0 @@ -# Versions and support policy
-
-All components of Constellation use a three-digit version number of the form `v<MAJOR>.<MINOR>.<PATCH>`.
-The components are released in lock step, usually on the first Tuesday of every month. This release primarily introduces new features, but may also include security or performance improvements. The `MINOR` version will be incremented as part of this release.
-
-Additional `PATCH` releases may be created on demand to fix security issues or bugs before the next `MINOR` release window.
-
-New releases are published on [GitHub](https://github.com/edgelesssys/constellation/releases).
-
-## Kubernetes support policy
-
-Constellation is aligned with the [version support policy of Kubernetes](https://kubernetes.io/releases/version-skew-policy/#supported-versions), and therefore usually supports the most recent three minor versions.
-When a new minor version of Kubernetes is released, support is added to the next Constellation release, and that version then supports four Kubernetes versions.
-Subsequent Constellation releases drop support for the oldest (and deprecated) Kubernetes version.
-
-The following Kubernetes versions are currently supported:
-
-
-* v1.29.15
-* v1.30.12
-* v1.31.8 diff --git a/docs/versioned_docs/version-2.23/getting-started/examples.md b/docs/versioned_docs/version-2.23/getting-started/examples.md deleted file mode 100644 index fded84980..000000000 --- a/docs/versioned_docs/version-2.23/getting-started/examples.md +++ /dev/null @@ -1,6 +0,0 @@ -# Examples
-
-After you've [installed the CLI](install.md) and [created your first cluster](first-steps.md), you're ready to deploy applications. Why not start with one of the following examples?
-* [Emojivoto](examples/emojivoto.md): a simple but fun web application -* [Online Boutique](examples/online-boutique.md): an e-commerce demo application by Google consisting of 11 separate microservices -* [Horizontal Pod Autoscaling](examples/horizontal-scaling.md): an example demonstrating Constellation's autoscaling capabilities diff --git a/docs/versioned_docs/version-2.23/getting-started/examples/emojivoto.md b/docs/versioned_docs/version-2.23/getting-started/examples/emojivoto.md deleted file mode 100644 index 2bbe27917..000000000 --- a/docs/versioned_docs/version-2.23/getting-started/examples/emojivoto.md +++ /dev/null @@ -1,22 +0,0 @@ -# Emojivoto -[Emojivoto](https://github.com/BuoyantIO/emojivoto) is a simple and fun application that's well suited to test the basic functionality of your cluster. - - - -emojivoto - Web UI - - - -1. Deploy the application: - ```bash - kubectl apply -k github.com/BuoyantIO/emojivoto/kustomize/deployment - ``` -2. Wait until it becomes available: - ```bash - kubectl wait --for=condition=available --timeout=60s -n emojivoto --all deployments - ``` -3. Forward the web service to your machine: - ```bash - kubectl -n emojivoto port-forward svc/web-svc 8080:80 - ``` -4. Visit [http://localhost:8080](http://localhost:8080) diff --git a/docs/versioned_docs/version-2.23/getting-started/examples/filestash-s3proxy.md b/docs/versioned_docs/version-2.23/getting-started/examples/filestash-s3proxy.md deleted file mode 100644 index b9a394256..000000000 --- a/docs/versioned_docs/version-2.23/getting-started/examples/filestash-s3proxy.md +++ /dev/null @@ -1,107 +0,0 @@ - -# Deploying Filestash - -Filestash is a web frontend for different storage backends, including S3. -It's a useful application to showcase s3proxy in action. - -1. Deploy s3proxy as described in [Deployment](../../workflows/s3proxy.md#deployment). -2. 
Create a deployment file for Filestash with one pod: - -```sh -cat << EOF > "deployment-filestash.yaml" -apiVersion: apps/v1 -kind: Deployment -metadata: - name: filestash -spec: - replicas: 1 - selector: - matchLabels: - app: filestash - template: - metadata: - labels: - app: filestash - spec: - hostAliases: - - ip: $(kubectl get svc s3proxy-service -o=jsonpath='{.spec.clusterIP}') - hostnames: - - "s3.us-east-1.amazonaws.com" - - "s3.us-east-2.amazonaws.com" - - "s3.us-west-1.amazonaws.com" - - "s3.us-west-2.amazonaws.com" - - "s3.eu-north-1.amazonaws.com" - - "s3.eu-south-1.amazonaws.com" - - "s3.eu-south-2.amazonaws.com" - - "s3.eu-west-1.amazonaws.com" - - "s3.eu-west-2.amazonaws.com" - - "s3.eu-west-3.amazonaws.com" - - "s3.eu-central-1.amazonaws.com" - - "s3.eu-central-2.amazonaws.com" - - "s3.ap-northeast-1.amazonaws.com" - - "s3.ap-northeast-2.amazonaws.com" - - "s3.ap-northeast-3.amazonaws.com" - - "s3.ap-east-1.amazonaws.com" - - "s3.ap-southeast-1.amazonaws.com" - - "s3.ap-southeast-2.amazonaws.com" - - "s3.ap-southeast-3.amazonaws.com" - - "s3.ap-southeast-4.amazonaws.com" - - "s3.ap-south-1.amazonaws.com" - - "s3.ap-south-2.amazonaws.com" - - "s3.me-south-1.amazonaws.com" - - "s3.me-central-1.amazonaws.com" - - "s3.il-central-1.amazonaws.com" - - "s3.af-south-1.amazonaws.com" - - "s3.ca-central-1.amazonaws.com" - - "s3.sa-east-1.amazonaws.com" - containers: - - name: filestash - image: machines/filestash:latest - ports: - - containerPort: 8334 - volumeMounts: - - name: ca-cert - mountPath: /etc/ssl/certs/kube-ca.crt - subPath: kube-ca.crt - volumes: - - name: ca-cert - secret: - secretName: s3proxy-tls - items: - - key: ca.crt - path: kube-ca.crt -EOF -``` - -The pod spec includes the `hostAliases` key, which adds an entry to the pod's `/etc/hosts`. -The entry forwards all requests for any of the currently defined AWS regions to the Kubernetes service `s3proxy-service`. -If you followed the s3proxy [Deployment](../../workflows/s3proxy.md#deployment) guide, this service points to a s3proxy pod. - -The deployment specifies all regions explicitly to prevent accidental data leaks. -If one of your buckets were located in a region that's not part of the `hostAliases` key, traffic towards those buckets would not be redirected to s3proxy. -Similarly, if you want to exclude data for specific regions from going through s3proxy you can remove those regions from the deployment. - -The spec also includes a volume mount for the TLS certificate and adds it to the pod's certificate trust store. -The volume is called `ca-cert`. -The key `ca.crt` of that volume is mounted to `/etc/ssl/certs/kube-ca.crt`, which is the default certificate trust store location for that container's OpenSSL library. -Not adding the CA certificate will result in TLS authentication errors. - -3. Apply the file: `kubectl apply -f deployment-filestash.yaml` - -Afterward, you can use a port forward to access the Filestash pod: -`kubectl port-forward pod/$(kubectl get pod --selector='app=filestash' -o=jsonpath='{.items[*].metadata.name}') 8334:8334` - -4. After browsing to `localhost:8443`, Filestash will ask you to set an administrator password. -After setting it, you can directly leave the admin area by clicking the blue cloud symbol in the top left corner. -Subsequently, you can select S3 as storage backend and enter your credentials. -This will bring you to an overview of your buckets. -If you want to deploy Filestash in production, take a look at its [documentation](https://www.filestash.app/docs/). - -5. 
To see the logs of s3proxy intercepting requests made to S3, run: `kubectl logs -f pod/$(kubectl get pod --selector='app=s3proxy' -o=jsonpath='{.items[*].metadata.name}')` -Look out for log messages labeled `intercepting`. -There is one such log message for each message that's encrypted, decrypted, or blocked. - -6. Once you have uploaded a file with Filestash, you should be able to view the file in Filestash. -However, if you go to the AWS S3 [Web UI](https://s3.console.aws.amazon.com/s3/home) and download the file you just uploaded in Filestash, you won't be able to read it. -Another way to spot encrypted files without downloading them is to click on a file, scroll to the Metadata section, and look for the header named `x-amz-meta-constellation-encryption`. -This header holds the encrypted data encryption key of the object and is only present on objects that are encrypted by s3proxy. diff --git a/docs/versioned_docs/version-2.23/getting-started/examples/horizontal-scaling.md b/docs/versioned_docs/version-2.23/getting-started/examples/horizontal-scaling.md deleted file mode 100644 index dfaf9e742..000000000 --- a/docs/versioned_docs/version-2.23/getting-started/examples/horizontal-scaling.md +++ /dev/null @@ -1,98 +0,0 @@ -# Horizontal Pod Autoscaling -This example demonstrates Constellation's autoscaling capabilities. It's based on the Kubernetes [HorizontalPodAutoscaler Walkthrough](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/). During the following steps, Constellation will spawn new VMs on demand, verify them, add them to the cluster, and delete them again when the load has settled down. - -## Requirements -The cluster needs to be initialized with Kubernetes 1.23 or later. In addition, [autoscaling must be enabled](../../workflows/scale.md) to enable Constellation to assign new nodes dynamically. - -Just for this example specifically, the cluster should have as few worker nodes in the beginning as possible. Start with a small cluster with only *one* low-powered node for the control-plane node and *one* low-powered worker node. - -:::info -We tested the example using instances of types `Standard_DC4as_v5` on Azure and `n2d-standard-4` on GCP. -::: - -## Setup - -1. Install the Kubernetes Metrics Server: - ```bash - kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml - ``` - -2. Deploy the HPA example server that's supposed to be scaled under load. - - This manifest is similar to the one from the Kubernetes HPA walkthrough, but with increased CPU limits and requests to facilitate the triggering of node scaling events. - ```bash - cat < - -Online Boutique - Web UI - - - -1. Create a namespace: - ```bash - kubectl create ns boutique - ``` -2. Deploy the application: - ```bash - kubectl apply -n boutique -f https://github.com/GoogleCloudPlatform/microservices-demo/raw/main/release/kubernetes-manifests.yaml - ``` -3. Wait for all services to become available: - ```bash - kubectl wait --for=condition=available --timeout=300s -n boutique --all deployments - ``` -4. Get the frontend's external IP address: - ```shell-session - $ kubectl get service frontend-external -n boutique | awk '{print $4}' - EXTERNAL-IP - - ``` - (`` is a placeholder for the IP assigned by your CSP.) -5. Enter the IP from the result in your browser to browse the online shop. 
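-
-If you prefer to verify the deployment from the command line instead of the browser, you can query the assigned address and probe the frontend with `curl`. Note that on some CSPs the load balancer exposes a hostname instead of an IP, in which case the `jsonpath` expression below needs to read `.hostname`:
-
-```bash
-# Read the LoadBalancer address of the frontend service.
-FRONTEND_IP=$(kubectl get service frontend-external -n boutique \
-  -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
-
-# Check that the shop responds over HTTP.
-curl -sSf "http://${FRONTEND_IP}" -o /dev/null && echo "Online Boutique is reachable"
-```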
diff --git a/docs/versioned_docs/version-2.23/getting-started/first-steps-local.md b/docs/versioned_docs/version-2.23/getting-started/first-steps-local.md deleted file mode 100644 index 98f0302de..000000000 --- a/docs/versioned_docs/version-2.23/getting-started/first-steps-local.md +++ /dev/null @@ -1,277 +0,0 @@ -# First steps with a local cluster - -A local cluster lets you deploy and test Constellation without a cloud subscription. -You have two options: - -* Use MiniConstellation to automatically deploy a two-node cluster. -* For more fine-grained control, create the cluster using the QEMU provider. - -Both options use virtualization to create a local cluster with control-plane nodes and worker nodes. They **don't** require hardware with Confidential VM (CVM) support. For attestation, they currently use a software-based vTPM provided by KVM/QEMU. - -You need an x64 machine with a Linux OS. -You can use a VM, but it needs nested virtualization. - -## Prerequisites - -* Machine requirements: - * An x86-64 CPU with at least 4 cores (6 cores are recommended) - * At least 4 GB RAM (6 GB are recommended) - * 20 GB of free disk space - * Hardware virtualization enabled in the BIOS/UEFI (often referred to as Intel VT-x or AMD-V/SVM) / nested-virtualization support when using a VM -* Software requirements: - * Linux OS with [KVM kernel module](https://www.linux-kvm.org/page/Main_Page) - * Recommended: Ubuntu 22.04 LTS - * [Docker](https://docs.docker.com/engine/install/) - * [xsltproc](https://gitlab.gnome.org/GNOME/libxslt/-/wikis/home) - * (Optional) [virsh](https://www.libvirt.org/manpages/virsh.html) to observe and access your nodes - -### Software installation on Ubuntu - -```bash -# install Docker -curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg -echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null -sudo apt update -sudo apt install docker-ce -# install other dependencies -sudo apt install xsltproc -sudo snap install kubectl --classic -# install Constellation CLI -curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-linux-amd64 -sudo install constellation-linux-amd64 /usr/local/bin/constellation -# do not drop forwarded packages -sudo iptables -P FORWARD ACCEPT -``` - -## Create a cluster - - - - - -With the `constellation mini` command, you can deploy and test Constellation locally. This mode is called MiniConstellation. Conceptually, MiniConstellation is similar to [MicroK8s](https://microk8s.io/), [K3s](https://k3s.io/), and [minikube](https://minikube.sigs.k8s.io/docs/). - - -:::caution - -MiniConstellation has specific soft- and hardware requirements such as a Linux OS running on an x86-64 CPU. Pay attention to all [prerequisites](#prerequisites) when setting up. - -::: - -:::note - -Since MiniConstellation runs on your local system, cloud features such as load balancing, -attaching persistent storage, or autoscaling aren't available. - -::: - -The following creates your MiniConstellation cluster (may take up to 10 minutes to complete): - -```bash -constellation mini up -``` - -This will configure your current directory as the [workspace](../architecture/orchestration.md#workspaces) for this cluster. -All `constellation` commands concerning this cluster need to be issued from this directory. 
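-
-Once `constellation mini up` has finished, you can run a quick sanity check from the workspace directory. This assumes the kubeconfig was written to the default `constellation-admin.conf`, as with a regular `constellation apply`:
-
-```bash
-# Point kubectl at the MiniConstellation cluster.
-export KUBECONFIG="$PWD/constellation-admin.conf"
-
-# The Constellation microservices should become ready within a few minutes.
-kubectl get pods -n kube-system
-```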
- - - - -With the QEMU provider, you can create a local Constellation cluster as if it were in the cloud. The provider uses [QEMU](https://www.qemu.org/) to create multiple VMs for the cluster nodes, which interact with each other. - -:::caution - -Constellation on QEMU has specific soft- and hardware requirements such as a Linux OS running on an x86-64 CPU. Pay attention to all [prerequisites](#prerequisites) when setting up. - -::: - -:::note - -Since Constellation on QEMU runs on your local system, cloud features such as load balancing, -attaching persistent storage, or autoscaling aren't available. - -::: - -1. To set up your local cluster, you need to create a configuration file for Constellation first. - - ```bash - constellation config generate qemu - ``` - - This creates a [configuration file](../workflows/config.md) for QEMU called `constellation-conf.yaml`. After that, your current folder also becomes your [workspace](../architecture/orchestration.md#workspaces). All `constellation` commands for your cluster need to be executed from this directory. - -2. Now you can create your cluster and its nodes. `constellation apply` uses the options set in `constellation-conf.yaml`. - - ```bash - constellation apply -y - ``` - - The Output should look like the following: - - ```shell-session - $ constellation apply -y - Checking for infrastructure changes - The following Constellation cluster will be created: - 3 control-plane nodes of type 2-vCPUs will be created. - 1 worker node of type 2-vCPUs will be created. - Creating - Cloud infrastructure created successfully. - Your Constellation master secret was successfully written to ./constellation-mastersecret.json - Connecting - Initializing cluster - Installing Kubernetes components - Your Constellation cluster was successfully initialized. - - Constellation cluster identifier g6iMP5wRU1b7mpOz2WEISlIYSfdAhB0oNaOg6XEwKFY= - Kubernetes configuration constellation-admin.conf - - You can now connect to your cluster by executing: - export KUBECONFIG="$PWD/constellation-admin.conf" - ``` - - The cluster's identifier will be different in your output. - Keep `constellation-mastersecret.json` somewhere safe. - This will allow you to [recover your cluster](../workflows/recovery.md) in case of a disaster. - - :::info - - Depending on your setup, `constellation apply` may take 10+ minutes to complete. - - ::: - -3. Configure kubectl - - ```bash - export KUBECONFIG="$PWD/constellation-admin.conf" - ``` - - - - -## Connect to the cluster - -Your cluster initially consists of a single control-plane node: - -```shell-session -$ kubectl get nodes -NAME STATUS ROLES AGE VERSION -control-plane-0 Ready control-plane 66s v1.24.6 -``` - -Additional nodes will request to join the cluster shortly. Before each additional node is allowed to join the cluster, its state is verified using remote attestation by the [JoinService](../architecture/microservices.md#joinservice). -If verification passes successfully, the new node receives keys and certificates to join the cluster. - -You can follow this process by viewing the logs of the JoinService: - -```shell-session -$ kubectl logs -n kube-system daemonsets/join-service -f -{"level":"INFO","ts":"2022-10-14T09:32:20Z","caller":"cmd/main.go:48","msg":"Constellation Node Join Service","version":"2.1.0","cloudProvider":"qemu"} -{"level":"INFO","ts":"2022-10-14T09:32:20Z","logger":"validator","caller":"watcher/validator.go:96","msg":"Updating expected measurements"} -... 
-``` - -Once all nodes have joined your cluster, it may take a couple of minutes for all resources to become available. -You can check on the state of your cluster by running the following: - -```shell-session -$ kubectl get nodes -NAME STATUS ROLES AGE VERSION -control-plane-0 Ready control-plane 2m59s v1.24.6 -worker-0 Ready 32s v1.24.6 -``` - -## Deploy a sample application - -1. Deploy the [emojivoto app](https://github.com/BuoyantIO/emojivoto) - - ```bash - kubectl apply -k github.com/BuoyantIO/emojivoto/kustomize/deployment - ``` - -2. Expose the frontend service locally - - ```bash - kubectl wait --for=condition=available --timeout=60s -n emojivoto --all deployments - kubectl -n emojivoto port-forward svc/web-svc 8080:80 & - curl http://localhost:8080 - kill %1 - ``` - -## Terminate your cluster - - - - -Once you are done, you can clean up the created resources using the following command: - -```bash -constellation mini down -``` - -This will destroy your cluster and clean up your workspace. -The VM image and cluster configuration file (`constellation-conf.yaml`) will be kept and may be reused to create new clusters. - - - - -Once you are done, you can clean up the created resources using the following command: - -```bash -constellation terminate -``` - -This should give the following output: - -```shell-session -$ constellation terminate -You are about to terminate a Constellation cluster. -All of its associated resources will be DESTROYED. -This action is irreversible and ALL DATA WILL BE LOST. -Do you want to continue? [y/n]: -``` - -Confirm with `y` to terminate the cluster: - -```shell-session -Terminating ... -Your Constellation cluster was terminated successfully. -``` - -This will destroy your cluster and clean up your workspace. -The VM image and cluster configuration file (`constellation-conf.yaml`) will be kept and may be reused to create new clusters. - - - - -## Troubleshooting - -Make sure to use the [latest release](https://github.com/edgelesssys/constellation/releases/latest) and check out the [known issues](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22). - -### VMs have no internet access / CLI remains in "Initializing cluster" state - -`iptables` rules may prevent your VMs from accessing the internet. -Make sure your rules aren't dropping forwarded packages. - -List your rules: - -```bash -sudo iptables -S -``` - -The output may look similar to the following: - -```shell-session --P INPUT ACCEPT --P FORWARD DROP --P OUTPUT ACCEPT --N DOCKER --N DOCKER-ISOLATION-STAGE-1 --N DOCKER-ISOLATION-STAGE-2 --N DOCKER-USER -``` - -If your `FORWARD` chain is set to `DROP`, you need to update your rules: - -```bash -sudo iptables -P FORWARD ACCEPT -``` diff --git a/docs/versioned_docs/version-2.23/getting-started/first-steps.md b/docs/versioned_docs/version-2.23/getting-started/first-steps.md deleted file mode 100644 index fb8437a06..000000000 --- a/docs/versioned_docs/version-2.23/getting-started/first-steps.md +++ /dev/null @@ -1,235 +0,0 @@ -# First steps with Constellation - -The following steps guide you through the process of creating a cluster and deploying a sample app. This example assumes that you have successfully [installed and set up Constellation](install.md), -and have access to a cloud subscription. - -:::tip -If you don't have a cloud subscription, you can also set up a [local Constellation cluster using virtualization](../getting-started/first-steps-local.md) for testing. 
-::: - -:::note -If you encounter any problem with the following steps, make sure to use the [latest release](https://github.com/edgelesssys/constellation/releases/latest) and check out the [known issues](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22). -::: - -## Create a cluster - -1. Create the [configuration file](../workflows/config.md) and state file for your cloud provider. If you are following the steps of this guide, there is no need to edit the file. - - - - - ```bash - constellation config generate aws - ``` - - - - - ```bash - constellation config generate azure - ``` - - - - - ```bash - constellation config generate gcp - ``` - - - - - ```bash - constellation config generate stackit - ``` - - - - -2. Create your [IAM configuration](../workflows/config.md#creating-an-iam-configuration). - - - - - ```bash - constellation iam create aws --zone=us-east-2a --prefix=constellTest --update-config - ``` - - This command creates IAM configuration for the AWS zone `us-east-2a` using the prefix `constellTest` for all named resources being created. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. - - Depending on the attestation variant selected on config generation, different regions are available. - AMD SEV-SNP machines (requires the default attestation variant `awsSEVSNP`) are currently available in the following regions: - * `eu-west-1` - * `us-east-2` - - You can find a list of regions that support AMD SEV-SNP in [AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/snp-requirements.html). - - NitroTPM machines (requires the attestation variant `awsNitroTPM`) are available in all regions. - Constellation OS images are currently replicated to the following regions: - * `eu-central-1` - * `eu-west-1` - * `eu-west-3` - * `us-east-2` - * `ap-south-1` - - If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+AWS+image+region:+xx-xxxx-x). - - You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). - - - - - ```bash - constellation iam create azure --subscriptionID 00000000-0000-0000-0000-000000000000 --region=westus --resourceGroup=constellTest --servicePrincipal=spTest --update-config - ``` - - This command creates IAM configuration on the Azure region `westus` creating a new resource group `constellTest` and a new service principal `spTest`. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. - - CVMs are available in several Azure regions. Constellation OS images are currently replicated to the following: - - * `germanywestcentral` - * `westus` - * `eastus` - * `northeurope` - * `westeurope` - * `southeastasia` - - If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+Azure+image+region:+xx-xxxx-x). - - You can find a list of all [regions in Azure's documentation](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=virtual-machines®ions=all). 
- - - - - ```bash - constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --prefix=constell-test --update-config - ``` - - This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. - - Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `C2D` or `N2D`. - - - - - To use Constellation on STACKIT, the cluster will use the User Access Token (UAT) that's generated [during the install step](./install.md). - After creating the accounts, fill in the STACKIT details in `constellation-conf.yaml` under `provider.openstack`: - - * `stackitProjectID`: STACKIT project id (can be found after login on the [STACKIT portal](https://portal.stackit.cloud)) - - :::caution - - `stackitProjectID` refers to the ID of your STACKIT project. The STACKIT portal also shows the OpenStack ID that's associated with your project in some places. Make sure you insert the STACKIT project ID in the `constellation-conf.yaml` file. It's of the format `XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX`. - - ::: - - - - - :::tip - To learn about all options you have for managing IAM resources and Constellation configuration, see the [Configuration workflow](../workflows/config.md). - ::: - - - -3. Create the cluster. `constellation apply` uses options set in `constellation-conf.yaml`. - If you want to manually manage your cloud resources, for example by using [Terraform](../reference/terraform.md), follow the corresponding instructions in the [Create workflow](../workflows/create.md). - - :::tip - - On Azure, you may need to wait 15+ minutes at this point for role assignments to propagate. - - ::: - - ```bash - constellation apply -y - ``` - - This should look similar to the following: - - ```shell-session - $ constellation apply -y - Checking for infrastructure changes - The following Constellation cluster will be created: - 3 control-plane nodes of type n2d-standard-4 will be created. - 1 worker node of type n2d-standard-4 will be created. - Creating - Cloud infrastructure created successfully - Your Constellation master secret was successfully written to ./constellation-mastersecret.json - Connecting - Initializing cluster - Installing Kubernetes components - Your Constellation cluster was successfully initialized. - - Constellation cluster identifier g6iMP5wRU1b7mpOz2WEISlIYSfdAhB0oNaOg6XEwKFY= - Kubernetes configuration constellation-admin.conf - - You can now connect to your cluster by executing: - export KUBECONFIG="$PWD/constellation-admin.conf" - ``` - - The cluster's identifier will be different in your output. - Keep `constellation-mastersecret.json` somewhere safe. - This will allow you to [recover your cluster](../workflows/recovery.md) in case of a disaster. - - :::info - - Depending on your CSP and region, `constellation apply` may take 10+ minutes to complete. - - ::: - -4. Configure kubectl. - - ```bash - export KUBECONFIG="$PWD/constellation-admin.conf" - ``` - -## Deploy a sample application - -1. Deploy the [emojivoto app](https://github.com/BuoyantIO/emojivoto) - - ```bash - kubectl apply -k github.com/BuoyantIO/emojivoto/kustomize/deployment - ``` - -2. 
Expose the frontend service locally - - ```bash - kubectl wait --for=condition=available --timeout=60s -n emojivoto --all deployments - kubectl -n emojivoto port-forward svc/web-svc 8080:80 & - curl http://localhost:8080 - kill %1 - ``` - -## Terminate your cluster - -Use the CLI to terminate your cluster. If you manually used [Terraform](../reference/terraform.md) to manage your cloud resources, follow the corresponding instructions in the [Terminate workflow](../workflows/terminate.md). - -```bash -constellation terminate -``` - -This should give the following output: - -```shell-session -$ constellation terminate -You are about to terminate a Constellation cluster. -All of its associated resources will be DESTROYED. -This action is irreversible and ALL DATA WILL BE LOST. -Do you want to continue? [y/n]: -``` - -Confirm with `y` to terminate the cluster: - -```shell-session -Terminating ... -Your Constellation cluster was terminated successfully. -``` - -Optionally, you can also [delete your IAM resources](../workflows/config.md#deleting-an-iam-configuration). diff --git a/docs/versioned_docs/version-2.23/getting-started/install.md b/docs/versioned_docs/version-2.23/getting-started/install.md deleted file mode 100644 index f072407d8..000000000 --- a/docs/versioned_docs/version-2.23/getting-started/install.md +++ /dev/null @@ -1,447 +0,0 @@ -# Installation and setup - -Constellation runs entirely in your cloud environment and can be controlled via a dedicated [command-line interface (CLI)](../reference/cli.md) or a [Terraform provider](../workflows/terraform-provider.md). - -## Prerequisites - -Make sure the following requirements are met: - -* Your machine is running Linux, macOS, or Windows -* You have admin rights on your machine -* [kubectl](https://kubernetes.io/docs/tasks/tools/) is installed -* Your CSP is Amazon Web Services (AWS), Microsoft Azure, Google Cloud Platform (GCP), or STACKIT - -## Install the Constellation CLI - -:::tip - -If you prefer to use Terraform, you can alternatively use the [Terraform provider](../workflows/terraform-provider.md) to manage the cluster's lifecycle. - -::: - -The CLI executable is available at [GitHub](https://github.com/edgelesssys/constellation/releases). -Install it with the following commands: - - - - -1. Download the CLI: - -```bash -curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-linux-amd64 -``` - -2. [Verify the signature](../workflows/verify-cli.md) (optional) - -3. Install the CLI to your PATH: - -```bash -sudo install constellation-linux-amd64 /usr/local/bin/constellation -``` - - - - -1. Download the CLI: - -```bash -curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-linux-arm64 -``` - -2. [Verify the signature](../workflows/verify-cli.md) (optional) - -3. Install the CLI to your PATH: - -```bash -sudo install constellation-linux-arm64 /usr/local/bin/constellation -``` - - - - - -1. Download the CLI: - -```bash -curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-darwin-arm64 -``` - -2. [Verify the signature](../workflows/verify-cli.md) (optional) - -3. Install the CLI to your PATH: - -```bash -sudo install constellation-darwin-arm64 /usr/local/bin/constellation -``` - - - - - -1. Download the CLI: - -```bash -curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-darwin-amd64 -``` - -2. [Verify the signature](../workflows/verify-cli.md) (optional) - -3. 
Install the CLI to your PATH: - -```bash -sudo install constellation-darwin-amd64 /usr/local/bin/constellation -``` - - - - - -1. Download the CLI: - -```bash -Invoke-WebRequest -OutFile ./constellation.exe -Uri 'https://github.com/edgelesssys/constellation/releases/latest/download/constellation-windows-amd64.exe' -``` - -2. [Verify the signature](../workflows/verify-cli.md) (optional) - -3. Install the CLI under `C:\Program Files\Constellation\bin\constellation.exe` - -3. Add the CLI to your PATH: - - 1. Open `Advanced system settings` by searching for the App in the Windows search - 2. Go to the `Advanced` tab - 3. Click `Environment Variables…` - 4. Click variable called `Path` and click `Edit…` - 5. Click `New` - 6. Enter the path to the folder containing the binary you want on your PATH: `C:\Program Files\Constellation\bin` - - - - -:::tip -The CLI supports autocompletion for various shells. To set it up, run `constellation completion` and follow the given steps. -::: - -## Set up cloud credentials - -Constellation makes authenticated calls to the CSP API. Therefore, you need to set up Constellation with the credentials for your CSP. - -:::tip -If you don't have a cloud subscription, you can also set up a [local Constellation cluster using virtualization](../getting-started/first-steps-local.md) for testing. -::: - -### Required permissions - - - - -To set up a Constellation cluster, you need to perform two tasks that require permissions: create the infrastructure and create roles for cluster nodes. Both of these actions can be performed by different users, e.g., an administrator to create roles and a DevOps engineer to create the infrastructure. - -To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "ec2:DescribeAccountAttributes", - "iam:AddRoleToInstanceProfile", - "iam:AttachRolePolicy", - "iam:CreateInstanceProfile", - "iam:CreatePolicy", - "iam:CreateRole", - "iam:DeleteInstanceProfile", - "iam:DeletePolicy", - "iam:DeletePolicyVersion", - "iam:DeleteRole", - "iam:DetachRolePolicy", - "iam:GetInstanceProfile", - "iam:GetPolicy", - "iam:GetPolicyVersion", - "iam:GetRole", - "iam:ListAttachedRolePolicies", - "iam:ListInstanceProfilesForRole", - "iam:ListPolicyVersions", - "iam:ListRolePolicies", - "iam:PassRole", - "iam:RemoveRoleFromInstanceProfile", - "sts:GetCallerIdentity" - ], - "Resource": "*" - } - ] -} -``` - -The built-in `AdministratorAccess` policy is a superset of these permissions. - -To [create a Constellation cluster](../workflows/create.md), see the permissions of [main.tf](https://github.com/edgelesssys/constellation/blob/main/terraform/infrastructure/iam/aws/main.tf). - -The built-in `PowerUserAccess` policy is a superset of these permissions. - -Follow Amazon's guide on [understanding](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) and [managing policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html). 
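-
-Before creating the IAM configuration, it can help to double-check which AWS identity the CLI will act as and which policies are attached to it. The commands below are standard AWS CLI calls; the user name is a hypothetical example:
-
-```bash
-# Show the identity that Constellation's AWS API calls will be made with.
-aws sts get-caller-identity
-
-# List the managed policies attached to that user (replace with your user name).
-aws iam list-attached-user-policies --user-name constellation-admin
-```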
- - - - -The following [resource providers need to be registered](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/resource-providers-and-types#register-resource-provider) in your subscription: - -* `Microsoft.Attestation` -* `Microsoft.Compute` -* `Microsoft.Insights` -* `Microsoft.ManagedIdentity` -* `Microsoft.Network` - -By default, Constellation tries to register these automatically if they haven't been registered before. - -To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: - -* `*/register/action` \[1] -* `Microsoft.Authorization/roleAssignments/*` -* `Microsoft.Authorization/roleDefinitions/*` -* `Microsoft.ManagedIdentity/userAssignedIdentities/*` -* `Microsoft.Resources/subscriptions/resourcegroups/*` - -The built-in `Owner` role is a superset of these permissions. - -To [create a Constellation cluster](../workflows/create.md), you need the following permissions: - -* `Microsoft.Attestation/attestationProviders/*` -* `Microsoft.Compute/virtualMachineScaleSets/*` -* `Microsoft.Insights/components/*` -* `Microsoft.ManagedIdentity/userAssignedIdentities/*` -* `Microsoft.Network/loadBalancers/*` -* `Microsoft.Network/loadBalancers/backendAddressPools/*` -* `Microsoft.Network/networkSecurityGroups/*` -* `Microsoft.Network/publicIPAddresses/*` -* `Microsoft.Network/virtualNetworks/*` -* `Microsoft.Network/virtualNetworks/subnets/*` -* `Microsoft.Network/natGateways/*` - -The built-in `Contributor` role is a superset of these permissions. - -Follow Microsoft's guide on [understanding](https://learn.microsoft.com/en-us/azure/role-based-access-control/role-definitions) and [assigning roles](https://learn.microsoft.com/en-us/azure/role-based-access-control/role-assignments). - -1: You can omit `*/register/Action` if the resource providers mentioned above are already registered and the `ARM_SKIP_PROVIDER_REGISTRATION` environment variable is set to `true` when creating the IAM configuration. - - - - -Create a new project for Constellation or use an existing one. -Enable the [Compute Engine API](https://console.cloud.google.com/apis/library/compute.googleapis.com) on it. - -To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: - -* `iam.roles.create` -* `iam.roles.delete` -* `iam.roles.get` -* `iam.serviceAccountKeys.create` -* `iam.serviceAccountKeys.delete` -* `iam.serviceAccountKeys.get` -* `iam.serviceAccounts.create` -* `iam.serviceAccounts.delete` -* `iam.serviceAccounts.get` -* `resourcemanager.projects.getIamPolicy` -* `resourcemanager.projects.setIamPolicy` - -Together, the built-in roles `roles/editor` and `roles/resourcemanager.projectIamAdmin` form a superset of these permissions. 
- -To [create a Constellation cluster](../workflows/create.md), you need the following permissions: - -* `compute.addresses.createInternal` -* `compute.addresses.deleteInternal` -* `compute.addresses.get` -* `compute.addresses.useInternal` -* `compute.backendServices.create` -* `compute.backendServices.delete` -* `compute.backendServices.get` -* `compute.backendServices.use` -* `compute.disks.create` -* `compute.firewalls.create` -* `compute.firewalls.delete` -* `compute.firewalls.get` -* `compute.firewalls.update` -* `compute.forwardingRules.create` -* `compute.forwardingRules.delete` -* `compute.forwardingRules.get` -* `compute.forwardingRules.setLabels` -* `compute.forwardingRules.list` -* `compute.globalAddresses.create` -* `compute.globalAddresses.delete` -* `compute.globalAddresses.get` -* `compute.globalAddresses.use` -* `compute.globalForwardingRules.create` -* `compute.globalForwardingRules.delete` -* `compute.globalForwardingRules.get` -* `compute.globalForwardingRules.setLabels` -* `compute.globalOperations.get` -* `compute.healthChecks.create` -* `compute.healthChecks.delete` -* `compute.healthChecks.get` -* `compute.healthChecks.useReadOnly` -* `compute.instanceGroupManagers.create` -* `compute.instanceGroupManagers.delete` -* `compute.instanceGroupManagers.get` -* `compute.instanceGroupManagers.update` -* `compute.instanceGroups.create` -* `compute.instanceGroups.delete` -* `compute.instanceGroups.get` -* `compute.instanceGroups.update` -* `compute.instanceGroups.use` -* `compute.instances.create` -* `compute.instances.setLabels` -* `compute.instances.setMetadata` -* `compute.instances.setTags` -* `compute.instanceTemplates.create` -* `compute.instanceTemplates.delete` -* `compute.instanceTemplates.get` -* `compute.instanceTemplates.useReadOnly` -* `compute.networks.create` -* `compute.networks.delete` -* `compute.networks.get` -* `compute.networks.updatePolicy` -* `compute.routers.create` -* `compute.routers.delete` -* `compute.routers.get` -* `compute.routers.update` -* `compute.subnetworks.create` -* `compute.subnetworks.delete` -* `compute.subnetworks.get` -* `compute.subnetworks.use` -* `compute.targetTcpProxies.create` -* `compute.targetTcpProxies.delete` -* `compute.targetTcpProxies.get` -* `compute.targetTcpProxies.use` -* `iam.serviceAccounts.actAs` - -Together, the built-in roles `roles/editor`, `roles/compute.instanceAdmin` and `roles/resourcemanager.projectIamAdmin` form a superset of these permissions. - -Follow Google's guide on [understanding](https://cloud.google.com/iam/docs/understanding-roles) and [assigning roles](https://cloud.google.com/iam/docs/granting-changing-revoking-access). - - - - -Constellation on STACKIT requires a User Access Token (UAT) for the OpenStack API and a STACKIT service account. -The UAT already has all required permissions by default. -The STACKIT service account needs the `editor` role to create STACKIT LoadBalancers. -Look at the [STACKIT documentation](https://docs.stackit.cloud/stackit/en/getting-started-in-service-accounts-134415831.html) on how to create the service account and assign the role. - - - - -### Authentication - -You need to authenticate with your CSP. The following lists the required steps for *testing* and *production* environments. - -:::note -The steps for a *testing* environment are simpler. However, they may expose secrets to the CSP. If in doubt, follow the *production* steps. -::: - - - - -**Testing** - -You can use the [AWS CloudShell](https://console.aws.amazon.com/cloudshell/home). 
Make sure you are [authorized to use it](https://docs.aws.amazon.com/cloudshell/latest/userguide/sec-auth-with-identities.html). - -**Production** - -Use the latest version of the [AWS CLI](https://aws.amazon.com/cli/) on a trusted machine: - -```bash -aws configure -``` - -Options and first steps are described in the [AWS CLI documentation](https://docs.aws.amazon.com/cli/index.html). - - - - -**Testing** - -Simply open the [Azure Cloud Shell](https://docs.microsoft.com/en-us/azure/cloud-shell/overview). - -**Production** - -Use the latest version of the [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/) on a trusted machine: - -```bash -az login -``` - -Other options are described in Azure's [authentication guide](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli). - - - - -**Testing** - -You can use the [Google Cloud Shell](https://cloud.google.com/shell). Make sure your [session is authorized](https://cloud.google.com/shell/docs/auth). For example, execute `gsutil` and accept the authorization prompt. - -**Production** - -Use one of the following options on a trusted machine: - -* Use the [`gcloud` CLI](https://cloud.google.com/sdk/gcloud) - - ```bash - gcloud auth application-default login - ``` - - This will ask you to log-in to your Google account and create your credentials. - The Constellation CLI will automatically load these credentials when needed. - -* Set up a service account and pass the credentials manually - - Follow [Google's guide](https://cloud.google.com/docs/authentication/production#manually) for setting up your credentials. - - - - -You need to authenticate with the infrastructure API (OpenStack) and create a service account (STACKIT API). - -1. [Follow the STACKIT documentation](https://docs.stackit.cloud/stackit/en/step-1-generating-of-user-access-token-11763726.html) for obtaining a User Access Token (UAT) to use the infrastructure API -2. Create a configuration file with the credentials from the User Access Token under: - * Linux: `~/.config/openstack/clouds.yaml` - * macOS: `/Users//Library/Application Support/openstack/clouds.yaml` or `/etc/openstack/clouds.yaml` - * Windows: `%AppData%\openstack\clouds.yaml` - - - ```yaml - clouds: - stackit: - auth: - auth_url: https://keystone.api.iaas.eu01.stackit.cloud/v3 - username: REPLACE_WITH_UAT_USERNAME - password: REPLACE_WITH_UAT_PASSWORD - project_id: REPLACE_WITH_OPENSTACK_PROJECT_ID - project_name: REPLACE_WITH_STACKIT_PROJECT_NAME - user_domain_name: portal_mvp - project_domain_name: portal_mvp - region_name: RegionOne - identity_api_version: 3 - ``` - -:::caution - -`project_id` refers to the ID of your OpenStack project. The STACKIT portal also shows the STACKIT ID that's associated with your project in some places. Make sure you insert the OpenStack project ID in the `clouds.yaml` file. - -::: - -3. [Follow the STACKIT documentation](https://docs.stackit.cloud/stackit/en/getting-started-in-service-accounts-134415831.html) for creating a service account and an access token -4. Assign the `editor` role to the service account by [following the documentation](https://docs.stackit.cloud/stackit/en/getting-started-in-service-accounts-134415831.html) -5. Create a configuration file under `~/.stackit/credentials.json` (`%USERPROFILE%\.stackit\credentials.json` on Windows) - - ```json - {"STACKIT_SERVICE_ACCOUNT_TOKEN":"REPLACE_WITH_TOKEN"} - ``` - - - - - -## Next steps - -You are now ready to [deploy your first confidential Kubernetes cluster and application](first-steps.md). 
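-
-If you set up STACKIT credentials above and want a quick, optional sanity check before creating a cluster, the following sketch requests a token using the `stackit` entry from `clouds.yaml`. It assumes the `openstack` CLI (python-openstackclient) is installed, which Constellation itself doesn't require:
-
-```bash
-# Hypothetical check: issue a token with the "stackit" cloud entry from clouds.yaml.
-openstack --os-cloud stackit token issue
-```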
diff --git a/docs/versioned_docs/version-2.23/getting-started/marketplaces.md b/docs/versioned_docs/version-2.23/getting-started/marketplaces.md deleted file mode 100644 index a6763a42a..000000000 --- a/docs/versioned_docs/version-2.23/getting-started/marketplaces.md +++ /dev/null @@ -1,56 +0,0 @@ -# Using Constellation via Cloud Marketplaces - -Constellation is available through the Marketplaces of AWS, Azure, GCP, and STACKIT. This allows you to create self-managed Constellation clusters that are billed on a pay-per-use basis (hourly, per vCPU) with your CSP account. You can still get direct support by Edgeless Systems. For more information, please [contact us](https://www.edgeless.systems/enterprise-support/). - -This document explains how to run Constellation with the dynamically billed cloud marketplace images. - - - - -To use Constellation's marketplace images, ensure that you are subscribed to the [marketplace offering](https://aws.amazon.com/marketplace/pp/prodview-2mbn65nv57oys) through the web portal. - -Then, enable the use of marketplace images in your Constellation `constellation-conf.yaml` [config file](../workflows/config.md): - -```bash -yq eval -i ".provider.aws.useMarketplaceImage = true" constellation-conf.yaml -``` - - - - -Constellation has a private marketplace plan. Please [contact us](https://www.edgeless.systems/enterprise-support/) to gain access. - -To use a marketplace image, you need to accept the marketplace image's terms once for your subscription with the [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/vm/image/terms?view=azure-cli-latest): - -```bash -az vm image terms accept --publisher edgelesssystems --offer constellation --plan constellation -``` - -Then, enable the use of marketplace images in your Constellation `constellation-conf.yaml` [config file](../workflows/config.md): - -```bash -yq eval -i ".provider.azure.useMarketplaceImage = true" constellation-conf.yaml -``` - - - - -To use a marketplace image, ensure that the account is entitled to use marketplace images by Edgeless Systems by accepting the terms through the [web portal](https://console.cloud.google.com/marketplace/vm/config/edgeless-systems-public/constellation). - -Then, enable the use of marketplace images in your Constellation `constellation-conf.yaml` [config file](../workflows/config.md): - -```bash -yq eval -i ".provider.gcp.useMarketplaceImage = true" constellation-conf.yaml -``` - - - - -On STACKIT, the selected Constellation image is always a marketplace image. You can find more information on the STACKIT portal. - - - - -Ensure that the cluster uses an official release image version (i.e., `.image=vX.Y.Z` in the `constellation-conf.yaml` file). - -From there, you can proceed with the [cluster creation](../workflows/create.md) as usual. diff --git a/docs/versioned_docs/version-2.23/intro.md b/docs/versioned_docs/version-2.23/intro.md deleted file mode 100644 index 0bfe86da9..000000000 --- a/docs/versioned_docs/version-2.23/intro.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -slug: / -id: intro ---- -# Introduction - -Welcome to the documentation of Constellation! Constellation is a Kubernetes engine that aims to provide the best possible data security. - -![Constellation concept](/img/concept.svg) - - Constellation shields your entire Kubernetes cluster from the underlying cloud infrastructure. Everything inside is always encrypted, including at runtime in memory. 
For this, Constellation leverages a technology called *confidential computing* and more specifically Confidential VMs. - -:::tip -See the 📄[whitepaper](https://content.edgeless.systems/hubfs/Confidential%20Computing%20Whitepaper.pdf) for more information on confidential computing. -::: - -## Goals - -From a security perspective, Constellation is designed to keep all data always encrypted and to prevent any access from the underlying (cloud) infrastructure. This includes access from datacenter employees, privileged cloud admins, and attackers coming through the infrastructure. Such attackers could be malicious co-tenants escalating their privileges or hackers who managed to compromise a cloud server. - -From a DevOps perspective, Constellation is designed to work just like what you would expect from a modern Kubernetes engine. - -## Use cases - -Constellation provides unique security [features](overview/confidential-kubernetes.md) and [benefits](overview/security-benefits.md). The core use cases are: - -* Increasing the overall security of your clusters -* Increasing the trustworthiness of your SaaS offerings -* Moving sensitive workloads from on-prem to the cloud -* Meeting regulatory requirements - -## Next steps - -You can learn more about the concept of Confidential Kubernetes, features, security benefits, and performance of Constellation in the *Basics* section. To jump right into the action head to *Getting started*. diff --git a/docs/versioned_docs/version-2.23/overview/clouds.md b/docs/versioned_docs/version-2.23/overview/clouds.md deleted file mode 100644 index b2695d28e..000000000 --- a/docs/versioned_docs/version-2.23/overview/clouds.md +++ /dev/null @@ -1,66 +0,0 @@ -# Feature status of clouds - -What works on which cloud? Currently, Confidential VMs (CVMs) are available in varying quality on the different clouds and software stacks. - -For Constellation, the ideal environment provides the following: - -1. Ability to run arbitrary software and images inside CVMs -2. CVMs based on AMD SEV-SNP (available in EPYC CPUs since the Milan generation) or Intel TDX (available in Xeon CPUs since the Sapphire Rapids generation) -3. Ability for CVM guests to obtain raw hardware attestation statements -4. Reviewable, open-source firmware inside CVMs -5. Capability of the firmware to attest the integrity of the code it passes control to, e.g., with an embedded virtual TPM (vTPM) - -(1) is a functional must-have. (2)--(5) are required for remote attestation that fully keeps the infrastructure/cloud out. Constellation can work without them or with approximations, but won't protect against certain privileged attackers anymore. - -The following table summarizes the state of features for different infrastructures. - -| **Feature** | **AWS** | **Azure** | **GCP** | **STACKIT** | **OpenStack (Yoga)** | -|-----------------------------------|---------|-----------|---------|--------------|----------------------| -| **1. Custom images** | Yes | Yes | Yes | Yes | Yes | -| **2. SEV-SNP or TDX** | Yes | Yes | Yes | No | Depends on kernel/HV | -| **3. Raw guest attestation** | Yes | Yes | Yes | No | Depends on kernel/HV | -| **4. Reviewable firmware** | Yes | No* | No | No | Depends on kernel/HV | -| **5. Confidential measured boot** | No | Yes | No | No | Depends on kernel/HV | - -## Amazon Web Services (AWS) - -Amazon EC2 [supports AMD SEV-SNP](https://aws.amazon.com/de/about-aws/whats-new/2023/04/amazon-ec2-amd-sev-snp/). -Regarding (3), AWS provides direct access to attestation statements. 
-However, regarding (5), attestation is partially based on the [NitroTPM](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html) for [measured boot](../architecture/attestation.md#measured-boot), which is a vTPM managed by the Nitro hypervisor. -Hence, the hypervisor is currently part of Constellation's TCB. -Regarding (4), the [firmware is open source](https://github.com/aws/uefi) and can be reproducibly built. - -## Microsoft Azure - -With its [CVM offering](https://docs.microsoft.com/en-us/azure/confidential-computing/confidential-vm-overview), Azure provides the best foundations for Constellation. -Regarding (3), Azure provides direct access to attestation statements. -The firmware runs in an isolated domain inside the CVM and exposes a vTPM (5), but it's closed source (4). -On SEV-SNP, Azure uses VM Privilege Level (VMPL) isolation for the separation of firmware and the rest of the VM; on TDX, they use TD partitioning. -This firmware is signed by Azure. -The signature is reflected in the attestation statements of CVMs. -Thus, the Azure closed-source firmware becomes part of Constellation's trusted computing base (TCB). - -\* Recently, [Azure announced the open source paravisor OpenHCL](https://techcommunity.microsoft.com/blog/windowsosplatform/openhcl-the-new-open-source-paravisor/4273172). It's the foundation for fully open source and verifiable CVM firmware. Once Azure provides their CVM firmware with reproducible builds based on OpenHCL, (4) switches from *No* to *Yes*. Constellation will support OpenHCL based firmware on Azure in the future. - -## Google Cloud Platform (GCP) - -The [CVMs Generally Available in GCP](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview#technologies) are based on AMD SEV-ES or SEV-SNP. -Regarding (3), with their SEV-SNP offering Google provides direct access to attestation statements. -However, regarding (5), attestation is partially based on the [Shielded VM vTPM](https://cloud.google.com/compute/shielded-vm/docs/shielded-vm#vtpm) for [measured boot](../architecture/attestation.md#measured-boot), which is a vTPM managed by Google's hypervisor. -Hence, the hypervisor is currently part of Constellation's TCB. -Regarding (4), the CVMs still include closed-source firmware. - -[TDX on Google](https://cloud.google.com/blog/products/identity-security/confidential-vms-on-intel-cpus-your-datas-new-intelligent-defense) is in public preview. -With it, Constellation would have a similar TCB and attestation flow as with the current SEV-SNP offering. - -## STACKIT - -[STACKIT Compute Engine](https://www.stackit.de/en/product/stackit-compute-engine/) supports AMD SEV-ES. A vTPM is used for measured boot, which is a vTPM managed by STACKIT's hypervisor. Hence, the hypervisor is currently part of Constellation's TCB. - -## OpenStack - -OpenStack is an open-source cloud and infrastructure management software. It's used by many smaller CSPs and datacenters. In the latest *Yoga* version, OpenStack has basic support for CVMs. However, much depends on the employed kernel and hypervisor. Features (2)--(4) are likely to be a *Yes* with Linux kernel version 6.2. Thus, going forward, OpenStack on corresponding AMD or Intel hardware will be a viable underpinning for Constellation. - -## Conclusion - -The different clouds and software like the Linux kernel and OpenStack are in the process of building out their support for state-of-the-art CVMs. Azure has already most features in place. 
For Constellation, the status quo means that the TCB has different shapes on different infrastructures. With broad SEV-SNP support coming to the Linux kernel, we soon expect a normalization of features across infrastructures. diff --git a/docs/versioned_docs/version-2.23/overview/confidential-kubernetes.md b/docs/versioned_docs/version-2.23/overview/confidential-kubernetes.md deleted file mode 100644 index bff8c3322..000000000 --- a/docs/versioned_docs/version-2.23/overview/confidential-kubernetes.md +++ /dev/null @@ -1,42 +0,0 @@ -# Confidential Kubernetes - -We use the term *Confidential Kubernetes* to refer to the concept of using confidential-computing technology to shield entire Kubernetes clusters from the infrastructure. The three defining properties of this concept are: - -1. **Workload shielding**: the confidentiality and integrity of all workload-related data and code are enforced. -2. **Control plane shielding**: the confidentiality and integrity of the cluster's control plane, state, and workload configuration are enforced. -3. **Attestation and verifiability**: the two properties above can be verified remotely based on hardware-rooted cryptographic certificates. - -Each of the above properties is equally important. Only with all three in conjunction, an entire cluster can be shielded without gaps. - -## Constellation security features - -Constellation implements the Confidential Kubernetes concept with the following security features. - -* **Runtime encryption**: Constellation runs all Kubernetes nodes inside Confidential VMs (CVMs). This gives runtime encryption for the entire cluster. -* **Network and storage encryption**: Constellation augments this with transparent encryption of the [network](../architecture/networking.md), [persistent storage](../architecture/encrypted-storage.md), and other managed storage like [AWS S3](../architecture/encrypted-storage.md#encrypted-s3-object-storage). Thus, workloads and control plane are truly end-to-end encrypted: at rest, in transit, and at runtime. -* **Transparent key management**: Constellation manages the corresponding [cryptographic keys](../architecture/keys.md) inside CVMs. -* **Node attestation and verification**: Constellation verifies the integrity of each new CVM-based node using [remote attestation](../architecture/attestation.md). Only "good" nodes receive the cryptographic keys required to access the network and storage of a cluster. -* **Confidential computing-optimized images**: A node is "good" if it's running a signed Constellation [node image](../architecture/images.md) inside a CVM and is in the expected state. (Node images are hardware-measured during boot. The measurements are reflected in the attestation statements that are produced by nodes and verified by Constellation.) -* **"Whole cluster" attestation**: Towards the DevOps engineer, Constellation provides a single hardware-rooted certificate from which all of the above can be verified. - -With the above, Constellation wraps an entire cluster into one coherent and verifiable *confidential context*. The concept is depicted in the following. - -![Confidential Kubernetes](../_media/concept-constellation.svg) - -## Comparison: Managed Kubernetes with CVMs - -In comparison, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. 
Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. Consequently, this approach leaves a large attack surface, as is depicted in the following. - -![Concept: Managed Kubernetes plus CVMs](../_media/concept-managed.svg) - -The following table highlights the key differences in terms of features. - -| | Managed Kubernetes with CVMs | Confidential Kubernetes (Constellation✨) | -|-------------------------------------|------------------------------|--------------------------------------------| -| Runtime encryption | Partial (data plane only)| **Yes** | -| Node image verification | No | **Yes** | -| Full cluster attestation | No | **Yes** | -| Transparent network encryption | No | **Yes** | -| Transparent storage encryption | No | **Yes** | -| Confidential key management | No | **Yes** | -| Cloud agnostic / multi-cloud | No | **Yes** | diff --git a/docs/versioned_docs/version-2.23/overview/license.md b/docs/versioned_docs/version-2.23/overview/license.md deleted file mode 100644 index 98a9cbf94..000000000 --- a/docs/versioned_docs/version-2.23/overview/license.md +++ /dev/null @@ -1,15 +0,0 @@ -# License - -Constellation is available under the [Business Source License 1.1](https://github.com/edgelesssys/constellation/blob/main/LICENSE). - -You may use it free of charge for non-production use ("Community License"). - -## Enterprise License - -Enterprise Licenses permit production use and come with support and additional features. Find out more at the [product website](https://www.edgeless.systems/products/constellation/). - -Once you have received your Enterprise License file, place it in your [Constellation workspace](../architecture/orchestration.md#workspaces) in a file named `constellation.license`. - -## CSP Marketplaces - -Constellation is available through the Marketplaces of AWS, Azure, GCP, and STACKIT. This allows you to create self-managed Constellation clusters that are billed on a pay-per-use basis (hourly, per vCPU) with your CSP account. You can still get direct support by Edgeless Systems. For more information, please [contact us](https://www.edgeless.systems/enterprise-support/). diff --git a/docs/versioned_docs/version-2.23/overview/performance/application.md b/docs/versioned_docs/version-2.23/overview/performance/application.md deleted file mode 100644 index c67d59644..000000000 --- a/docs/versioned_docs/version-2.23/overview/performance/application.md +++ /dev/null @@ -1,102 +0,0 @@ -# Application benchmarks - -## HashiCorp Vault - -[HashiCorp Vault](https://www.vaultproject.io/) is a distributed secrets management software that can be deployed to Kubernetes. -HashiCorp maintains a benchmarking tool for vault, [vault-benchmark](https://github.com/hashicorp/vault-benchmark/). -Vault-benchmark generates load on a Vault deployment and measures response times. - -This article describes the results from running vault-benchmark on Constellation, AKS, and GKE. 
-You can find the setup for producing the data discussed in this article in the [vault-benchmarks](https://github.com/edgelesssys/vault-benchmarks) repository. - -The Vault API used during benchmarking is the [transits secret engine](https://developer.hashicorp.com/vault/docs/secrets/transit). -This allows services to send data to Vault for encryption, decryption, signing, and verification. - -## Results - -On each run, vault-benchmark sends requests and measures the latencies. -The measured latencies are aggregated through various statistical features. -After running the benchmark n times, the arithmetic mean over a subset of the reported statistics is calculated. -The selected features are arithmetic mean, 99th percentile, minimum, and maximum. - -Arithmetic mean gives a general sense of the latency on each target. -The 99th percentile shows performance in (most likely) erroneous states. -Minimum and maximum mark the range within which latency varies each run. - -The benchmark was configured with 1300 workers and 10 seconds per run. -Those numbers were chosen empirically. -The latency was stabilizing at 10 seconds runtime, not changing with further increase. -Increasing the number of workers beyond 1300 leads to request failures, marking the limit Vault was able to handle in this setup. -All results are based on 100 runs. - -The following data was generated while running five replicas, one primary, and four standby nodes. -All numbers are in seconds if not indicated otherwise. -``` -========== Results AKS ========== -Mean: mean: 1.632200, variance: 0.002057 -P99: mean: 5.480679, variance: 2.263700 -Max: mean: 6.651001, variance: 2.808401 -Min: mean: 0.011415, variance: 0.000133 -========== Results GKE ========== -Mean: mean: 1.656435, variance: 0.003615 -P99: mean: 6.030807, variance: 3.955051 -Max: mean: 7.164843, variance: 3.300004 -Min: mean: 0.010233, variance: 0.000111 -========== Results C11n ========== -Mean: mean: 1.651549, variance: 0.001610 -P99: mean: 5.780422, variance: 3.016106 -Max: mean: 6.942997, variance: 3.075796 -Min: mean: 0.013774, variance: 0.000228 -========== AKS vs C11n ========== -Mean: +1.171577 % (AKS is faster) -P99: +5.185495 % (AKS is faster) -Max: +4.205618 % (AKS is faster) -Min: +17.128781 % (AKS is faster) -========== GKE vs C11n ========== -Mean: -0.295851 % (GKE is slower) -P99: -4.331603 % (GKE is slower) -Max: -3.195248 % (GKE is slower) -Min: +25.710886 % (GKE is faster) -``` - -**Interpretation**: Latencies are all within ~5% of each other. -AKS performs slightly better than GKE and Constellation (C11n) in all cases except minimum latency. -Minimum latency is the lowest for GKE. -Compared to GKE, Constellation had slightly lower peak latencies (99th percentile and maximum), indicating that Constellation could have handled slightly more concurrent accesses than GKE. -Overall, performance is at comparable levels across all three distributions. -Based on these numbers, you can use a similarly sized Constellation cluster to run your existing Vault deployment. - -### Visualization - -The following plots visualize the data presented above as [box plots](https://en.wikipedia.org/wiki/Box_plot). -The whiskers denote the minimum and maximum. -The box stretches from the 25th to the 75th percentile, with the dividing bar marking the 50th percentile. -The circles outside the whiskers denote outliers. - -
-Mean Latency - -![Mean Latency](../../_media/benchmark_vault/5replicas/mean_latency.png) - -
- -
-99th Percentile Latency - -![99th Percentile Latency](../../_media/benchmark_vault/5replicas/p99_latency.png) - -
- -
-Maximum Latency - -![Maximum Latency](../../_media/benchmark_vault/5replicas/max_latency.png) - -
- -
-Minimum Latency - -![Minimum Latency](../../_media/benchmark_vault/5replicas/min_latency.png) - -
diff --git a/docs/versioned_docs/version-2.23/overview/performance/compute.md b/docs/versioned_docs/version-2.23/overview/performance/compute.md deleted file mode 100644 index 88dd4b1b2..000000000 --- a/docs/versioned_docs/version-2.23/overview/performance/compute.md +++ /dev/null @@ -1,11 +0,0 @@ -# Impact of runtime encryption on compute performance - -All nodes in a Constellation cluster are executed inside Confidential VMs (CVMs). Consequently, the performance of Constellation is inherently linked to the performance of these CVMs. - -## AMD and Azure benchmarking - -AMD and Azure have collectively released a [performance benchmark](https://community.amd.com/t5/business/microsoft-azure-confidential-computing-powered-by-3rd-gen-epyc/ba-p/497796) for CVMs that utilize 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. This benchmark, which included a variety of mostly compute-intensive tests such as SPEC CPU 2017 and CoreMark, demonstrated that CVMs experience only minor performance degradation (ranging from 2% to 8%) when compared to standard VMs. Such results are indicative of the performance that can be expected from compute-intensive workloads running with Constellation on Azure. - -## AMD and Google benchmarking - -Similarly, AMD and Google have jointly released a [performance benchmark](https://www.amd.com/system/files/documents/3rd-gen-epyc-gcp-c2d-conf-compute-perf-brief.pdf) for CVMs employing 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. With high-performance computing workloads such as WRF, NAMD, Ansys CFS, and Ansys LS_DYNA, they observed analogous findings, with only minor performance degradation (between 2% and 4%) compared to standard VMs. These outcomes are reflective of the performance that can be expected for compute-intensive workloads running with Constellation on GCP. diff --git a/docs/versioned_docs/version-2.23/overview/performance/io.md b/docs/versioned_docs/version-2.23/overview/performance/io.md deleted file mode 100644 index 3ae796f8a..000000000 --- a/docs/versioned_docs/version-2.23/overview/performance/io.md +++ /dev/null @@ -1,204 +0,0 @@ -# I/O performance benchmarks - -To assess the overall performance of Constellation, this benchmark evaluates Constellation v2.6.0 in terms of storage I/O using [`fio`](https://fio.readthedocs.io/en/latest/fio_doc.html) and network performance using the [Kubernetes Network Benchmark](https://github.com/InfraBuilder/k8s-bench-suite#knb--kubernetes-network-be). - -This benchmark tested Constellation on Azure and GCP and compared the results against the managed Kubernetes offerings AKS and GKE. - -## Configurations - -### Constellation - -The benchmark was conducted with Constellation v2.6.0, Kubernetes v1.25.7, and Cilium v1.12. -It ran on the following infrastructure configurations. - -Constellation on Azure: - -- Nodes: 3 (1 Control-plane, 2 Worker) -- Machines: `DC4as_v5`: 3rd Generation AMD EPYC 7763v (Milan) processor with 4 Cores, 16 GiB memory -- CVM: `true` -- Region: `West US` -- Zone: `2` - -Constellation on GCP: - -- Nodes: 3 (1 Control-plane, 2 Worker) -- Machines: `n2d-standard-4`: 2nd Generation AMD EPYC (Rome) processor with 4 Cores, 16 GiB of memory -- CVM: `true` -- Zone: `europe-west3-b` - -### AKS - -On AKS, the benchmark used Kubernetes `v1.24.9` and nodes with version `AKSUbuntu-1804gen2containerd-2023.02.15`. 
-AKS ran with the [`kubenet`](https://learn.microsoft.com/en-us/azure/aks/concepts-network#kubenet-basic-networking) CNI and the [default CSI driver](https://learn.microsoft.com/en-us/azure/aks/azure-disk-csi) for Azure Disk.
-
-The following infrastructure configuration was used:
-
-- Nodes: 2 (2 Worker)
-- Machines: `D4as_v5`: 3rd Generation AMD EPYC 7763v (Milan) processor with 4 Cores, 16 GiB memory
-- CVM: `false`
-- Region: `West US`
-- Zone: `2`
-
-### GKE
-
-On GKE, the benchmark used Kubernetes `v1.24.9` and nodes with version `1.24.9-gke.3200`.
-GKE ran with the [`kubenet`](https://cloud.google.com/kubernetes-engine/docs/concepts/network-overview) CNI and the [default CSI driver](https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/gce-pd-csi-driver) for Compute Engine persistent disk.
-
-The following infrastructure configuration was used:
-
-- Nodes: 2 (2 Worker)
-- Machines: `n2d-standard-4`: 2nd Generation AMD EPYC (Rome) processor with 4 Cores, 16 GiB of memory
-- CVM: `false`
-- Zone: `europe-west3-b`
-
-## Results
-
-### Network
-
-This section gives a thorough analysis of the network performance of Constellation, specifically focusing on measuring TCP and UDP bandwidth.
-The benchmark measured the bandwidth of pod-to-pod and pod-to-service connections between two different nodes using [`iperf`](https://iperf.fr/).
-
-GKE and Constellation on GCP had a maximum network bandwidth of [10 Gbps](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines).
-AKS with `Standard_D4as_v5` machines has a maximum network bandwidth of [12.5 Gbps](https://learn.microsoft.com/en-us/azure/virtual-machines/dasv5-dadsv5-series#dasv5-series).
-The Confidential VM equivalent `Standard_DC4as_v5` currently has a network bandwidth of [1.25 Gbps](https://learn.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series#dcasv5-series-products).
-Therefore, to make the test comparable, both AKS and Constellation on Azure were running with `Standard_DC4as_v5` machines and 1.25 Gbps bandwidth.
-
-Constellation on Azure and AKS used an MTU of 1500.
-Constellation on GCP used an MTU of 8896. GKE used an MTU of 1450.
-
-The difference in network bandwidth can largely be attributed to two factors.
-
-- Constellation's [network encryption](../../architecture/networking.md) via Cilium and WireGuard, which protects data in-transit.
-- [AMD SEV using SWIOTLB bounce buffers](https://lore.kernel.org/all/20200204193500.GA15564@ashkalra_ubuntu_server/T/) for all DMA including network I/O.
-
-#### Pod-to-Pod
-
-In this scenario, the client Pod connects directly to the server Pod via its IP address.
-
-```mermaid
-flowchart LR
-  subgraph Node A
-    Client[Client]
-  end
-  subgraph Node B
-    Server[Server]
-  end
-  Client ==>|traffic| Server
-```
-
-The results for "Pod-to-Pod" on Azure are as follows:
-
-![Network Pod2Pod Azure benchmark graph](../../_media/benchmark_net_p2p_azure.png)
-
-The results for "Pod-to-Pod" on GCP are as follows:
-
-![Network Pod2Pod GCP benchmark graph](../../_media/benchmark_net_p2p_gcp.png)
-
-#### Pod-to-Service
-
-In this scenario, the client Pod connects to the server Pod via a ClusterIP service. This is more relevant to real-world use cases.
-
-```mermaid
-flowchart LR
-  subgraph Node A
-    Client[Client] ==>|traffic| Service[Service]
-  end
-  subgraph Node B
-    Server[Server]
-  end
-  Service ==>|traffic| Server
-```
-
-The results for "Pod-to-Service" on Azure are as follows:
-
-![Network Pod2SVC Azure benchmark graph](../../_media/benchmark_net_p2svc_azure.png)
-
-The results for "Pod-to-Service" on GCP are as follows:
-
-![Network Pod2SVC GCP benchmark graph](../../_media/benchmark_net_p2svc_gcp.png)
-
-In our recent comparison of Constellation on GCP with GKE, Constellation has 58% less TCP bandwidth. However, UDP bandwidth was slightly better with Constellation, thanks to its higher MTU.
-
-Similarly, when comparing Constellation on Azure with AKS using CVMs, Constellation achieved approximately 10% less TCP and 40% less UDP bandwidth.
-
-### Storage I/O
-
-Azure and GCP offer persistent storage for their Kubernetes services AKS and GKE via the Container Storage Interface (CSI). CSI storage in Kubernetes is available via `PersistentVolumes` (PV) and consumed via `PersistentVolumeClaims` (PVC).
-Upon requesting persistent storage through a PVC, GKE and AKS will provision a PV as defined by a default [storage class](https://kubernetes.io/docs/concepts/storage/storage-classes/).
-Constellation provides persistent storage on Azure and GCP [that's encrypted on the CSI layer](../../architecture/encrypted-storage.md).
-Similarly, upon a PVC request, Constellation will provision a PV via a default storage class.
-
-For Constellation on Azure and AKS, the benchmark ran with Azure Disk storage [Standard SSD](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#standard-ssds) of 400 GiB size.
-The [DC4as machine type](https://learn.microsoft.com/en-us/azure/virtual-machines/dasv5-dadsv5-series#dasv5-series) with four cores provides the following maximum performance:
-
-- 6400 (20000 burst) IOPS
-- 144 MB/s (600 MB/s burst) throughput
-
-However, the performance is bound by the capabilities of the [512 GiB Standard SSD size](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#standard-ssds) (the size class of 400 GiB volumes):
-
-- 500 (600 burst) IOPS
-- 60 MB/s (150 MB/s burst) throughput
-
-For Constellation on GCP and GKE, the benchmark ran with Compute Engine Persistent Disk Storage [pd-balanced](https://cloud.google.com/compute/docs/disks) of 400 GiB size.
-The N2D machine type with four cores and pd-balanced provides the following [maximum performance](https://cloud.google.com/compute/docs/disks/performance#n2d_vms):
-
-- 3,000 read IOPS
-- 15,000 write IOPS
-- 240 MB/s read throughput
-- 240 MB/s write throughput
-
-However, the performance is bound by the capabilities of a [`Zonal balanced PD`](https://cloud.google.com/compute/docs/disks/performance#zonal-persistent-disks) with 400 GiB size:
-
-- 2400 read IOPS
-- 2400 write IOPS
-- 112 MB/s read throughput
-- 112 MB/s write throughput
-
-The [`fio`](https://fio.readthedocs.io/en/latest/fio_doc.html) benchmark consists of several tests.
-The benchmark used [`Kubestr`](https://github.com/kastenhq/kubestr) to run `fio` in Kubernetes.
-The default test performs randomized access patterns that accurately depict worst-case I/O scenarios for most applications.
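-
-A run of this kind could look roughly like the following sketch. The storage class name is a placeholder for the respective default CSI storage class, and the job file corresponds to the `fio` settings listed below:
-
-```bash
-# Hypothetical invocation: run fio through Kubestr against a 400 GiB volume of the given storage class.
-kubestr fio --storageclass REPLACE_WITH_STORAGE_CLASS --size 400Gi --fiofile fio.ini
-```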
-
-The following `fio` settings were used:
-
-- No Cloud caching
-- No OS caching
-- Single CPU
-- 60 seconds runtime
-- 10 seconds ramp-up time
-- 10 GiB file
-- IOPS: 4 KB blocks and 128 iodepth
-- Bandwidth: 1024 KB blocks and 128 iodepth
-
-For more details, see the [`fio` test configuration](https://github.com/edgelesssys/constellation/blob/main/.github/actions/e2e_benchmark/fio.ini).
-
-The results for IOPS on Azure are as follows:
-
-![I/O IOPS Azure benchmark graph](../../_media/benchmark_fio_azure_iops.png)
-
-The results for IOPS on GCP are as follows:
-
-![I/O IOPS GCP benchmark graph](../../_media/benchmark_fio_gcp_iops.png)
-
-The results for bandwidth on Azure are as follows:
-
-![I/O bandwidth Azure benchmark graph](../../_media/benchmark_fio_azure_bw.png)
-
-The results for bandwidth on GCP are as follows:
-
-![I/O bandwidth GCP benchmark graph](../../_media/benchmark_fio_gcp_bw.png)
-
-On GCP, the results exceed the maximum performance guarantees of the chosen disk type. There are two possible explanations for this. The first is that there may be cloud caching in place that isn't configurable. Alternatively, the underlying provisioned disk size may be larger than what was requested, resulting in higher performance boundaries.
-
-When comparing Constellation on GCP with GKE, Constellation has similar bandwidth but about 10% less IOPS performance. On Azure, Constellation has similar IOPS performance compared to AKS, where both likely hit the maximum storage performance. However, Constellation has approximately 15% less read and write bandwidth.
-
-## Conclusion
-
-Despite the added [security benefits](../security-benefits.md) that Constellation provides, it only incurs a slight performance overhead when compared to managed Kubernetes offerings such as AKS and GKE. In most compute benchmarks, Constellation is on par with its alternatives.
-While it may be slightly slower in certain I/O scenarios due to network and storage encryption, there is ongoing work to reduce this overhead to single digits.
-
-For instance, storage encryption only adds between 10% and 15% overhead in terms of bandwidth and IOPS.
-Meanwhile, the biggest performance impact that Constellation currently faces is network encryption, which can incur up to 58% overhead on a 10 Gbps network.
-However, the Cilium team has conducted [benchmarks with Cilium using WireGuard encryption](https://docs.cilium.io/en/latest/operations/performance/benchmark/#encryption-wireguard-ipsec) on a 100 Gbps network that yielded over 15 Gbps.
-We're confident that Constellation will provide a similar level of performance with an upcoming release.
-
-Overall, Constellation strikes a great balance between security and performance, and we're continuously working to improve its performance capabilities while maintaining its high level of security.
diff --git a/docs/versioned_docs/version-2.23/overview/performance/performance.md b/docs/versioned_docs/version-2.23/overview/performance/performance.md
deleted file mode 100644
index 59bf86602..000000000
--- a/docs/versioned_docs/version-2.23/overview/performance/performance.md
+++ /dev/null
@@ -1,17 +0,0 @@
-# Performance analysis of Constellation
-
-This section provides a comprehensive examination of the performance characteristics of Constellation.
-
-## Runtime encryption
-
-Runtime encryption affects compute performance.
[Benchmarks by Azure and Google](compute.md) show that the performance degradation of Confidential VMs (CVMs) is small, ranging from 2% to 8% for compute-intensive workloads. - -## I/O performance benchmarks - -We evaluated the [I/O performance](io.md) of Constellation, utilizing a collection of synthetic benchmarks targeting networking and storage. -We further compared this performance to native managed Kubernetes offerings from various cloud providers, to better understand how Constellation stands in relation to standard practices. - -## Application benchmarking - -To gauge Constellation's applicability to well-known applications, we performed a [benchmark of HashiCorp Vault](application.md) running on Constellation. -The results were then compared to deployments on the managed Kubernetes offerings from different cloud providers, providing a tangible perspective on Constellation's performance in actual deployment scenarios. diff --git a/docs/versioned_docs/version-2.23/overview/product.md b/docs/versioned_docs/version-2.23/overview/product.md deleted file mode 100644 index 4b5d90706..000000000 --- a/docs/versioned_docs/version-2.23/overview/product.md +++ /dev/null @@ -1,12 +0,0 @@ -# Product features - -Constellation is a Kubernetes engine that aims to provide the best possible data security in combination with enterprise-grade scalability and reliability features---and a smooth user experience. - -From a security perspective, Constellation implements the [Confidential Kubernetes](confidential-kubernetes.md) concept and corresponding security features, which shield your entire cluster from the underlying infrastructure. - -From an operational perspective, Constellation provides the following key features: - -* **Native support for different clouds**: Constellation works on Amazon Web Services (AWS), Microsoft Azure, Google Cloud Platform (GCP), and STACKIT. Support for OpenStack-based environments is coming with a future release. Constellation securely interfaces with the cloud infrastructure to provide [cluster autoscaling](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), [dynamic persistent volumes](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/), and [service load balancing](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). -* **High availability**: Constellation uses a [multi-master architecture](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/) with a [stacked etcd topology](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/ha-topology/#stacked-etcd-topology) to ensure high availability. -* **Integrated Day-2 operations**: Constellation lets you securely [upgrade](../workflows/upgrade.md) your cluster to a new release. It also lets you securely [recover](../workflows/recovery.md) a failed cluster. Both with a single command. -* **Support for Terraform**: Constellation includes a [Terraform provider](../workflows/terraform-provider.md) that lets you manage the full lifecycle of your cluster via Terraform. 
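-
-To illustrate the "single command" Day-2 operations mentioned above, upgrading and recovering a cluster boil down to invocations like the following (see the [CLI reference](../reference/cli.md) for all options):
-
-```bash
-# Check for available upgrades, then apply them to the running cluster.
-constellation upgrade check
-constellation upgrade apply
-
-# Recover a stopped cluster whose nodes restarted without peers to bootstrap from.
-constellation recover
-```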
diff --git a/docs/versioned_docs/version-2.23/overview/security-benefits.md b/docs/versioned_docs/version-2.23/overview/security-benefits.md deleted file mode 100644 index 51a8b64f5..000000000 --- a/docs/versioned_docs/version-2.23/overview/security-benefits.md +++ /dev/null @@ -1,22 +0,0 @@ -# Security benefits and threat model - -Constellation implements the [Confidential Kubernetes](confidential-kubernetes.md) concept and shields entire Kubernetes deployments from the infrastructure. More concretely, Constellation decreases the size of the trusted computing base (TCB) of a Kubernetes deployment. The TCB is the totality of elements in a computing environment that must be trusted not to be compromised. A smaller TCB results in a smaller attack surface. The following diagram shows how Constellation removes the *cloud & datacenter infrastructure* and the *physical hosts*, including the hypervisor, the host OS, and other components, from the TCB (red). Inside the confidential context (green), Kubernetes remains part of the TCB, but its integrity is attested and can be [verified](../workflows/verify-cluster.md). - -![TCB comparison](../_media/tcb.svg) - -Given this background, the following describes the concrete threat classes that Constellation addresses. - -## Insider access - -Employees and third-party contractors of cloud service providers (CSPs) have access to different layers of the cloud infrastructure. -This opens up a large attack surface where workloads and data can be read, copied, or manipulated. With Constellation, Kubernetes deployments are shielded from the infrastructure and thus such accesses are prevented. - -## Infrastructure-based attacks - -Malicious cloud users ("hackers") may break out of their tenancy and access other tenants' data. Advanced attackers may even be able to establish a permanent foothold within the infrastructure and access data over a longer period. Analogously to the *insider access* scenario, Constellation also prevents access to a deployment's data in this scenario. - -## Supply chain attacks - -Supply chain security is receiving lots of attention recently due to an [increasing number of recorded attacks](https://www.enisa.europa.eu/news/enisa-news/understanding-the-increase-in-supply-chain-security-attacks). For instance, a malicious actor could attempt to tamper Constellation node images (including Kubernetes and other software) before they're loaded in the confidential VMs of a cluster. Constellation uses [remote attestation](../architecture/attestation.md) in conjunction with public [transparency logs](../workflows/verify-cli.md) to prevent this. - -In the future, Constellation will extend this feature to customer workloads. This will enable cluster owners to create auditable policies that precisely define which containers can run in a given deployment. diff --git a/docs/versioned_docs/version-2.23/reference/cli.md b/docs/versioned_docs/version-2.23/reference/cli.md deleted file mode 100644 index 7cbc0be8d..000000000 --- a/docs/versioned_docs/version-2.23/reference/cli.md +++ /dev/null @@ -1,873 +0,0 @@ -# CLI reference - - - -Use the Constellation CLI to create and manage your clusters. 
- -Usage: - -``` -constellation [command] -``` -Commands: - -* [config](#constellation-config): Work with the Constellation configuration file - * [generate](#constellation-config-generate): Generate a default configuration and state file - * [fetch-measurements](#constellation-config-fetch-measurements): Fetch measurements for configured cloud provider and image - * [instance-types](#constellation-config-instance-types): Print the supported instance types for all cloud providers - * [kubernetes-versions](#constellation-config-kubernetes-versions): Print the Kubernetes versions supported by this CLI - * [migrate](#constellation-config-migrate): Migrate a configuration file to a new version -* [create](#constellation-create): Create instances on a cloud platform for your Constellation cluster -* [apply](#constellation-apply): Apply a configuration to a Constellation cluster -* [mini](#constellation-mini): Manage MiniConstellation clusters - * [up](#constellation-mini-up): Create and initialize a new MiniConstellation cluster - * [down](#constellation-mini-down): Destroy a MiniConstellation cluster -* [status](#constellation-status): Show status of a Constellation cluster -* [verify](#constellation-verify): Verify the confidential properties of a Constellation cluster -* [upgrade](#constellation-upgrade): Find and apply upgrades to your Constellation cluster - * [check](#constellation-upgrade-check): Check for possible upgrades - * [apply](#constellation-upgrade-apply): Apply an upgrade to a Constellation cluster -* [recover](#constellation-recover): Recover a completely stopped Constellation cluster -* [terminate](#constellation-terminate): Terminate a Constellation cluster -* [iam](#constellation-iam): Work with the IAM configuration on your cloud provider - * [create](#constellation-iam-create): Create IAM configuration on a cloud platform for your Constellation cluster - * [aws](#constellation-iam-create-aws): Create IAM configuration on AWS for your Constellation cluster - * [azure](#constellation-iam-create-azure): Create IAM configuration on Microsoft Azure for your Constellation cluster - * [gcp](#constellation-iam-create-gcp): Create IAM configuration on GCP for your Constellation cluster - * [destroy](#constellation-iam-destroy): Destroy an IAM configuration and delete local Terraform files - * [upgrade](#constellation-iam-upgrade): Find and apply upgrades to your IAM profile - * [apply](#constellation-iam-upgrade-apply): Apply an upgrade to an IAM profile -* [version](#constellation-version): Display version of this CLI -* [init](#constellation-init): Initialize the Constellation cluster -* [ssh](#constellation-ssh): Generate a certificate for emergency SSH access - -## constellation config - -Work with the Constellation configuration file - -### Synopsis - -Work with the Constellation configuration file. - -### Options - -``` - -h, --help help for config -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation config generate - -Generate a default configuration and state file - -### Synopsis - -Generate a default configuration and state file for your selected cloud provider. 
- -``` -constellation config generate {aws|azure|gcp|openstack|qemu|stackit} [flags] -``` - -### Options - -``` - -a, --attestation string attestation variant to use {aws-sev-snp|aws-nitro-tpm|azure-sev-snp|azure-tdx|azure-trustedlaunch|gcp-sev-snp|gcp-sev-es|qemu-vtpm}. If not specified, the default for the cloud provider is used - -h, --help help for generate - -k, --kubernetes string Kubernetes version to use in format MAJOR.MINOR (default "v1.30") - -t, --tags strings additional tags for created resources given a list of key=value -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation config fetch-measurements - -Fetch measurements for configured cloud provider and image - -### Synopsis - -Fetch measurements for configured cloud provider and image. - -A config needs to be generated first. - -``` -constellation config fetch-measurements [flags] -``` - -### Options - -``` - -h, --help help for fetch-measurements - -s, --signature-url string alternative URL to fetch measurements' signature from - -u, --url string alternative URL to fetch measurements from -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation config instance-types - -Print the supported instance types for all cloud providers - -### Synopsis - -Print the supported instance types for all cloud providers. - -``` -constellation config instance-types [flags] -``` - -### Options - -``` - -h, --help help for instance-types -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation config kubernetes-versions - -Print the Kubernetes versions supported by this CLI - -### Synopsis - -Print the Kubernetes versions supported by this CLI. - -``` -constellation config kubernetes-versions [flags] -``` - -### Options - -``` - -h, --help help for kubernetes-versions -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation config migrate - -Migrate a configuration file to a new version - -### Synopsis - -Migrate a configuration file to a new version. - -``` -constellation config migrate [flags] -``` - -### Options - -``` - -h, --help help for migrate -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation create - -Create instances on a cloud platform for your Constellation cluster - -### Synopsis - -Create instances on a cloud platform for your Constellation cluster. 
- -``` -constellation create [flags] -``` - -### Options - -``` - -h, --help help for create - -y, --yes create the cluster without further confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation apply - -Apply a configuration to a Constellation cluster - -### Synopsis - -Apply a configuration to a Constellation cluster to initialize or upgrade the cluster. - -``` -constellation apply [flags] -``` - -### Options - -``` - --conformance enable conformance mode - -h, --help help for apply - --merge-kubeconfig merge Constellation kubeconfig file with default kubeconfig file in $HOME/.kube/config - --skip-helm-wait install helm charts without waiting for deployments to be ready - --skip-phases strings comma-separated list of upgrade phases to skip - one or multiple of { infrastructure | init | attestationconfig | certsans | helm | image | k8s } - -y, --yes run command without further confirmation - WARNING: the command might delete or update existing resources without additional checks. Please read the docs. - -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation mini - -Manage MiniConstellation clusters - -### Synopsis - -Manage MiniConstellation clusters. - -### Options - -``` - -h, --help help for mini -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation mini up - -Create and initialize a new MiniConstellation cluster - -### Synopsis - -Create and initialize a new MiniConstellation cluster. - -A mini cluster consists of a single control-plane and worker node, hosted using QEMU/KVM. - -``` -constellation mini up [flags] -``` - -### Options - -``` - -h, --help help for up - --merge-kubeconfig merge Constellation kubeconfig file with default kubeconfig file in $HOME/.kube/config (default true) -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation mini down - -Destroy a MiniConstellation cluster - -### Synopsis - -Destroy a MiniConstellation cluster. - -``` -constellation mini down [flags] -``` - -### Options - -``` - -h, --help help for down - -y, --yes terminate the cluster without further confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation status - -Show status of a Constellation cluster - -### Synopsis - -Show the status of a constellation cluster. 
- -Shows microservice, image, and Kubernetes versions installed in the cluster. Also shows status of current version upgrades. - -``` -constellation status [flags] -``` - -### Options - -``` - -h, --help help for status -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation verify - -Verify the confidential properties of a Constellation cluster - -### Synopsis - -Verify the confidential properties of a Constellation cluster. -If arguments aren't specified, values are read from `constellation-state.yaml`. - -``` -constellation verify [flags] -``` - -### Options - -``` - --cluster-id string expected cluster identifier - -h, --help help for verify - -e, --node-endpoint string endpoint of the node to verify, passed as HOST[:PORT] - -o, --output string print the attestation document in the output format {json|raw} -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation upgrade - -Find and apply upgrades to your Constellation cluster - -### Synopsis - -Find and apply upgrades to your Constellation cluster. - -### Options - -``` - -h, --help help for upgrade -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation upgrade check - -Check for possible upgrades - -### Synopsis - -Check which upgrades can be applied to your Constellation Cluster. - -``` -constellation upgrade check [flags] -``` - -### Options - -``` - -h, --help help for check - --ref string the reference to use for querying new versions (default "-") - --stream string the stream to use for querying new versions (default "stable") - -u, --update-config update the specified config file with the suggested versions -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation upgrade apply - -Apply an upgrade to a Constellation cluster - -### Synopsis - -Apply an upgrade to a Constellation cluster by applying the chosen configuration. - -``` -constellation upgrade apply [flags] -``` - -### Options - -``` - --conformance enable conformance mode - -h, --help help for apply - --skip-helm-wait install helm charts without waiting for deployments to be ready - --skip-phases strings comma-separated list of upgrade phases to skip - one or multiple of { infrastructure | helm | image | k8s } - -y, --yes run upgrades without further confirmation - WARNING: might delete your resources in case you are using cert-manager in your cluster. Please read the docs. - WARNING: might unintentionally overwrite measurements in the running cluster. 
-``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation recover - -Recover a completely stopped Constellation cluster - -### Synopsis - -Recover a Constellation cluster by sending a recovery key to an instance in the boot stage. - -This is only required if instances restart without other instances available for bootstrapping. - -``` -constellation recover [flags] -``` - -### Options - -``` - -e, --endpoint string endpoint of the instance, passed as HOST[:PORT] - -h, --help help for recover -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation terminate - -Terminate a Constellation cluster - -### Synopsis - -Terminate a Constellation cluster. - -The cluster can't be started again, and all persistent storage will be lost. - -``` -constellation terminate [flags] -``` - -### Options - -``` - -h, --help help for terminate - -y, --yes terminate the cluster without further confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation iam - -Work with the IAM configuration on your cloud provider - -### Synopsis - -Work with the IAM configuration on your cloud provider. - -### Options - -``` - -h, --help help for iam -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation iam create - -Create IAM configuration on a cloud platform for your Constellation cluster - -### Synopsis - -Create IAM configuration on a cloud platform for your Constellation cluster. - -### Options - -``` - -h, --help help for create - --update-config update the config file with the specific IAM information - -y, --yes create the IAM configuration without further confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation iam create aws - -Create IAM configuration on AWS for your Constellation cluster - -### Synopsis - -Create IAM configuration on AWS for your Constellation cluster. - -``` -constellation iam create aws [flags] -``` - -### Options - -``` - -h, --help help for aws - --prefix string name prefix for all resources (required) - --zone string AWS availability zone the resources will be created in, e.g., us-east-2a (required) - See the Constellation docs for a list of currently supported regions. 
-``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - --update-config update the config file with the specific IAM information - -C, --workspace string path to the Constellation workspace - -y, --yes create the IAM configuration without further confirmation -``` - -## constellation iam create azure - -Create IAM configuration on Microsoft Azure for your Constellation cluster - -### Synopsis - -Create IAM configuration on Microsoft Azure for your Constellation cluster. - -``` -constellation iam create azure [flags] -``` - -### Options - -``` - -h, --help help for azure - --region string region the resources will be created in, e.g., westus (required) - --resourceGroup string name prefix of the two resource groups your cluster / IAM resources will be created in (required) - --servicePrincipal string name of the service principal that will be created (required) - --subscriptionID string subscription ID of the Azure account. Required if the 'ARM_SUBSCRIPTION_ID' environment variable is not set -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - --update-config update the config file with the specific IAM information - -C, --workspace string path to the Constellation workspace - -y, --yes create the IAM configuration without further confirmation -``` - -## constellation iam create gcp - -Create IAM configuration on GCP for your Constellation cluster - -### Synopsis - -Create IAM configuration on GCP for your Constellation cluster. - -``` -constellation iam create gcp [flags] -``` - -### Options - -``` - -h, --help help for gcp - --prefix string Prefix for the service account ID and VM ID that will be created (required) - Must be letters, digits, or hyphens. - --projectID string ID of the GCP project the configuration will be created in (required) - Find it on the welcome screen of your project: https://console.cloud.google.com/welcome - --zone string GCP zone the cluster will be deployed in (required) - Find a list of available zones here: https://cloud.google.com/compute/docs/regions-zones#available -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - --update-config update the config file with the specific IAM information - -C, --workspace string path to the Constellation workspace - -y, --yes create the IAM configuration without further confirmation -``` - -## constellation iam destroy - -Destroy an IAM configuration and delete local Terraform files - -### Synopsis - -Destroy an IAM configuration and delete local Terraform files. 
- -``` -constellation iam destroy [flags] -``` - -### Options - -``` - -h, --help help for destroy - -y, --yes destroy the IAM configuration without asking for confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation iam upgrade - -Find and apply upgrades to your IAM profile - -### Synopsis - -Find and apply upgrades to your IAM profile. - -### Options - -``` - -h, --help help for upgrade -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation iam upgrade apply - -Apply an upgrade to an IAM profile - -### Synopsis - -Apply an upgrade to an IAM profile. - -``` -constellation iam upgrade apply [flags] -``` - -### Options - -``` - -h, --help help for apply - -y, --yes run upgrades without further confirmation -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation version - -Display version of this CLI - -### Synopsis - -Display version of this CLI. - -``` -constellation version [flags] -``` - -### Options - -``` - -h, --help help for version -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation init - -Initialize the Constellation cluster - -### Synopsis - -Initialize the Constellation cluster. - -Start your confidential Kubernetes. - -``` -constellation init [flags] -``` - -### Options - -``` - --conformance enable conformance mode - -h, --help help for init - --merge-kubeconfig merge Constellation kubeconfig file with default kubeconfig file in $HOME/.kube/config - --skip-helm-wait install helm charts without waiting for deployments to be ready -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - -## constellation ssh - -Generate a certificate for emergency SSH access - -### Synopsis - -Generate a certificate for emergency SSH access to your SSH-enabled constellation cluster. 
- -``` -constellation ssh [flags] -``` - -### Options - -``` - -h, --help help for ssh - --key string the path to an existing SSH public key -``` - -### Options inherited from parent commands - -``` - --debug enable debug logging - --force disable version compatibility checks - might result in corrupted clusters - --tf-log string Terraform log level (default "NONE") - -C, --workspace string path to the Constellation workspace -``` - diff --git a/docs/versioned_docs/version-2.23/reference/migration.md b/docs/versioned_docs/version-2.23/reference/migration.md deleted file mode 100644 index eb55d650b..000000000 --- a/docs/versioned_docs/version-2.23/reference/migration.md +++ /dev/null @@ -1,140 +0,0 @@ -# Migrations - -This document describes breaking changes and migrations between Constellation releases. -Use [`constellation config migrate`](./cli.md#constellation-config-migrate) to automatically update an old config file to a new format. - -## Migrations to v2.23.0 - -### GCP - -GCP will require the additional permission `compute.forwardingRules.list`. Please update your IAM roles using `constellation iam upgrade apply`. - -## Migrations to v2.19.1 - -### Azure - -* During the upgrade, security rules are migrated and the old ones need to be cleaned up manually by the user. The below script shows how to delete them through the Azure CLI: - -```bash -#!/usr/bin/env bash -name="" # the name provided in the config -uid="" # the cluster id can be retrieved via `yq '.infrastructure.uid' constellation-state.yaml` -resource_group="" # the RG can be retrieved via `yq '.provider.azure.resourceGroup' constellation-conf.yaml` - -rules=( - "kubernetes" - "bootstrapper" - "verify" - "recovery" - "join" - "debugd" - "konnectivity" -) - -for rule in "${rules[@]}"; do - echo "Deleting rule: ${rule}" - az network nsg rule delete \ - --resource-group "${resource_group}" \ - --nsg-name "${name}-${uid}" \ - --name "${rule}" -done - -echo "All specified rules have been deleted." -``` - -## Migrating from CLI versions before 2.21.1 - -### AWS - -* AWS clusters that use `LoadBalancer` resources require more IAM permissions. Please upgrade your IAM roles using `constellation iam upgrade apply`. This will show necessary changes and apply them, if desired. - -## Migrating from CLI versions before 2.19.0 - -### Azure - -* To allow seamless upgrades on Azure when Kubernetes services of type `LoadBalancer` are deployed, the target - load balancer in which the `cloud-controller-manager` creates load balancing rules was changed. Instead of using the load balancer - created and maintained by the CLI's Terraform code, the `cloud-controller-manager` now creates its own load balancer in Azure. - If your Constellation has services of type `LoadBalancer`, please remove them before the upgrade and re-apply them - afterward. - -## Migrating from CLI versions before 2.18.0 - -* The `provider.azure.appClientID` and `provider.azure.appClientSecret` fields are no longer supported and should be removed. -* To keep using an existing UAMI, add the `Owner` permission with the scope of your `resourceGroup`. -* Otherwise, simply [create new Constellation IAM credentials](../workflows/config.md#creating-an-iam-configuration) and use the created UAMI. -* To migrate the authentication for an existing cluster on Azure to an UAMI with the necessary permissions: - 1. Remove the `aadClientId` and `aadClientSecret` from the azureconfig secret. - 2. 
Set `useManagedIdentityExtension` to `true` and use the `userAssignedIdentity` from the Constellation config for the value of `userAssignedIdentityID`. - 3. Restart the CSI driver, cloud controller manager, cluster autoscaler, and Constellation operator pods. - -## Migrating from CLI versions before 2.10 - -* AWS cluster upgrades require additional IAM permissions for the newly introduced `aws-load-balancer-controller`. Please upgrade your IAM roles using `iam upgrade apply`. This will show necessary changes and apply them, if desired. -* The global `nodeGroups` field was added. -* The fields `instanceType`, `stateDiskSizeGB`, and `stateDiskType` for each cloud provider are now part of the configuration of individual node groups. -* The `constellation create` command no longer uses the flags `--control-plane-count` and `--worker-count`. Instead, the initial node count is configured per node group in the `nodeGroups` field. - -## Migrating from CLI versions before 2.9 - -* The `provider.azure.appClientID` and `provider.azure.clientSecretValue` fields were removed to enforce migration to managed identity authentication - -## Migrating from CLI versions before 2.8 - -* The `measurements` field for each cloud service provider was replaced with a global `attestation` field. -* The `confidentialVM`, `idKeyDigest`, and `enforceIdKeyDigest` fields for the Azure cloud service provider were removed in favor of using the global `attestation` field. -* The optional global field `attestationVariant` was replaced by the now required `attestation` field. - -## Migrating from CLI versions before 2.3 - -* The `sshUsers` field was deprecated in v2.2 and has been removed from the configuration in v2.3. - As an alternative for SSH, check the workflow section [Connect to nodes](../workflows/troubleshooting.md#node-shell-access). -* The `image` field for each cloud service provider has been replaced with a global `image` field. Use the following mapping to migrate your configuration: -
- Show all - - | CSP | old image | new image | - | ----- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- | - | AWS | `ami-06b8cbf4837a0a57c` | `v2.2.2` | - | AWS | `ami-02e96dc04a9e438cd` | `v2.2.2` | - | AWS | `ami-028ead928a9034b2f` | `v2.2.2` | - | AWS | `ami-032ac10dd8d8266e3` | `v2.2.1` | - | AWS | `ami-032e0d57cc4395088` | `v2.2.1` | - | AWS | `ami-053c3e49e19b96bdd` | `v2.2.1` | - | AWS | `ami-0e27ebcefc38f648b` | `v2.2.0` | - | AWS | `ami-098cd37f66523b7c3` | `v2.2.0` | - | AWS | `ami-04a87d302e2509aad` | `v2.2.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.2.2` | `v2.2.2` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.2.2` | `v2.2.2` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.2.1` | `v2.2.1` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.2.1` | `v2.2.1` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.2.0` | `v2.2.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.2.0` | `v2.2.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.1.0` | `v2.1.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.1.0` | `v2.1.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.0.0` | `v2.0.0` | - | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.0.0` | `v2.0.0` | - | GCP | `projects/constellation-images/global/images/constellation-v2-2-2` | `v2.2.2` | - | GCP | `projects/constellation-images/global/images/constellation-v2-2-1` | `v2.2.1` | - | GCP | `projects/constellation-images/global/images/constellation-v2-2-0` | `v2.2.0` | - | GCP | `projects/constellation-images/global/images/constellation-v2-1-0` | `v2.1.0` | - | GCP | `projects/constellation-images/global/images/constellation-v2-0-0` | `v2.0.0` | - -
-* The `enforcedMeasurements` field has been removed and merged with the `measurements` field. - * To migrate your config containing a new image (`v2.3` or greater), remove the old `measurements` and `enforcedMeasurements` entries from your config and run `constellation fetch-measurements` - * To migrate your config containing an image older than `v2.3`, remove the `enforcedMeasurements` entry and replace the entries in `measurements` as shown in the example below: - - ```diff - measurements: - - 0: DzXCFGCNk8em5ornNZtKi+Wg6Z7qkQfs5CfE3qTkOc8= - + 0: - + expected: DzXCFGCNk8em5ornNZtKi+Wg6Z7qkQfs5CfE3qTkOc8= - + warnOnly: true - - 8: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= - + 8: - + expected: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= - + warnOnly: false - -enforcedMeasurements: - - - 8 - ``` diff --git a/docs/versioned_docs/version-2.23/reference/slsa.md b/docs/versioned_docs/version-2.23/reference/slsa.md deleted file mode 100644 index 21f4e713c..000000000 --- a/docs/versioned_docs/version-2.23/reference/slsa.md +++ /dev/null @@ -1,73 +0,0 @@ -# Supply chain levels for software artifacts (SLSA) adoption - -[Supply chain Levels for Software Artifacts, or SLSA (salsa)](https://slsa.dev/) is a framework for improving and grading a project's build system and engineering processes. SLSA focuses on security improvements for source code storage as well as build system definition, execution, and observation. SLSA is structured in [four levels](https://slsa.dev/spec/v0.1/levels). This page describes the adoption of SLSA for Constellation. - -:::info -SLSA is still in alpha status. The presented levels and their requirements might change in the future. We will adopt any changes into our engineering processes, as they get defined. -::: - -## Level 1 - Adopted - -**[Build - Scripted](https://slsa.dev/spec/v0.1/requirements#scripted-build)** - -All build steps are automated via [Bazel](https://github.com/edgelesssys/constellation/tree/main/bazel/ci) and [GitHub Actions](https://github.com/edgelesssys/constellation/tree/main/.github). - -**[Provenance - Available](https://slsa.dev/spec/v0.1/requirements#available)** - -Provenance for the CLI is generated using the [slsa-github-generator](https://github.com/slsa-framework/slsa-github-generator). - -## Level 2 - Adopted - -**[Source - Version Controlled](https://slsa.dev/spec/v0.1/requirements#version-controlled)** - -Constellation is hosted on GitHub using git. - -**[Build - Build Service](https://slsa.dev/spec/v0.1/requirements#build-service)** - -All builds are carried out by [GitHub Actions](https://github.com/edgelesssys/constellation/tree/main/.github). - -**[Provenance - Authenticated](https://slsa.dev/spec/v0.1/requirements#authenticated)** - -Provenance for the CLI is signed using the [slsa-github-generator](https://github.com/slsa-framework/slsa-github-generator). Learn [how to verify the CLI](../workflows/verify-cli.md) using the signed provenance, before using it for the first time. - -**[Provenance - Service Generated](https://slsa.dev/spec/v0.1/requirements#service-generated)** - -Provenance for the CLI is generated using the [slsa-github-generator](https://github.com/slsa-framework/slsa-github-generator) in GitHub Actions. 
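-
-For illustration, a provenance check with the [slsa-verifier](https://github.com/slsa-framework/slsa-verifier) tool might look roughly as follows; the artifact and provenance file names are placeholders, and the linked CLI verification guide remains the authoritative set of steps.
-
-```bash
-# Hypothetical invocation: verify a downloaded CLI binary against its signed provenance.
-slsa-verifier verify-artifact constellation-linux-amd64 \
-  --provenance-path constellation.intoto.jsonl \
-  --source-uri github.com/edgelesssys/constellation
-```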
- -## Level 3 - Adopted - -**[Source - Verified History](https://slsa.dev/spec/v0.1/requirements#verified-history)** - -The [Edgeless Systems](https://github.com/edgelesssys) GitHub organization [requires two-factor authentication](https://docs.github.com/en/organizations/keeping-your-organization-secure/managing-two-factor-authentication-for-your-organization/requiring-two-factor-authentication-in-your-organization) for all members. - -**[Source - Retained Indefinitely](https://slsa.dev/spec/v0.1/requirements#retained-indefinitely)** - -Since we use GitHub to host the repository, an external person can't modify or delete the history. Before a pull request can be merged, an explicit approval from an [Edgeless Systems](https://github.com/edgelesssys) team member is required. - -The same holds true for changes proposed by team members. Each change to `main` needs to be proposed via a pull request and requires at least one approval. - -The [Edgeless Systems](https://github.com/edgelesssys) GitHub organization admins control these settings and are able to make changes to the repository's history should legal requirements necessitate it. These changes require two-party approval following the obliterate policy. - -**[Build - Build as Code](https://slsa.dev/spec/v0.1/requirements#build-as-code)** - -All build files for Constellation are stored in [the same repository](https://github.com/edgelesssys/constellation/tree/main/.github). - -**[Build - Ephemeral Environment](https://slsa.dev/spec/v0.1/requirements#ephemeral-environment)** - -All GitHub Action workflows are executed on [GitHub-hosted runners](https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners). These runners are only available during workflow. - -We currently don't use [self-hosted runners](https://docs.github.com/en/actions/hosting-your-own-runners/about-self-hosted-runners). - -**[Build - Isolated](https://slsa.dev/spec/v0.1/requirements#isolated)** - -As outlined in the previous section, we use GitHub-hosted runners, which provide a new, isolated and ephemeral environment for each build. - -Additionally, the [SLSA GitHub generator](https://github.com/slsa-framework/slsa-github-generator#generation-of-provenance) itself is run in an isolated workflow with the artifact hash as defined inputs. - -**[Provenance - Non-falsifiable](https://slsa.dev/spec/v0.1/requirements#non-falsifiable)** - -As outlined by [SLSA GitHub generator](https://github.com/slsa-framework/slsa-github-generator) it already fulfills the non-falsifiable requirements for SLSA Level 3. The generated provenance is signed using [sigstore](https://sigstore.dev/) with an OIDC based proof of identity. - -## Level 4 - In Progress - -We strive to adopt certain aspect of SLSA Level 4 that support our engineering process. At the same time, SLSA is still in alpha status and the biggest changes to SLSA are expected to be around Level 4. diff --git a/docs/versioned_docs/version-2.23/reference/terraform.md b/docs/versioned_docs/version-2.23/reference/terraform.md deleted file mode 100644 index 9825a8bb8..000000000 --- a/docs/versioned_docs/version-2.23/reference/terraform.md +++ /dev/null @@ -1,37 +0,0 @@ -# Terraform usage - -[Terraform](https://www.terraform.io/) is an Infrastructure as Code (IaC) framework to manage cloud resources. This page explains how Constellation uses it internally and how advanced users may manually use it to have more control over the resource creation. 
- -:::info -Information on this page is intended for users who are familiar with Terraform. -It's not required for common usage of Constellation. -See the [Terraform documentation](https://developer.hashicorp.com/terraform/docs) if you want to learn more about it. -::: - -## Terraform state files - -Constellation keeps Terraform state files in subdirectories of the workspace together with the corresponding Terraform configuration files and metadata. -The subdirectories are created on the first Constellation CLI action that uses Terraform internally. - -Currently, these subdirectories are: - -* `constellation-terraform` - Terraform state files for the resources of the Constellation cluster -* `constellation-iam-terraform` - Terraform state files for IAM configuration - -As with all commands, commands that work with these files (e.g., `apply`, `terminate`, `iam`) have to be executed from the root of the cluster's [workspace directory](../architecture/orchestration.md#workspaces). You usually don't need and shouldn't manipulate or delete the subdirectories manually. - -## Interacting with Terraform manually - -Manual interaction with Terraform state created by Constellation (i.e., via the Terraform CLI) should only be performed by experienced users. It may lead to unrecoverable loss of cloud resources. For the majority of users and use cases, the interaction done by the [Constellation CLI](cli.md) is sufficient. - -## Terraform debugging - -To debug Terraform issues, the Constellation CLI offers the `tf-log` flag. You can set it to any of [Terraform's log levels](https://developer.hashicorp.com/terraform/internals/debugging): -* `JSON` (JSON-formatted logs at `TRACE` level) -* `TRACE` -* `DEBUG` -* `INFO` -* `WARN` -* `ERROR` - -The log output is written to the `terraform.log` file in the workspace directory. The output is appended to the file on each run. diff --git a/docs/versioned_docs/version-2.23/workflows/cert-manager.md b/docs/versioned_docs/version-2.23/workflows/cert-manager.md deleted file mode 100644 index 1d847e8bf..000000000 --- a/docs/versioned_docs/version-2.23/workflows/cert-manager.md +++ /dev/null @@ -1,13 +0,0 @@ -# Install cert-manager - -:::caution -If you want to use cert-manager with Constellation, pay attention to the following to avoid potential pitfalls. -::: - -Constellation ships with cert-manager preinstalled. -The default installation is part of the `kube-system` namespace, as all other Constellation-managed microservices. -You are free to install more instances of cert-manager into other namespaces. -However, be aware that any new installation needs to use the same version as the one installed with Constellation or rely on the same CRD versions. -Also remember to set the `installCRDs` value to `false` when installing new cert-manager instances. -It will create problems if you have two installations of cert-manager depending on different versions of the installed CRDs. -CRDs are cluster-wide resources and cert-manager depends on specific versions of those CRDs for each release. diff --git a/docs/versioned_docs/version-2.23/workflows/config.md b/docs/versioned_docs/version-2.23/workflows/config.md deleted file mode 100644 index 7868ff1be..000000000 --- a/docs/versioned_docs/version-2.23/workflows/config.md +++ /dev/null @@ -1,353 +0,0 @@ -# Configure your cluster - -:::info -This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. 
-::: - - - ---- - -Before you can create your cluster, you need to configure the identity and access management (IAM) for your cloud service provider (CSP) and choose machine types for the nodes. - -## Creating the configuration file - -You can generate a configuration file for your CSP by using the following CLI command: - - - - -```bash -constellation config generate aws -``` - - - - -```bash -constellation config generate azure -``` - - - - -```bash -constellation config generate gcp -``` - - - - -```bash -constellation config generate stackit -``` - - - - -This creates the file `constellation-conf.yaml` in the current directory. - -## Choosing a VM type - -Constellation supports the following VM types: - - - -By default, Constellation uses `m6a.xlarge` VMs (4 vCPUs, 16 GB RAM) to create your cluster. -Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file. -If you are using the default attestation variant `awsSEVSNP`, you can use the instance types described in [AWS's AMD SEV-SNP docs](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/snp-requirements.html). -Please mind the region restrictions mentioned in the [Getting started](../getting-started/first-steps.md#create-a-cluster) section. - -If you are using the attestation variant `awsNitroTPM`, you can choose any of the [nitroTPM-enabled instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enable-nitrotpm-prerequisites.html). - -The Constellation CLI can also print the supported instance types with: `constellation config instance-types`. - - - - -By default, Constellation uses `Standard_DC4as_v5` CVMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file. For CVMs, any VM type with a minimum of 4 vCPUs from the [DCasv5 & DCadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series) or [ECasv5 & ECadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/ecasv5-ecadsv5-series) families is supported. - -You can also run `constellation config instance-types` to get the list of all supported options. - - - - -By default, Constellation uses `n2d-standard-4` VMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file. Supported are all machines with a minimum of 4 vCPUs from the [C2D](https://cloud.google.com/compute/docs/compute-optimized-machines#c2d_machine_types) or [N2D](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines) family. You can run `constellation config instance-types` to get the list of all supported options. - - - - -By default, Constellation uses `m1a.4cd` VMs (4 vCPUs, 30 GB RAM) to create your cluster. -Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file. - -The following instance types are known to be supported: - -| name | vCPUs | GB RAM | -|----------|-------|--------| -| m1a.4cd | 4 | 30 | -| m1a.8cd | 8 | 60 | -| m1a.16cd | 16 | 120 | -| m1a.30cd | 30 | 230 | - -You can choose any of the SEV-enabled instance types. You can find a list of all supported instance types in the [STACKIT documentation](https://docs.stackit.cloud/stackit/en/virtual-machine-flavors-75137231.html). - -The Constellation CLI can also print the supported instance types with: `constellation config instance-types`. 
- - - - -Fill the desired VM type into the `instanceType` fields in the `constellation-conf.yml` file. - -## Creating additional node groups - -By default, Constellation creates the node groups `control_plane_default` and `worker_default` for control-plane nodes and workers, respectively. -If you require additional control-plane or worker groups with different instance types, zone placements, or disk sizes, you can add additional node groups to the `constellation-conf.yml` file. -Each node group can be scaled individually. - -Consider the following example for AWS: - -```yaml -nodeGroups: - control_plane_default: - role: control-plane - instanceType: c6a.xlarge - stateDiskSizeGB: 30 - stateDiskType: gp3 - zone: eu-west-1c - initialCount: 3 - worker_default: - role: worker - instanceType: c6a.xlarge - stateDiskSizeGB: 30 - stateDiskType: gp3 - zone: eu-west-1c - initialCount: 2 - high_cpu: - role: worker - instanceType: c6a.24xlarge - stateDiskSizeGB: 128 - stateDiskType: gp3 - zone: eu-west-1c - initialCount: 1 -``` - -This configuration creates an additional node group `high_cpu` with a larger instance type and disk. - -You can use the field `zone` to specify what availability zone nodes of the group are placed in. -On Azure, this field is empty by default and nodes are automatically spread across availability zones. -STACKIT currently offers SEV-enabled CPUs in the `eu01-1`, `eu01-2`, and `eu01-3` zones. -Consult the documentation of your cloud provider for more information: - -* [AWS](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/) -* [Azure](https://azure.microsoft.com/en-us/explore/global-infrastructure/availability-zones) -* [GCP](https://cloud.google.com/compute/docs/regions-zones) -* [STACKIT](https://docs.stackit.cloud/stackit/en/regions-and-availability-zones-75137212.html) - -## Choosing a Kubernetes version - -To learn which Kubernetes versions can be installed with your current CLI, you can run `constellation config kubernetes-versions`. -See also Constellation's [Kubernetes support policy](../architecture/versions.md#kubernetes-support-policy). - -## Creating an IAM configuration - -You can create an IAM configuration for your cluster automatically using the `constellation iam create` command. -If you already have a Constellation configuration file, you can add the `--update-config` flag to the command. This writes the needed IAM fields into your configuration. Furthermore, the flag updates the zone/region of the configuration if it hasn't been set yet. - - - - -You must be authenticated with the [AWS CLI](https://aws.amazon.com/en/cli/) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). - -```bash -constellation iam create aws --zone=us-east-2a --prefix=constellTest -``` - -This command creates IAM configuration for the AWS zone `us-east-2a` using the prefix `constellTest` for all named resources being created. - -Constellation OS images are currently replicated to the following regions: - -* `eu-central-1` -* `eu-west-1` -* `eu-west-3` -* `us-east-2` -* `ap-south-1` - -If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+AWS+image+region:+xx-xxxx-x). 
- -You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). - -Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - - - -You must be authenticated with the [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). - -```bash -constellation iam create azure --subscriptionID 00000000-0000-0000-0000-000000000000 --region=westus --resourceGroup=constellTest --servicePrincipal=spTest -``` - -This command creates IAM configuration on the Azure region `westus` creating a new resource group `constellTest` and a new service principal `spTest`. - -CVMs are available in several Azure regions. Constellation OS images are currently replicated to the following: - -* `germanywestcentral` -* `westus` -* `eastus` -* `northeurope` -* `westeurope` -* `southeastasia` - -If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+Azure+image+region:+xx-xxxx-x). - -You can find a list of all [regions in Azure's documentation](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=virtual-machines®ions=all). - -Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - - - -You must be authenticated with the [GCP CLI](https://cloud.google.com/sdk/gcloud) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). - -```bash -constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --prefix=constell-test -``` - -This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. - -Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `N2D`. - -Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - - - -STACKIT requires manual creation and configuration of service accounts. Look at the [first steps](../getting-started/first-steps.md) for more information. - - - - -
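-
-As a sketch of how the `--update-config` flag combines with the commands above (reusing the AWS example values from this page), the following creates the IAM resources and writes the resulting fields straight into an existing `constellation-conf.yaml`:
-
-```bash
-# Assumes a constellation-conf.yaml already exists in the current workspace.
-constellation iam create aws --zone=us-east-2a --prefix=constellTest --update-config
-```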
-Alternatively, you can manually create the IAM configuration on your CSP. - -The following describes the configuration fields and how you obtain the required information or create the required resources. - - - - -* **region**: The name of your chosen AWS data center region, e.g., `us-east-2`. - - Constellation OS images are currently replicated to the following regions: - * `eu-central-1` - * `eu-west-1` - * `eu-west-3` - * `us-east-2` - * `ap-south-1` - - If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+AWS+image+region:+xx-xxxx-x). - - You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). - -* **zone**: The name of your chosen AWS data center availability zone, e.g., `us-east-2a`. - - Learn more about [availability zones in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-availability-zones). - -* **iamProfileControlPlane**: The name of an IAM instance profile attached to all control-plane nodes. - - You can create the resource with [Terraform](https://www.terraform.io/). For that, use the [provided Terraform script](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam) to generate the necessary profile. The profile name will be provided as Terraform output value: `control_plane_instance_profile_name`. - - Alternatively, you can create the AWS profile with a tool of your choice. Use the JSON policy in [main.tf](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam/main.tf) in the resource `aws_iam_policy.control_plane_policy`. - -* **iamProfileWorkerNodes**: The name of an IAM instance profile attached to all worker nodes. - - You can create the resource with [Terraform](https://www.terraform.io/). For that, use the [provided Terraform script](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam) to generate the necessary profile. The profile name will be provided as Terraform output value: `worker_nodes_instance_profile_name`. - - Alternatively, you can create the AWS profile with a tool of your choice. Use the JSON policy in [main.tf](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam/main.tf) in the resource `aws_iam_policy.worker_node_policy`. - - - - -* **subscription**: The UUID of your Azure subscription, e.g., `8b8bd01f-efd9-4113-9bd1-c82137c32da7`. - - You can view your subscription UUID via `az account show` and read the `id` field. For more information refer to [Azure's documentation](https://docs.microsoft.com/en-us/azure/azure-portal/get-subscription-tenant-id#find-your-azure-subscription). - -* **tenant**: The UUID of your Azure tenant, e.g., `3400e5a2-8fe2-492a-886c-38cb66170f25`. - - You can view your tenant UUID via `az account show` and read the `tenant` field. For more information refer to [Azure's documentation](https://docs.microsoft.com/en-us/azure/azure-portal/get-subscription-tenant-id#find-your-azure-ad-tenant). - -* **location**: The Azure datacenter location you want to deploy your cluster in, e.g., `westus`. - - CVMs are available in several Azure regions. 
Constellation OS images are currently replicated to the following: - - * `germanywestcentral` - * `westus` - * `eastus` - * `northeurope` - * `westeurope` - * `southeastasia` - - If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+Azure+image+region:+xx-xxxx-x). - - You can find a list of all [regions in Azure's documentation](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=virtual-machines®ions=all). - -* **resourceGroup**: [Create a new resource group in Azure](https://learn.microsoft.com/azure/azure-resource-manager/management/manage-resource-groups-portal) for your Constellation cluster. Set this configuration field to the name of the created resource group. - -* **userAssignedIdentity**: [Create a new managed identity in Azure](https://learn.microsoft.com/azure/active-directory/managed-identities-azure-resources/how-manage-user-assigned-managed-identities). You should create the identity in a different resource group as all resources within the cluster resource group will be deleted on cluster termination. - - Add three role assignments to the identity: `Owner`, `Virtual Machine Contributor`, and `Application Insights Component Contributor`. The `scope` of all three should refer to the previously created cluster resource group. - - Set the configuration value to the full ID of the created identity, e.g., `/subscriptions/8b8bd01f-efd9-4113-9bd1-c82137c32da7/resourcegroups/constellation-identity/providers/Microsoft.ManagedIdentity/userAssignedIdentities/constellation-identity`. You can get it by opening the `JSON View` from the `Overview` section of the identity. - - The user-assigned identity is used by instances of the cluster to access other cloud resources. - For more information about managed identities refer to [Azure's documentation](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/how-manage-user-assigned-managed-identities). - - - - -* **project**: The ID of your GCP project, e.g., `constellation-129857`. - - You can find it on the [welcome screen of your GCP project](https://console.cloud.google.com/welcome). For more information refer to [Google's documentation](https://support.google.com/googleapi/answer/7014113). - -* **region**: The GCP region you want to deploy your cluster in, e.g., `us-central1`. - - You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). - -* **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-central1-a`. - - You can find a [list of all zones in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). - -* **serviceAccountKeyPath**: To configure this, you need to create a GCP [service account](https://cloud.google.com/iam/docs/service-accounts) with the following permissions: - - * `Compute Instance Admin (v1) (roles/compute.instanceAdmin.v1)` - * `Compute Network Admin (roles/compute.networkAdmin)` - * `Compute Security Admin (roles/compute.securityAdmin)` - * `Compute Storage Admin (roles/compute.storageAdmin)` - * `Service Account User (roles/iam.serviceAccountUser)` - - Afterward, create and download a new JSON key for this service account. Place the downloaded file in your Constellation workspace, and set the config parameter to the filename, e.g., `constellation-129857-15343dba46cb.json`. 
- - - - -STACKIT requires manual creation and configuration of service accounts. Look at the [first steps](../getting-started/first-steps.md) for more information. - - - -
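-
-To illustrate where the manually gathered values end up, here is a sketch for AWS using the field names described above. The exact nesting is an assumption and may differ between versions; compare with the file produced by `constellation config generate`.
-
-```yaml
-# Hypothetical excerpt of constellation-conf.yaml for AWS; all values are example placeholders.
-provider:
-  aws:
-    region: us-east-2
-    zone: us-east-2a
-    # Terraform output: control_plane_instance_profile_name
-    iamProfileControlPlane: constellTest_control_plane_instance_profile
-    # Terraform output: worker_nodes_instance_profile_name
-    iamProfileWorkerNodes: constellTest_worker_nodes_instance_profile
-```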
- -Now that you've configured your CSP, you can [create your cluster](./create.md). - -## Deleting an IAM configuration - -You can keep a created IAM configuration and reuse it for new clusters. Alternatively, you can also delete it if you don't want to use it anymore. - -Delete the IAM configuration by executing the following command in the same directory where you executed `constellation iam create` (the directory that contains [`constellation-iam-terraform`](../reference/terraform.md) as a subdirectory): - -```bash -constellation iam destroy -``` - -:::caution -For Azure, deleting the IAM configuration by executing `constellation iam destroy` will delete the whole resource group created by `constellation iam create`. -This also includes any additional resources in the resource group that weren't created by Constellation. -::: diff --git a/docs/versioned_docs/version-2.23/workflows/create.md b/docs/versioned_docs/version-2.23/workflows/create.md deleted file mode 100644 index 6074ebb16..000000000 --- a/docs/versioned_docs/version-2.23/workflows/create.md +++ /dev/null @@ -1,93 +0,0 @@ -# Create your cluster - -:::info -This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. -::: - - - ---- - -Creating your cluster happens through multiple phases. -The most significant ones are: - -1. Creating the necessary resources in your cloud environment -2. Bootstrapping the Constellation cluster and setting up a connection -3. Installing the necessary Kubernetes components - -`constellation apply` handles all this in a single command. -You can use the `--skip-phases` flag to skip specific phases of the process. -For example, if you created the infrastructure manually, you can skip the cloud resource creation phase. - -See the [architecture](../architecture/orchestration.md) section for details on the inner workings of this process. - -:::tip -If you don't have a cloud subscription, you can also set up a [local Constellation cluster using virtualization](../getting-started/first-steps-local.md) for testing. -::: - -Before you create the cluster, make sure to have a [valid configuration file](./config.md). - - - - -```bash -constellation apply -``` - -`apply` stores the state of your cluster's cloud resources in a [`constellation-terraform`](../architecture/orchestration.md#cluster-creation-process) directory in your workspace. - - - - -Self-managed infrastructure allows for more flexibility in the setup, by separating the infrastructure setup from the Constellation cluster management. -This provides flexibility in DevOps and can meet potential regulatory requirements. -It's recommended to use Terraform for infrastructure management, but you can use any tool of your choice. - -:::info - - When using Terraform, you can use the [Constellation Terraform provider](./terraform-provider.md) to manage the entire Constellation cluster lifecycle. - -::: - -You can refer to the Terraform files for the selected CSP from the [Constellation GitHub repository](https://github.com/edgelesssys/constellation/tree/main/terraform/infrastructure) for a minimum Constellation cluster configuration. From this base, you can now add, edit, or substitute resources per your own requirements with the infrastructure -management tooling of your choice. You need to keep the essential functionality of the base configuration in order for your cluster to function correctly. - - - -:::info - - On Azure, a manual update to the MAA provider's policy is necessary. 
- You can apply the update with the following command after creating the infrastructure, with `` being the URL of the MAA provider (i.e., `$(terraform output attestation_url | jq -r)`, when using the minimal Terraform configuration).
-
-  ```bash
-  constellation maa-patch 
-  ```
-
-:::
-
-
-
-Make sure all necessary resources are created, e.g., by checking your CSP's portal, and retrieve the necessary values aligned with the outputs (specified in `outputs.tf`) of the base configuration.
-
-Fill these outputs into the corresponding fields of the `Infrastructure` block inside the `constellation-state.yaml` file. For example, fill the IP or DNS name your cluster can be reached at into the `.Infrastructure.ClusterEndpoint` field.
-
-With the required cloud resources set up, continue with initializing your cluster.
-
-```bash
-constellation apply --skip-phases=infrastructure
-```
-
-
-
-
-Finally, configure `kubectl` for your cluster:
-
-```bash
-export KUBECONFIG="$PWD/constellation-admin.conf"
-```
-
-🏁 That's it. You've successfully created a Constellation cluster.
-
-### Troubleshooting
-
-In case `apply` fails, the CLI collects logs from the bootstrapping instance and stores them inside `constellation-cluster.log`.
diff --git a/docs/versioned_docs/version-2.23/workflows/lb.md b/docs/versioned_docs/version-2.23/workflows/lb.md
deleted file mode 100644
index 868e61076..000000000
--- a/docs/versioned_docs/version-2.23/workflows/lb.md
+++ /dev/null
@@ -1,28 +0,0 @@
-# Expose a service
-
-Constellation integrates the native load balancers of each CSP. Therefore, to expose a service, simply [create a service of type `LoadBalancer`](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer).
-
-## Internet-facing LB service on AWS
-
-To expose your application service externally, you might want to use a Kubernetes Service of type `LoadBalancer`. On AWS, load balancing is achieved through the [AWS Load Balancer Controller](https://kubernetes-sigs.github.io/aws-load-balancer-controller), as in managed EKS.
-
-In recent versions, the controller deploys an internal LB by default. To get an internet-facing LB, set the annotation `service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing` on the service. For more details, see the [official docs](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.7/guide/service/nlb/).
-
-For general information on load balancing with AWS, see [Network load balancing on Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/network-load-balancing.html).
-
-:::caution
-Before terminating the cluster, delete all LB-backed services so that the controller can clean up the related resources.
-:::
-
-## Ingress on AWS
-
-The AWS Load Balancer Controller also provisions `Ingress` resources of class `alb`.
-AWS Application Load Balancers (ALBs) can be configured with a [`target-type`](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.7/guide/ingress/annotations/#target-type).
-The target type `ip` requires using the EKS container network solution, which makes it incompatible with Constellation.
-If a service can be exposed on a `NodePort`, the target type `instance` can be used.
-
-See [Application load balancing on Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html) for more information.
-
-:::caution
-Ingress handlers backed by AWS ALBs reside outside the Constellation cluster, so they shouldn't be handling sensitive traffic!
-::: diff --git a/docs/versioned_docs/version-2.23/workflows/recovery.md b/docs/versioned_docs/version-2.23/workflows/recovery.md deleted file mode 100644 index 592ae247b..000000000 --- a/docs/versioned_docs/version-2.23/workflows/recovery.md +++ /dev/null @@ -1,179 +0,0 @@ -# Recover your cluster - -Recovery of a Constellation cluster means getting it back into a healthy state after too many concurrent node failures in the control plane. -Reasons for an unhealthy cluster can vary from a power outage, or planned reboot, to migration of nodes and regions. -Recovery events are rare, because Constellation is built for high availability and automatically and securely replaces failed nodes. When a node is replaced, Constellation's control plane first verifies the new node before it sends the node the cryptographic keys required to decrypt its [state disk](../architecture/images.md#state-disk). - -Constellation provides a recovery mechanism for cases where the control plane has failed and is unable to replace nodes. -The `constellation recover` command securely connects to all nodes in need of recovery using [attested TLS](../architecture/attestation.md#attested-tls-atls) and provides them with the keys to decrypt their state disks and continue booting. - -## Identify unhealthy clusters - -The first step to recovery is identifying when a cluster becomes unhealthy. -Usually, this can be first observed when the Kubernetes API server becomes unresponsive. - -You can check the health status of the nodes via the cloud service provider (CSP). -Constellation provides logging information on the boot process and status via serial console output. -In the following, you'll find detailed descriptions for identifying clusters stuck in recovery for each CSP. - - - - -First, open the AWS console to view all Auto Scaling Groups (ASGs) in the region of your cluster. Select the ASG of the control plane `--control-plane` and check that enough members are in a *Running* state. - -Second, check the boot logs of these *Instances*. In the ASG's *Instance management* view, select each desired instance. In the upper right corner, select **Action > Monitor and troubleshoot > Get system log**. - -In the serial console output, search for `Waiting for decryption key`. -Similar output to the following means your node was restarted and needs to decrypt the [state disk](../architecture/images.md#state-disk): - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","caller":"cmd/main.go:55","msg":"Starting disk-mapper","version":"2.0.0","cloudProvider":"gcp"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"setupManager","caller":"setup/setup.go:72","msg":"Preparing existing state disk"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:65","msg":"Starting RejoinClient"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"recoveryServer","caller":"recoveryserver/server.go:59","msg":"Starting RecoveryServer"} -``` - -The node will then try to connect to the [*JoinService*](../architecture/microservices.md#joinservice) and obtain the decryption key. 
-If this fails due to an unhealthy control plane, you will see log messages similar to the following: - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:77","msg":"Received list with JoinService endpoints","endpoints":["192.168.178.4:30090","192.168.178.2:30090"]} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.4:30090"} -{"level":"WARN","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.4:30090: connect: connection refused\"","endpoint":"192.168.178.4:30090"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.2:30090"} -{"level":"WARN","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.2:30090: i/o timeout\"","endpoint":"192.168.178.2:30090"} -{"level":"ERROR","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:110","msg":"Failed to rejoin on all endpoints"} -``` - -This means that you have to recover the node manually. - - - - -In the Azure portal, find the cluster's resource group. -Inside the resource group, open the control plane *Virtual machine scale set* `constellation-scale-set-controlplanes-`. -On the left, go to **Settings** > **Instances** and check that enough members are in a *Running* state. - -Second, check the boot logs of these *Instances*. -In the scale set's *Instances* view, open the details page of the desired instance. -On the left, go to **Support + troubleshooting** > **Serial console**. - -In the serial console output, search for `Waiting for decryption key`. -Similar output to the following means your node was restarted and needs to decrypt the [state disk](../architecture/images.md#state-disk): - -```json -{"level":"INFO","ts":"2022-09-08T09:56:41Z","caller":"cmd/main.go:55","msg":"Starting disk-mapper","version":"2.0.0","cloudProvider":"azure"} -{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"setupManager","caller":"setup/setup.go:72","msg":"Preparing existing state disk"} -{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"recoveryServer","caller":"recoveryserver/server.go:59","msg":"Starting RecoveryServer"} -{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"rejoinClient","caller":"rejoinclient/client.go:65","msg":"Starting RejoinClient"} -``` - -The node will then try to connect to the [*JoinService*](../architecture/microservices.md#joinservice) and obtain the decryption key. 
-If this fails due to an unhealthy control plane, you will see log messages similar to the following: - -```json -{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"rejoinClient","caller":"rejoinclient/client.go:77","msg":"Received list with JoinService endpoints","endpoints":["10.9.0.5:30090","10.9.0.6:30090"]} -{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"10.9.0.5:30090"} -{"level":"WARN","ts":"2022-09-08T09:57:03Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 10.9.0.5:30090: i/o timeout\"","endpoint":"10.9.0.5:30090"} -{"level":"INFO","ts":"2022-09-08T09:57:03Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"10.9.0.6:30090"} -{"level":"WARN","ts":"2022-09-08T09:57:23Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 10.9.0.6:30090: i/o timeout\"","endpoint":"10.9.0.6:30090"} -{"level":"ERROR","ts":"2022-09-08T09:57:23Z","logger":"rejoinClient","caller":"rejoinclient/client.go:110","msg":"Failed to rejoin on all endpoints"} -``` - -This means that you have to recover the node manually. - - - - -First, check that the control plane *Instance Group* has enough members in a *Ready* state. -In the GCP Console, go to **Instance Groups** and check the group for the cluster's control plane `-control-plane-`. - -Second, check the status of the *VM Instances*. -Go to **VM Instances** and open the details of the desired instance. -Check the serial console output of that instance by opening the **Logs** > **Serial port 1 (console)** page: - -![GCP portal serial console link](../_media/recovery-gcp-serial-console-link.png) - -In the serial console output, search for `Waiting for decryption key`. -Similar output to the following means your node was restarted and needs to decrypt the [state disk](../architecture/images.md#state-disk): - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","caller":"cmd/main.go:55","msg":"Starting disk-mapper","version":"2.0.0","cloudProvider":"gcp"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"setupManager","caller":"setup/setup.go:72","msg":"Preparing existing state disk"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:65","msg":"Starting RejoinClient"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"recoveryServer","caller":"recoveryserver/server.go:59","msg":"Starting RecoveryServer"} -``` - -The node will then try to connect to the [*JoinService*](../architecture/microservices.md#joinservice) and obtain the decryption key. 
-If this fails due to an unhealthy control plane, you will see log messages similar to the following: - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:77","msg":"Received list with JoinService endpoints","endpoints":["192.168.178.4:30090","192.168.178.2:30090"]} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.4:30090"} -{"level":"WARN","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.4:30090: connect: connection refused\"","endpoint":"192.168.178.4:30090"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.2:30090"} -{"level":"WARN","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.2:30090: i/o timeout\"","endpoint":"192.168.178.2:30090"} -{"level":"ERROR","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:110","msg":"Failed to rejoin on all endpoints"} -``` - -This means that you have to recover the node manually. - - - - -First, open the STACKIT portal to view all servers in your project. Select individual control plane nodes `--control-plane--` and check that enough members are in a *Running* state. - -Second, check the boot logs of these servers. Click on a server name and select **Overview**. Find the **Machine Setup** section and click on **Web console** > **Open console**. - -In the serial console output, search for `Waiting for decryption key`. -Similar output to the following means your node was restarted and needs to decrypt the [state disk](../architecture/images.md#state-disk): - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","caller":"cmd/main.go:55","msg":"Starting disk-mapper","version":"2.0.0","cloudProvider":"gcp"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"setupManager","caller":"setup/setup.go:72","msg":"Preparing existing state disk"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:65","msg":"Starting RejoinClient"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"recoveryServer","caller":"recoveryserver/server.go:59","msg":"Starting RecoveryServer"} -``` - -The node will then try to connect to the [*JoinService*](../architecture/microservices.md#joinservice) and obtain the decryption key. 
-If this fails due to an unhealthy control plane, you will see log messages similar to the following: - -```json -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:77","msg":"Received list with JoinService endpoints","endpoints":["192.168.178.4:30090","192.168.178.2:30090"]} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.4:30090"} -{"level":"WARN","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.4:30090: connect: connection refused\"","endpoint":"192.168.178.4:30090"} -{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.2:30090"} -{"level":"WARN","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.2:30090: i/o timeout\"","endpoint":"192.168.178.2:30090"} -{"level":"ERROR","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:110","msg":"Failed to rejoin on all endpoints"} -``` - -This means that you have to recover the node manually. - - - - -## Recover a cluster - -Recovering a cluster requires the following parameters: - -* The `constellation-state.yaml` file in your working directory or the cluster's endpoint -* The master secret of the cluster - -A cluster can be recovered like this: - -```bash -$ constellation recover -Pushed recovery key. -Pushed recovery key. -Pushed recovery key. -Recovered 3 control-plane nodes. -``` - -In the serial console output of the node you'll see a similar output to the following: - -```json -{"level":"INFO","ts":"2022-09-08T10:26:59Z","logger":"recoveryServer","caller":"recoveryserver/server.go:93","msg":"Received recover call"} -{"level":"INFO","ts":"2022-09-08T10:26:59Z","logger":"recoveryServer","caller":"recoveryserver/server.go:125","msg":"Received state disk key and measurement secret, shutting down server"} -{"level":"INFO","ts":"2022-09-08T10:26:59Z","logger":"recoveryServer.gRPC","caller":"zap/server_interceptors.go:61","msg":"finished streaming call with code OK","grpc.start_time":"2022-09-08T10:26:59Z","system":"grpc","span.kind":"server","grpc.service":"recoverproto.API","grpc.method":"Recover","peer.address":"192.0.2.3:41752","grpc.code":"OK","grpc.time_ms":15.701} -{"level":"INFO","ts":"2022-09-08T10:27:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:87","msg":"RejoinClient stopped"} -``` diff --git a/docs/versioned_docs/version-2.23/workflows/reproducible-builds.md b/docs/versioned_docs/version-2.23/workflows/reproducible-builds.md deleted file mode 100644 index e3bc46095..000000000 --- a/docs/versioned_docs/version-2.23/workflows/reproducible-builds.md +++ /dev/null @@ -1,63 +0,0 @@ -# Reproduce released artifacts - -Constellation has first-class support for [reproducible builds](https://reproducible-builds.org). -Reproducing the released artifacts is an alternative to [signature verification](verify-cli.md) that doesn't require trusting Edgeless Systems' release process. 
-The following sections describe how to rebuild an artifact and how Constellation ensures that the rebuild reproduces the artifacts bit-by-bit. - -## Build environment prerequisites - -The build systems used by Constellation - [Bazel](https://bazel.build/) and [Nix](https://nixos.org) - are designed for deterministic, reproducible builds. -These two dependencies should be the only prerequisites for a successful build. -However, it can't be ruled out completely that peculiarities of the host affect the build result. -Thus, we recommend the following host setup for best results: - -1. A Linux operating system not older than v5.4. -2. The GNU C library not older than v2.31 (avoid `musl`). -3. GNU `coreutils` not older than v8.30 (avoid `busybox`). -4. An `ext4` filesystem for building. -5. AppArmor turned off. - -This is given, for example, on an Ubuntu 22.04 system, which is also used for reproducibility tests. - -:::note - -To avoid any backwards-compatibility issues, the host software versions should also not be much newer than the Constellation release. - -::: - -## Run the build - -The following instructions outline qualitatively how to reproduce a build. -Constellation implements these instructions in the [Reproducible Builds workflow](https://github.com/edgelesssys/constellation/actions/workflows/reproducible-builds.yml), which continuously tests for reproducibility. -The workflow is a good place to look up specific version numbers and build steps. - -1. Check out the Constellation repository at the tag corresponding to the release. - - ```bash - git clone https://github.com/edgelesssys/constellation.git - cd constellation - git checkout v2.20.0 - ``` - -2. [Install the Bazel release](https://bazel.build/install) specified in `.bazelversion`. -3. [Install Nix](https://nixos.org/download/) (any recent version should do). -4. Run the build with `bazel build $target` for one of the following targets of interest: - - ```data - //cli:cli_enterprise_darwin_amd64 - //cli:cli_enterprise_darwin_arm64 - //cli:cli_enterprise_linux_amd64 - //cli:cli_enterprise_linux_arm64 - //cli:cli_enterprise_windows_amd64 - ``` - -5. Compare the build result with the downloaded release artifact. - - - -## Feedback - -Reproduction failures often indicate a bug in the build system or in the build definitions. -Therefore, we're interested in any reproducibility issues you might encounter. -[Start a bug report](https://github.com/edgelesssys/constellation/issues/new/choose) and describe the details of your build environment. -Make sure to include your result binary or a [`diffoscope`](https://diffoscope.org/) report, if possible. diff --git a/docs/versioned_docs/version-2.23/workflows/s3proxy.md b/docs/versioned_docs/version-2.23/workflows/s3proxy.md deleted file mode 100644 index 121e8a461..000000000 --- a/docs/versioned_docs/version-2.23/workflows/s3proxy.md +++ /dev/null @@ -1,58 +0,0 @@ -# Install s3proxy - -Constellation includes a transparent client-side encryption proxy for [AWS S3](https://aws.amazon.com/de/s3/) and compatible stores. -s3proxy encrypts objects before sending them to S3 and automatically decrypts them on retrieval, without requiring changes to your application. -With s3proxy, you can use S3 for storage in a confidential way without having to trust the storage provider. - -## Limitations - -Currently, s3proxy has the following limitations: -- Only `PutObject` and `GetObject` requests are encrypted/decrypted by s3proxy. 
-By default, s3proxy will block requests that may expose unencrypted data to S3 (e.g. UploadPart). -The `allow-multipart` flag disables request blocking for evaluation purposes. -- Using the [Range](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html#API_GetObject_RequestSyntax) header on `GetObject` is currently not supported and will result in an error. - -These limitations will be removed with future iterations of s3proxy. -If you want to use s3proxy but these limitations stop you from doing so, consider [opening an issue](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&projects=&template=feature_request.yml). - -## Deployment - -You can add the s3proxy to your Constellation cluster as follows: -1. Add the Edgeless Systems chart repository: - ```bash - helm repo add edgeless https://helm.edgeless.systems/stable - helm repo update - ``` -2. Set ACCESS_KEY and ACCESS_SECRET to valid credentials you want s3proxy to use to interact with S3. -3. Deploy s3proxy: - ```bash - helm install s3proxy edgeless/s3proxy --set awsAccessKeyID="$ACCESS_KEY" --set awsSecretAccessKey="$ACCESS_SECRET" - ``` - -If you want to run a demo application, check out the [Filestash with s3proxy](../getting-started/examples/filestash-s3proxy.md) example. - - -## Technical details - -### Encryption - -s3proxy relies on Google's [Tink Cryptographic Library](https://developers.google.com/tink) to implement cryptographic operations securely. -The used cryptographic primitives are [NIST SP 800 38f](https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-38F.pdf) for key wrapping and [AES](https://en.wikipedia.org/wiki/Advanced_Encryption_Standard)-[GCM](https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Galois/counter_(GCM)) with 256 bit keys for data encryption. - -s3proxy uses [envelope encryption](https://cloud.google.com/kms/docs/envelope-encryption) to encrypt objects. -This means s3proxy uses a key encryption key (KEK) issued by the [KeyService](../architecture/microservices.md#keyservice) to encrypt data encryption keys (DEKs). -Each S3 object is encrypted with its own DEK. -The encrypted DEK is then saved as metadata of the encrypted object. -This enables key rotation of the KEK without re-encrypting the data in S3. -The approach also allows access to objects from different locations, as long as each location has access to the KEK. - -### Traffic interception - -To use s3proxy, you have to redirect your outbound S3 traffic to s3proxy. -This can either be done by modifying your client application or by changing the deployment of your application. - -The necessary deployment modifications are to add DNS redirection and a trusted TLS certificate to the client's trust store. -DNS redirection can be defined for each pod, allowing you to use s3proxy for one application without changing other applications in the same cluster. -Adding a trusted TLS certificate is necessary as clients communicate with s3proxy via HTTPS. -To have your client application trust s3proxy's TLS certificate, the certificate has to be added to the client's certificate trust store. -The [Filestash with s3proxy](../getting-started/examples/filestash-s3proxy.md) example shows how to do this. 
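-
-As an illustration of the deployment changes described above, the following sketch routes a single pod's S3 traffic to s3proxy via a static host entry (`hostAliases`) and mounts a certificate for the client to trust. The service IP `10.96.55.10`, the endpoint `s3.eu-west-1.amazonaws.com`, and the ConfigMap `s3proxy-tls` (with a `tls.crt` key) are placeholders for your own deployment values, not names shipped by the chart.
-
-```bash
-# Sketch: redirect one pod's S3 requests to s3proxy and trust its TLS certificate.
-# Replace the IP (s3proxy's ClusterIP), the endpoint, and the ConfigMap name/key.
-cat <<EOF | kubectl apply -f -
-apiVersion: v1
-kind: Pod
-metadata:
-  name: s3-client
-spec:
-  hostAliases:                  # resolve the S3 endpoint to s3proxy instead of AWS
-    - ip: "10.96.55.10"
-      hostnames:
-        - "s3.eu-west-1.amazonaws.com"
-  containers:
-    - name: app
-      image: amazon/aws-cli
-      command: ["sleep", "infinity"]
-      env:
-        - name: AWS_CA_BUNDLE   # make the AWS CLI trust the mounted certificate
-          value: /etc/ssl/s3proxy/tls.crt
-      volumeMounts:
-        - name: s3proxy-tls
-          mountPath: /etc/ssl/s3proxy
-          readOnly: true
-  volumes:
-    - name: s3proxy-tls
-      configMap:
-        name: s3proxy-tls       # assumed to contain s3proxy's certificate under tls.crt
-EOF
-```
-
-With a setup along these lines, `aws s3` commands inside the pod reach s3proxy transparently, while the application keeps using the regular S3 endpoint name.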
diff --git a/docs/versioned_docs/version-2.23/workflows/sbom.md b/docs/versioned_docs/version-2.23/workflows/sbom.md deleted file mode 100644 index 6c1702dee..000000000 --- a/docs/versioned_docs/version-2.23/workflows/sbom.md +++ /dev/null @@ -1,93 +0,0 @@ -# Consume software bill of materials (SBOMs) - - - ---- - -Constellation builds produce a [software bill of materials (SBOM)](https://www.ntia.gov/SBOM) for each generated [artifact](../architecture/microservices.md). -You can use SBOMs to make informed decisions about dependencies and vulnerabilities in a given application. Enterprises rely on SBOMs to maintain an inventory of used applications, which allows them to take data-driven approaches to managing risks related to vulnerabilities. - -SBOMs for Constellation are generated using [Syft](https://github.com/anchore/syft), signed using [Cosign](https://github.com/sigstore/cosign), and stored with the produced artifact. - -:::note -The public key for Edgeless Systems' long-term code-signing key is: - -``` ------BEGIN PUBLIC KEY----- -MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEf8F1hpmwE+YCFXzjGtaQcrL6XZVT -JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== ------END PUBLIC KEY----- -``` - -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). - -Make sure the key is available in a file named `cosign.pub` to execute the following examples. -::: - -## Verify and download SBOMs - -The following sections detail how to work with each type of artifact to verify and extract the SBOM. - -### Constellation CLI - -The SBOM for Constellation CLI is made available on the [GitHub release page](https://github.com/edgelesssys/constellation/releases). The SBOM (`constellation.spdx.sbom`) and corresponding signature (`constellation.spdx.sbom.sig`) are valid for each Constellation CLI for a given version, regardless of architecture and operating system. - -```bash -curl -LO https://github.com/edgelesssys/constellation/releases/download/v2.2.0/constellation.spdx.sbom -curl -LO https://github.com/edgelesssys/constellation/releases/download/v2.2.0/constellation.spdx.sbom.sig -cosign verify-blob --key cosign.pub --signature constellation.spdx.sbom.sig constellation.spdx.sbom -``` - -### Container Images - -SBOMs for container images are [attached to the image using Cosign](https://docs.sigstore.dev/cosign/signing/other_types/#sboms-software-bill-of-materials) and uploaded to the same registry. 
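-
-The commands in this section again expect Edgeless Systems' public key in a local file named `cosign.pub`. If you haven't saved it yet, one way to do so is to download it from the URL given above:
-
-```bash
-# Save the code-signing public key as cosign.pub for the verification steps below
-curl -fsSLo cosign.pub https://edgeless.systems/es.pub
-```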
- -As a consumer, use cosign to download and verify the SBOM: - -```bash -# Verify and download the attestation statement -cosign verify-attestation ghcr.io/edgelesssys/constellation/verification-service@v2.2.0 --type 'https://cyclonedx.org/bom' --key cosign.pub --output-file verification-service.att.json -# Extract SBOM from attestation statement -jq -r .payload verification-service.att.json | base64 -d > verification-service.cyclonedx.sbom -``` - -A successful verification should result in similar output: - -```shell-session -$ cosign verify-attestation ghcr.io/edgelesssys/constellation/verification-service@v2.2.0 --type 'https://cyclonedx.org/bom' --key cosign.pub --output-file verification-service.sbom - -Verification for ghcr.io/edgelesssys/constellation/verification-service@v2.2.0 -- -The following checks were performed on each of these signatures: - - The cosign claims were validated - - The signatures were verified against the specified public key -$ jq -r .payload verification-service.sbom | base64 -d > verification-service.cyclonedx.sbom -``` - -:::note - -This example considers only the `verification-service`. The same approach works for all containers in the [Constellation container registry](https://github.com/orgs/edgelesssys/packages?repo_name=constellation). - -::: - - - -## Vulnerability scanning - -You can use a plethora of tools to consume SBOMs. This section provides suggestions for tools that are popular and known to produce reliable results, but any tool that consumes [SPDX](https://spdx.dev/) or [CycloneDX](https://cyclonedx.org/) files should work. - -Syft is able to [convert between the two formats](https://github.com/anchore/syft#format-conversion-experimental) in case you require a specific type. - -### Grype - -[Grype](https://github.com/anchore/grype) is a CLI tool that lends itself well for integration into CI/CD systems or local developer machines. It's also able to consume the signed attestation statement directly and does the verification in one go. - -```bash -grype att:verification-service.sbom --key cosign.pub --add-cpes-if-none -q -``` - -### Dependency Track - -[Dependency Track](https://dependencytrack.org/) is one of the oldest and most mature solutions when it comes to managing software inventory and vulnerabilities. Once imported, it continuously scans SBOMs for new vulnerabilities. It supports the CycloneDX format and provides direct guidance on how to comply with [U.S. Executive Order 14028](https://docs.dependencytrack.org/usage/executive-order-14028/). diff --git a/docs/versioned_docs/version-2.23/workflows/scale.md b/docs/versioned_docs/version-2.23/workflows/scale.md deleted file mode 100644 index 28f19e3f1..000000000 --- a/docs/versioned_docs/version-2.23/workflows/scale.md +++ /dev/null @@ -1,122 +0,0 @@ -# Scale your cluster - -Constellation provides all features of a Kubernetes cluster including scaling and autoscaling. - -## Worker node scaling - -### Autoscaling - -Constellation comes with autoscaling disabled by default. To enable autoscaling, find the scaling group of -worker nodes: - -```bash -kubectl get scalinggroups -o json | yq '.items | .[] | select(.spec.role == "Worker") | [{"name": .metadata.name, "nodeGoupName": .spec.nodeGroupName}]' -``` - -This will output a list of scaling groups with the corresponding cloud provider name (`name`) and the cloud provider agnostic name of the node group (`nodeGroupName`). 
- -Then, patch the `autoscaling` field of the scaling group resource with the desired `name` to `true`: - -```bash -# Replace with the name of the scaling group you want to enable autoscaling for -worker_group= -kubectl patch scalinggroups $worker_group --patch '{"spec":{"autoscaling": true}}' --type='merge' -kubectl get scalinggroup $worker_group -o jsonpath='{.spec}' | yq -P -``` - -The cluster autoscaler now automatically provisions additional worker nodes so that all pods have a place to run. -You can configure the minimum and maximum number of worker nodes in the scaling group by patching the `min` or -`max` fields of the scaling group resource: - -```bash -kubectl patch scalinggroups $worker_group --patch '{"spec":{"max": 5}}' --type='merge' -kubectl get scalinggroup $worker_group -o jsonpath='{.spec}' | yq -P -``` - -The cluster autoscaler will now never provision more than 5 worker nodes. - -If you want to see the autoscaling in action, try to add a deployment with a lot of replicas, like the -following Nginx deployment. The number of replicas needed to trigger the autoscaling depends on the size of -and count of your worker nodes. Wait for the rollout of the deployment to finish and compare the number of -worker nodes before and after the deployment: - -```bash -kubectl create deployment nginx --image=nginx --replicas 150 -kubectl -n kube-system get nodes -kubectl rollout status deployment nginx -kubectl -n kube-system get nodes -``` - -### Manual scaling - -Alternatively, you can manually scale your cluster up or down: - - - - -1. Go to Auto Scaling Groups and select the worker ASG to scale up. -2. Click **Edit** -3. Set the new (increased) **Desired capacity** and **Update**. - - - - -1. Find your Constellation resource group. -2. Select the `scale-set-workers`. -3. Go to **settings** and **scaling**. -4. Set the new **instance count** and **save**. - - - - -1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). -2. **Edit** the **worker** instance group. -3. Set the new **number of instances** and **save**. - - - - -Dynamic cluster scaling isn't yet supported for STACKIT. -Support will be introduced in one of the upcoming releases. - - - - -## Control-plane node scaling - -Control-plane nodes can **only be scaled manually and only scaled up**! - -To increase the number of control-plane nodes, follow these steps: - - - - -1. Go to Auto Scaling Groups and select the control-plane ASG to scale up. -2. Click **Edit** -3. Set the new (increased) **Desired capacity** and **Update**. - - - - -1. Find your Constellation resource group. -2. Select the `scale-set-controlplanes`. -3. Go to **settings** and **scaling**. -4. Set the new (increased) **instance count** and **save**. - - - - -1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). -2. **Edit** the **control-plane** instance group. -3. Set the new (increased) **number of instances** and **save**. - - - - -Dynamic cluster scaling isn't yet supported for STACKIT. -Support will be introduced in one of the upcoming releases. - - - - -If you scale down the number of control-planes nodes, the removed nodes won't be able to exit the `etcd` cluster correctly. This will endanger the quorum that's required to run a stable Kubernetes control plane. 
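-
-Regardless of which CSP you use, you can check that newly added control-plane nodes have joined the cluster. The selector below assumes the standard kubeadm control-plane node label:
-
-```bash
-# List control-plane nodes and wait until the new ones report Ready
-kubectl get nodes -l node-role.kubernetes.io/control-plane
-```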
diff --git a/docs/versioned_docs/version-2.23/workflows/storage.md b/docs/versioned_docs/version-2.23/workflows/storage.md deleted file mode 100644 index a5c52be90..000000000 --- a/docs/versioned_docs/version-2.23/workflows/storage.md +++ /dev/null @@ -1,281 +0,0 @@ -# Use persistent storage - -Persistent storage in Kubernetes requires cloud-specific configuration. -For abstraction of container storage, Kubernetes offers [volumes](https://kubernetes.io/docs/concepts/storage/volumes/), -allowing users to mount storage solutions directly into containers. -The [Container Storage Interface (CSI)](https://kubernetes-csi.github.io/docs/) is the standard interface for exposing arbitrary block and file storage systems into containers in Kubernetes. -Cloud service providers (CSPs) offer their own CSI-based solutions for cloud storage. - -## Confidential storage - -Most cloud storage solutions support encryption, such as [GCE Persistent Disks (PD)](https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek). -Constellation supports the available CSI-based storage options for Kubernetes engines in AWS, Azure, GCP, and STACKIT. -However, their encryption takes place in the storage backend and is managed by the CSP. -Thus, using the default CSI drivers for these storage types means trusting the CSP with your persistent data. - -To address this, Constellation provides CSI drivers for AWS EBS, Azure Disk, GCE PD, and OpenStack Cinder, offering [encryption on the node level](../architecture/keys.md#storage-encryption). They enable transparent encryption for persistent volumes without needing to trust the cloud backend. Plaintext data never leaves the confidential VM context, offering you confidential storage. - -For more details see [encrypted persistent storage](../architecture/encrypted-storage.md). - -## CSI drivers - -Constellation supports the following drivers, which offer node-level encryption and optional integrity protection. - - - - -**Constellation CSI driver for AWS Elastic Block Store** -Mount [Elastic Block Store](https://aws.amazon.com/ebs/) storage volumes into your Constellation cluster. -Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-aws-ebs-csi-driver) for more information. - - - - -**Constellation CSI driver for Azure Disk**: -Mount Azure [Disk Storage](https://azure.microsoft.com/en-us/services/storage/disks/#overview) into your Constellation cluster. -See the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-azuredisk-csi-driver) for more information. -Since Azure Disks are mounted as `ReadWriteOnce`, they're only available to a single pod. - - - - -**Constellation CSI driver for GCP Persistent Disk**: -Mount [Persistent Disk](https://cloud.google.com/persistent-disk) block storage into your Constellation cluster. -Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-gcp-compute-persistent-disk-csi-driver) for more information. - - - - -**Constellation CSI driver for STACKIT / OpenStack Cinder** -Mount [Cinder](https://docs.openstack.org/cinder/latest/) block storage volumes into your Constellation cluster. 
-Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-cloud-provider-openstack) for more information. - - - - -Note that in case the options above aren't a suitable solution for you, Constellation is compatible with all other CSI-based storage options. For example, you can use [AWS EFS](https://docs.aws.amazon.com/en_en/eks/latest/userguide/efs-csi.html), [Azure Files](https://docs.microsoft.com/en-us/azure/storage/files/storage-files-introduction), or [GCP Filestore](https://cloud.google.com/filestore) with Constellation out of the box. Constellation is just not providing transparent encryption on the node level for these storage types yet. - -## Installation - -The Constellation CLI automatically installs Constellation's CSI driver for the selected CSP in your cluster. -If you don't need a CSI driver or wish to deploy your own, you can disable the automatic installation by setting `deployCSIDriver` to `false` in your Constellation config file. - - - - -AWS comes with two storage classes by default. - -* `encrypted-rwo` - * Uses [SSDs of `gp3` type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html) - * ext-4 filesystem - * Encryption of all data written to disk -* `integrity-encrypted-rwo` - * Uses [SSDs of `gp3` type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html) - * ext-4 filesystem - * Encryption of all data written to disk - * Integrity protection of data written to disk - -For more information on encryption algorithms and key sizes, refer to [cryptographic algorithms](../architecture/encrypted-storage.md#cryptographic-algorithms). - -:::info - -The default storage class is set to `encrypted-rwo` for performance reasons. -If you want integrity-protected storage, set the `storageClassName` parameter of your persistent volume claim to `integrity-encrypted-rwo`. - -Alternatively, you can create your own storage class with integrity protection enabled by adding `csi.storage.k8s.io/fstype: ext4-integrity` to the class `parameters`. -Or use another filesystem by specifying another file system type with the suffix `-integrity`, e.g., `csi.storage.k8s.io/fstype: xfs-integrity`. - -Note that volume expansion isn't supported for integrity-protected disks. - -::: - - - - -Azure comes with two storage classes by default. - -* `encrypted-rwo` - * Uses [Standard SSDs](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#standard-ssds) - * ext-4 filesystem - * Encryption of all data written to disk -* `integrity-encrypted-rwo` - * Uses [Premium SSDs](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#premium-ssds) - * ext-4 filesystem - * Encryption of all data written to disk - * Integrity protection of data written to disk - -For more information on encryption algorithms and key sizes, refer to [cryptographic algorithms](../architecture/encrypted-storage.md#cryptographic-algorithms). - -:::info - -The default storage class is set to `encrypted-rwo` for performance reasons. -If you want integrity-protected storage, set the `storageClassName` parameter of your persistent volume claim to `integrity-encrypted-rwo`. - -Alternatively, you can create your own storage class with integrity protection enabled by adding `csi.storage.k8s.io/fstype: ext4-integrity` to the class `parameters`. 
-Or use another filesystem by specifying another file system type with the suffix `-integrity`, e.g., `csi.storage.k8s.io/fstype: xfs-integrity`. - -Note that volume expansion isn't supported for integrity-protected disks. - -::: - - - - -GCP comes with two storage classes by default. - -* `encrypted-rwo` - * Uses [standard persistent disks](https://cloud.google.com/compute/docs/disks#pdspecs) - * ext-4 filesystem - * Encryption of all data written to disk -* `integrity-encrypted-rwo` - * Uses [performance (SSD) persistent disks](https://cloud.google.com/compute/docs/disks#pdspecs) - * ext-4 filesystem - * Encryption of all data written to disk - * Integrity protection of data written to disk - -For more information on encryption algorithms and key sizes, refer to [cryptographic algorithms](../architecture/encrypted-storage.md#cryptographic-algorithms). - -:::info - -The default storage class is set to `encrypted-rwo` for performance reasons. -If you want integrity-protected storage, set the `storageClassName` parameter of your persistent volume claim to `integrity-encrypted-rwo`. - -Alternatively, you can create your own storage class with integrity protection enabled by adding `csi.storage.k8s.io/fstype: ext4-integrity` to the class `parameters`. -Or use another filesystem by specifying another file system type with the suffix `-integrity`, e.g., `csi.storage.k8s.io/fstype: xfs-integrity`. - -Note that volume expansion isn't supported for integrity-protected disks. - -::: - - - - -STACKIT comes with two storage classes by default. - -* `encrypted-rwo` - * Uses [disks of `storage_premium_perf1` type](https://docs.stackit.cloud/stackit/en/service-plans-blockstorage-75137974.html) - * ext-4 filesystem - * Encryption of all data written to disk -* `integrity-encrypted-rwo` - * Uses [disks of `storage_premium_perf1` type](https://docs.stackit.cloud/stackit/en/service-plans-blockstorage-75137974.html) - * ext-4 filesystem - * Encryption of all data written to disk - * Integrity protection of data written to disk - -For more information on encryption algorithms and key sizes, refer to [cryptographic algorithms](../architecture/encrypted-storage.md#cryptographic-algorithms). - -:::info - -The default storage class is set to `encrypted-rwo` for performance reasons. -If you want integrity-protected storage, set the `storageClassName` parameter of your persistent volume claim to `integrity-encrypted-rwo`. - -Alternatively, you can create your own storage class with integrity protection enabled by adding `csi.storage.k8s.io/fstype: ext4-integrity` to the class `parameters`. -Or use another filesystem by specifying another file system type with the suffix `-integrity`, e.g., `csi.storage.k8s.io/fstype: xfs-integrity`. - -Note that volume expansion isn't supported for integrity-protected disks. - -::: - - - - -1. Create a [persistent volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) - - A [persistent volume claim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) is a request for storage with certain properties. - It can refer to a storage class. - The following creates a persistent volume claim, requesting 20 GB of storage via the `encrypted-rwo` storage class: - - ```bash - cat < - ---- - -You can terminate your cluster using the CLI. For this, you need the Terraform state directory named [`constellation-terraform`](../reference/terraform.md) in the current directory. 
- -:::danger - -All ephemeral storage and state of your cluster will be lost. Make sure any data is safely stored in persistent storage. Constellation can recreate your cluster and the associated encryption keys, but won't backup your application data automatically. - -::: - - - -Terminate the cluster by running: - -```bash -constellation terminate -``` - -Or without confirmation (e.g., for automation purposes): - -```bash -constellation terminate --yes -``` - -This deletes all resources created by Constellation in your cloud environment. -All local files created by the `apply` command are deleted as well, except for `constellation-mastersecret.json` and the configuration file. - -:::caution - -Termination can fail if additional resources have been created that depend on the ones managed by Constellation. In this case, you need to delete these additional -resources manually. Just run the `terminate` command again afterward to continue the termination process of the cluster. - -::: - - - -Terminate the cluster by running: - -```bash -terraform destroy -``` - -Delete all files that are no longer needed: - -```bash -rm constellation-state.yaml constellation-admin.conf -``` - -Only the `constellation-mastersecret.json` and the configuration file remain. - - - diff --git a/docs/versioned_docs/version-2.23/workflows/terraform-provider.md b/docs/versioned_docs/version-2.23/workflows/terraform-provider.md deleted file mode 100644 index c7a795d3f..000000000 --- a/docs/versioned_docs/version-2.23/workflows/terraform-provider.md +++ /dev/null @@ -1,140 +0,0 @@ -# Use the Terraform provider - -The Constellation Terraform provider allows to manage the full lifecycle of a Constellation cluster (namely creation, upgrades, and deletion) via Terraform. -The provider is available through the [Terraform registry](https://registry.terraform.io/providers/edgelesssys/constellation/latest) and is released in lock-step with Constellation releases. - -## Prerequisites - -- a Linux / Mac operating system (ARM64/AMD64) -- a Terraform installation of version `v1.4.4` or above - -## Quick setup - -This example shows how to set up a Constellation cluster with the reference IAM and infrastructure setup. This setup is also used when creating a Constellation cluster through the Constellation CLI. You can either consume the IAM / infrastructure modules through a remote source (recommended) or local files. The latter requires downloading the infrastructure and IAM modules for the corresponding CSP from `terraform-modules.zip` on the [Constellation release page](https://github.com/edgelesssys/constellation/releases/latest) and placing them in the Terraform workspace directory. - -1. Create a directory (workspace) for your Constellation cluster. - - ```bash - mkdir constellation-workspace - cd constellation-workspace - ``` - -2. Use one of the [example configurations for using the Constellation Terraform provider](https://github.com/edgelesssys/constellation/tree/main/terraform-provider-constellation/examples/full) or create a `main.tf` file and fill it with the resources you want to create. The [Constellation Terraform provider documentation](https://registry.terraform.io/providers/edgelesssys/constellation/latest) offers thorough documentation on the resources and their attributes. -3. Initialize and apply the Terraform configuration. - - - - Initialize the providers and apply the configuration. 
- - ```bash - terraform init - terraform apply - ``` - - Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. - - - -:::info -On SEV-SNP, you need to manually patch the policy of the MAA provider before creating the Constellation cluster, as this feature isn't available in Azure's Terraform provider yet. The Constellation CLI provides a utility for patching, but you can also do it manually. - - ```bash - terraform init - terraform apply -target module.azure_iam # adjust resource path if not using the example configuration - terraform apply -target module.azure_infrastructure # adjust resource path if not using the example configuration - constellation maa-patch $(terraform output -raw maa_url) # adjust output path / input if not using the example configuration or manually patch the resource - terraform apply -target constellation_cluster.azure_example # adjust resource path if not using the example configuration - ``` - - Use the following policy if manually performing the patch. - - ``` - version= 1.0; - authorizationrules - { - [type=="x-ms-azurevm-default-securebootkeysvalidated", value==false] => deny(); - [type=="x-ms-azurevm-debuggersdisabled", value==false] => deny(); - // The line below was edited to use the MAA provider within Constellation. Do not edit manually. - //[type=="secureboot", value==false] => deny(); - [type=="x-ms-azurevm-signingdisabled", value==false] => deny(); - [type=="x-ms-azurevm-dbvalidated", value==false] => deny(); - [type=="x-ms-azurevm-dbxvalidated", value==false] => deny(); - => permit(); - }; - issuancerules - { - }; - ``` - -::: - - Initialize the providers and apply the configuration. - - ```bash - terraform init - terraform apply - ``` - - Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. - - - - - Initialize the providers and apply the configuration. - - ```bash - terraform init - terraform apply - ``` - - Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. - - - Initialize the providers and apply the configuration. - - ```bash - terraform init - terraform apply - ``` - - Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios. - - - -4. Connect to the cluster. - - ```bash - terraform output -raw kubeconfig > constellation-admin.conf - export KUBECONFIG=$(realpath constellation-admin.conf) - ``` - -## Bringing your own infrastructure - -Instead of using the example infrastructure used in the [quick setup](#quick-setup), you can also provide your own infrastructure. -If you need a starting point for a custom infrastructure setup, you can download the infrastructure / IAM Terraform modules for the respective CSP from the Constellation [GitHub releases](https://github.com/edgelesssys/constellation/releases). 
You can modify and extend the modules per your requirements, while keeping the basic functionality intact. -The module contains: - -- `{csp}`: cloud resources the cluster runs on -- `iam/{csp}`: IAM resources used within the cluster - -When upgrading your cluster, make sure to check the Constellation release notes for potential breaking changes in the reference infrastructure / IAM modules that need to be considered. - -## Cluster upgrades - -:::tip -Also see the [general documentation on cluster upgrades](./upgrade.md). -::: - -The steps for applying the upgrade are as follows: - -1. Update the version constraint of the Constellation Terraform provider in the `required_providers` block in your Terraform configuration. -2. If you explicitly set any of the version attributes of the provider's resources and data sources (e.g. `image_version` or `constellation_microservice_version`), make sure to update them too. Refer to Constellation's [version support policy](https://github.com/edgelesssys/constellation/blob/main/dev-docs/workflows/versions-support.md) for more information on how each Constellation version and its dependencies are supported. -3. Update the IAM / infrastructure configuration. - - For [remote addresses as module sources](https://developer.hashicorp.com/terraform/language/modules/sources#fetching-archives-over-http), update the version number inside the address of the `source` field of the infrastructure / IAM module to the target version. - - For [local paths as module sources](https://developer.hashicorp.com/terraform/language/modules/sources#local-paths) or when [providing your own infrastructure](#bringing-your-own-infrastructure), see the changes made in the reference modules since the upgrade's origin version and adjust your infrastructure / IAM configuration accordingly. -4. Upgrade the Terraform module and provider dependencies and apply the targeted configuration. - -```bash - terraform init -upgrade - terraform apply -``` diff --git a/docs/versioned_docs/version-2.23/workflows/troubleshooting.md b/docs/versioned_docs/version-2.23/workflows/troubleshooting.md deleted file mode 100644 index 903c829e0..000000000 --- a/docs/versioned_docs/version-2.23/workflows/troubleshooting.md +++ /dev/null @@ -1,200 +0,0 @@ -# Troubleshooting - -This section aids you in finding problems when working with Constellation. - -## Common issues - -### Issues with creating new clusters - -When you create a new cluster, you should always use the [latest release](https://github.com/edgelesssys/constellation/releases/latest). -If something doesn't work, check out the [known issues](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22). - -### Azure: Resource Providers can't be registered - -On Azure, you may receive the following error when running `apply` or `terminate` with limited IAM permissions: - -```shell-session -Error: Error ensuring Resource Providers are registered. - -Terraform automatically attempts to register the Resource Providers it supports to -ensure it's able to provision resources. - -If you don't have permission to register Resource Providers you may wish to use the -"skip_provider_registration" flag in the Provider block to disable this functionality. - -[...] -``` - -To continue, please ensure that the [required resource providers](../getting-started/install.md#required-permissions) have been registered in your subscription by your administrator. 
- -Afterward, set `ARM_SKIP_PROVIDER_REGISTRATION=true` as an environment variable and either run `apply` or `terminate` again. -For example: - -```bash -ARM_SKIP_PROVIDER_REGISTRATION=true constellation apply -``` - -Or alternatively, for `terminate`: - -```bash -ARM_SKIP_PROVIDER_REGISTRATION=true constellation terminate -``` - -### Azure: Can't update attestation policy - -On Azure, you may receive the following error when running `apply` from within an Azure environment, e.g., an Azure VM: - -```shell-session -An error occurred: patching policies: updating attestation policy: unexpected status code: 403 Forbidden -``` - -The problem occurs because the Azure SDK we use internally attempts to [authenticate towards the Azure API with the managed identity of your current environment instead of the Azure CLI token](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DefaultAzureCredential). - -We decided not to deviate from this behavior and comply with the ordering of credentials. - -A solution is to add the [required permissions](../getting-started/install.md#required-permissions) to the managed identity of your environment. For example, the managed identity of your Azure VM, instead of the account that you've authenticated with in the Azure CLI. - -If your setup requires a change in the ordering of credentials, please open an issue and explain your desired behavior. - - - -### Nodes fail to join with error `untrusted measurement value` - -This error indicates that a node's [attestation statement](../architecture/attestation.md) contains measurements that don't match the trusted values expected by the [JoinService](../architecture/microservices.md#joinservice). -This may for example happen if the cloud provider updates the VM's firmware such that it influences the [runtime measurements](../architecture/attestation.md#runtime-measurements) in an unforeseen way. -A failed upgrade due to an erroneous attestation config can also cause this error. -You can change the expected measurements to resolve the failure. - -:::caution - -Attestation and trusted measurements are crucial for the security of your cluster. -Be extra careful when manually changing these settings. -When in doubt, check if the encountered [issue is known](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22) or [contact support](https://github.com/edgelesssys/constellation#support). - -::: - -:::tip - -During an upgrade with modified attestation config, a backup of the current configuration is stored in the `join-config` config map in the `kube-system` namespace under the `attestationConfig_backup` key. To restore the old attestation config after a failed upgrade, replace the value of `attestationConfig` with the value from `attestationConfig_backup`: - -```bash -kubectl patch configmaps -n kube-system join-config -p "{\"data\":{\"attestationConfig\":\"$(kubectl get configmaps -n kube-system join-config -o "jsonpath={.data.attestationConfig_backup}")\"}}" -``` - -::: - -You can use the `apply` command to change measurements of a running cluster: - -1. Modify the `measurements` key in your local `constellation-conf.yaml` to the expected values. -2. Run `constellation apply`. - -Keep in mind that running `apply` also applies any version changes from your config to the cluster. 
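-
-If the trusted values you need are simply those published for the image configured in your config file, a sketch like the following may save you from editing the measurements by hand (it assumes the configured image is a regular release image with published, signed measurements):
-
-```bash
-# Fetch the signed reference measurements for the configured image into the config,
-# then apply the updated expectations to the cluster
-constellation config fetch-measurements
-constellation apply
-```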
- -You can run these commands to learn about the versions currently configured in the cluster: - -- Kubernetes API server version: `kubectl get nodeversion constellation-version -o json -n kube-system | jq .spec.kubernetesClusterVersion` -- image version: `kubectl get nodeversion constellation-version -o json -n kube-system | jq .spec.imageVersion` -- microservices versions: `helm list --filter 'constellation-services' -n kube-system` - -### Upgrading Kubernetes resources fails - -Constellation manages its Kubernetes resources using Helm. -When applying an upgrade, the charts that are about to be installed, and a values override file `overrides.yaml`, -are saved to disk in your current workspace under `constellation-upgrade/upgrade-/helm-charts/`. -If upgrading the charts using the Constellation CLI fails, you can review these charts and try to manually apply the upgrade. - -:::caution - -Changing and manually applying the charts may destroy cluster resources and can lead to broken Constellation deployments. -Proceed with caution and when in doubt, -check if the encountered [issue is known](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22) or [contact support](https://github.com/edgelesssys/constellation#support). - -::: - -## Diagnosing issues - -### Logs - -To get started on diagnosing issues with Constellation, it's often helpful to collect logs from nodes, pods, or other resources in the cluster. Most logs are available through Kubernetes' standard -[logging interfaces](https://kubernetes.io/docs/concepts/cluster-administration/logging/). - -To debug issues occurring at boot time of the nodes, you can use the serial console interface of the CSP while the machine boots to get a read-only view of the boot logs. - -Apart from that, Constellation also offers further [observability integrations](../architecture/observability.md). - -### Node shell access - -Debugging via a shell on a node is [directly supported by Kubernetes](https://kubernetes.io/docs/tasks/debug/debug-application/debug-running-pod/#node-shell-session). - -1. Figure out which node to connect to: - - ```bash - kubectl get nodes - # or to see more information, such as IPs: - kubectl get nodes -o wide - ``` - -2. Connect to the node: - - ```bash - kubectl debug node/constell-worker-xksa0-000000 -it --image=busybox - ``` - - You will be presented with a prompt. - - The nodes file system is mounted at `/host`. - -3. Once finished, clean up the debug pod: - - ```bash - kubectl delete pod node-debugger-constell-worker-xksa0-000000-bjthj - ``` - -### Emergency SSH access - -Emergency SSH access to nodes can be useful to diagnose issues or download important data even if the Kubernetes API isn't reachable anymore. - -1. Enter the `constellation-terraform` directory in your Constellation workspace and enable emergency SSH access to the cluster: - - ```bash - cd constellation-terraform - echo "emergency_ssh = true" >> ./terraform.tfvars - terraform apply - ``` - -2. Sign an existing SSH key with your master secret: - - ```bash - cd ../ # go back to your Constellation workspace - constellation ssh --key your_public_key.pub - ``` - - A certificate is written to `constellation_cert.pub`. - - The certificate is valid for 24 hours and enables you to access your Constellation nodes using - [certificate based authentication](https://en.wikibooks.org/wiki/OpenSSH/Cookbook/Certificate-based_Authentication). - -3. 
Now you can connect to any Constellation node using your certificate and your private key. - - ```bash - ssh -o CertificateFile=constellation_cert.pub -i root@ - ``` - - Normally, you don't have access to the Constellation nodes since they reside in a private network. - To access those nodes anyways, you can use your Constellation load balancer as a proxy jump host. - For this, use something along the following SSH client configuration: - - ```text - Host - ProxyJump none - - Host * - IdentityFile - PreferredAuthentications publickey - CertificateFile=constellation_cert.pub - User root - ProxyJump - ``` - - With this configuration you can connect to a Constellation node using `ssh -F `. - You can obtain the private node IP and the domain name of the load balancer using your CSP's web UI. diff --git a/docs/versioned_docs/version-2.23/workflows/trusted-launch.md b/docs/versioned_docs/version-2.23/workflows/trusted-launch.md deleted file mode 100644 index d6d01d8eb..000000000 --- a/docs/versioned_docs/version-2.23/workflows/trusted-launch.md +++ /dev/null @@ -1,54 +0,0 @@ -# Use Azure trusted launch VMs - -Constellation also supports [trusted launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch) on Microsoft Azure. Trusted launch VMs don't offer the same level of security as Confidential VMs, but are available in more regions and in larger quantities. The main difference between trusted launch VMs and normal VMs is that the former offer vTPM-based remote attestation. When used with trusted launch VMs, Constellation relies on vTPM-based remote attestation to verify nodes. - -:::caution - -Trusted launch VMs don't provide runtime encryption and don't keep the cloud service provider (CSP) out of your trusted computing base. - -::: - -Constellation supports trusted launch VMs with instance types `Standard_D*_v4` and `Standard_E*_v4`. Run `constellation config instance-types` for a list of all supported instance types. - -## VM images - -Azure currently doesn't support [community galleries for trusted launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/share-gallery-community). Thus, you need to manually import the Constellation node image into your cloud subscription. - -The latest image is available at `https://cdn.confidential.cloud/constellation/images/azure/trusted-launch/v2.2.0/constellation.img`. Simply adjust the version number to download a newer version. - -After you've downloaded the image, create a resource group `constellation-images` in your Azure subscription and import the image. -You can use a script to do this: - -```bash -wget https://raw.githubusercontent.com/edgelesssys/constellation/main/hack/importAzure.sh -chmod +x importAzure.sh -AZURE_IMAGE_VERSION=2.2.0 AZURE_RESOURCE_GROUP_NAME=constellation-images AZURE_IMAGE_FILE=./constellation.img ./importAzure.sh -``` - -The script creates the following resources: - -1. A new image gallery with the default name `constellation-import` -2. A new image definition with the default name `constellation` -3. The actual image with the provided version. In this case `2.2.0` - -Once the import is completed, use the `ID` of the image version in your `constellation-conf.yaml` for the `image` field. Set `confidentialVM` to `false`. 
- -Fetch the image measurements: - -```bash -IMAGE_VERSION=2.2.0 -URL=https://public-edgeless-constellation.s3.us-east-2.amazonaws.com//communitygalleries/constellationcvm-b3782fa0-0df7-4f2f-963e-fc7fc42663df/images/constellation/versions/$IMAGE_VERSION/measurements.yaml -constellation config fetch-measurements -u$URL -s$URL.sig -``` - -:::info - -The [`constellation apply`](create.md) command will issue a warning because manually imported images aren't recognized as production grade images: - -```shell-session -Configured image doesn't look like a released production image. Double check image before deploying to production. -``` - -Please ignore this warning. - -::: diff --git a/docs/versioned_docs/version-2.23/workflows/upgrade.md b/docs/versioned_docs/version-2.23/workflows/upgrade.md deleted file mode 100644 index 3db2ecad6..000000000 --- a/docs/versioned_docs/version-2.23/workflows/upgrade.md +++ /dev/null @@ -1,110 +0,0 @@ -# Upgrade your cluster - -Constellation provides an easy way to upgrade all components of your cluster, without disrupting its availability. -Specifically, you can upgrade the Kubernetes version, the nodes' image, and the Constellation microservices. -You configure the desired versions in your local Constellation configuration and trigger upgrades with the `apply` command. -To learn about available versions you use the `upgrade check` command. -Which versions are available depends on the CLI version you are using. - -## Update the CLI - -Each CLI comes with a set of supported microservice and Kubernetes versions. -Most importantly, a given CLI version can only upgrade a cluster of the previous minor version, but not older ones. -This means that you have to upgrade your CLI and cluster one minor version at a time. - -For example, if you are currently on CLI version v2.6 and the latest version is v2.8, you should - -* upgrade the CLI to v2.7, -* upgrade the cluster to v2.7, -* and only then continue upgrading the CLI (and the cluster) to v2.8 after. - -Also note that if your current Kubernetes version isn't supported by the next CLI version, use your current CLI to upgrade to a newer Kubernetes version first. - -To learn which Kubernetes versions are supported by a particular CLI, run [constellation config kubernetes-versions](../reference/cli.md#constellation-config-kubernetes-versions). - -## Migrate the configuration - -The Constellation configuration file is located in the file `constellation-conf.yaml` in your workspace. -Refer to the [migration reference](../reference/migration.md) to check if you need to update fields in your configuration file. -Use [`constellation config migrate`](../reference/cli.md#constellation-config-migrate) to automatically update an old config file to a new format. - -## Check for upgrades - -To learn which versions the current CLI can upgrade to and what's installed in your cluster, run: - -```bash -# Show possible upgrades -constellation upgrade check - -# Show possible upgrades and write them to config file -constellation upgrade check --update-config -``` - -You can either enter the reported target versions into your config manually or run the above command with the `--update-config` flag. -When using this flag, the `kubernetesVersion`, `image`, `microserviceVersion`, and `attestation` fields are overwritten with the smallest available upgrade. 
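-
-To review what the flag wrote (or to enter the versions by hand), you can inspect the corresponding fields of your config file. A quick sketch, assuming `yq` is installed:
-
-```bash
-# Show the version fields that `upgrade check --update-config` maintains
-yq '.kubernetesVersion, .image, .microserviceVersion' constellation-conf.yaml
-```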
- -## Apply the upgrade - -Once you updated your config with the desired versions, you can trigger the upgrade with this command: - -```bash -constellation apply -``` - -Microservice upgrades will be finished within a few minutes, depending on the cluster size. -If you are interested, you can monitor pods restarting in the `kube-system` namespace with your tool of choice. - -Image and Kubernetes upgrades take longer. -For each node in your cluster, a new node has to be created and joined. -The process usually takes up to ten minutes per node. - -When applying an upgrade, the Helm charts for the upgrade as well as backup files of Constellation-managed Custom Resource Definitions, Custom Resources, and Terraform state are created. -You can use the Terraform state backup to restore previous resources in case an upgrade misconfigured or erroneously deleted a resource. -You can use the Custom Resource (Definition) backup files to restore Custom Resources and Definitions manually (e.g., via `kubectl apply`) if the automatic migration of those resources fails. -You can use the Helm charts to manually apply upgrades to the Kubernetes resources, should an upgrade fail. - -:::note - -For advanced users: the upgrade consists of several phases that can be individually skipped through the `--skip-phases` flag. -The phases are `infrastracture` for the cloud resource management through Terraform, `helm` for the chart management of the microservices, `image` for OS image upgrades, and `k8s` for Kubernetes version upgrades. - -::: - -## Check the status - -Upgrades are asynchronous operations. -After you run `apply`, it will take a while until the upgrade has completed. -To understand if an upgrade is finished, you can run: - -```bash -constellation status -``` - -This command displays the following information: - -* The installed services and their versions -* The image and Kubernetes version the cluster is expecting on each node -* How many nodes are up to date - -Here's an example output: - -```shell-session -Target versions: - Image: v2.6.0 - Kubernetes: v1.25.8 -Service versions: - Cilium: v1.12.1 - cert-manager: v1.10.0 - constellation-operators: v2.6.0 - constellation-services: v2.6.0 -Cluster status: Some node versions are out of date - Image: 23/25 - Kubernetes: 25/25 -``` - -This output indicates that the cluster is running Kubernetes version `1.25.8`, and all nodes have the appropriate binaries installed. -23 out of 25 nodes have already upgraded to the targeted image version of `2.6.0`, while two are still in progress. - -## Apply further upgrades - -After the upgrade is finished, you can run `constellation upgrade check` again to see if there are more upgrades available. If so, repeat the process. diff --git a/docs/versioned_docs/version-2.23/workflows/verify-cli.md b/docs/versioned_docs/version-2.23/workflows/verify-cli.md deleted file mode 100644 index e33569d37..000000000 --- a/docs/versioned_docs/version-2.23/workflows/verify-cli.md +++ /dev/null @@ -1,129 +0,0 @@ -# Verify the CLI - -:::info -This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. -::: - - - ---- - -Edgeless Systems uses [sigstore](https://www.sigstore.dev/) and [SLSA](https://slsa.dev) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/cosign/signing/overview/), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. 
Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at `https://rekor.sigstore.dev`. - -:::note -The public key for Edgeless Systems' long-term code-signing key is: - -``` ------BEGIN PUBLIC KEY----- -MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEf8F1hpmwE+YCFXzjGtaQcrL6XZVT -JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== ------END PUBLIC KEY----- -``` - -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). -::: - -The Rekor transparency log is a public append-only ledger that verifies and records signatures and associated metadata. The Rekor transparency log enables everyone to observe the sequence of (software) signatures issued by Edgeless Systems and many other parties. The transparency log allows for the public identification of dubious or malicious signatures. - -You should always ensure that (1) your CLI executable was signed with the private key corresponding to the above public key and that (2) there is a corresponding entry in the Rekor transparency log. Both can be done as described in the following. - -:::info -You don't need to verify the Constellation node images. This is done automatically by your CLI and the rest of Constellation. -::: - -## Verify the signature - -:::info -This guide assumes Linux on an amd64 processor. The exact steps for other platforms differ slightly. -::: - -First, [install the Cosign CLI](https://docs.sigstore.dev/cosign/system_config/installation/). Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example: - -```shell-session -$ cosign verify-blob --key https://edgeless.systems/es.pub --signature constellation-linux-amd64.sig constellation-linux-amd64 - -Verified OK -``` - -The above performs an offline verification of the provided public key, signature, and executable. To also verify that a corresponding entry exists in the public Rekor transparency log, add the variable `COSIGN_EXPERIMENTAL=1`: - -```shell-session -$ COSIGN_EXPERIMENTAL=1 cosign verify-blob --key https://edgeless.systems/es.pub --signature constellation-linux-amd64.sig constellation-linux-amd64 - -tlog entry verified with uuid: afaba7f6635b3e058888692841848e5514357315be9528474b23f5dcccb82b13 index: 3477047 -Verified OK -``` - -🏁 You now know that your CLI executable was officially released and signed by Edgeless Systems. - -### Optional: Manually inspect the transparency log - -To further inspect the public Rekor transparency log, [install the Rekor CLI](https://docs.sigstore.dev/logging/installation). A search for the CLI executable should give a single UUID. (Note that this UUID contains the UUID from the previous `cosign` command.) 
- -```shell-session -$ rekor-cli search --artifact constellation-linux-amd64 - -Found matching entries (listed by UUID): -362f8ecba72f4326afaba7f6635b3e058888692841848e5514357315be9528474b23f5dcccb82b13 -``` - -With this UUID you can get the full entry from the transparency log: - -```shell-session -$ rekor-cli get --uuid=362f8ecba72f4326afaba7f6635b3e058888692841848e5514357315be9528474b23f5dcccb82b13 - -LogID: c0d23d6ad406973f9559f3ba2d1ca01f84147d8ffc5b8445c224f98b9591801d -Index: 3477047 -IntegratedTime: 2022-09-12T22:28:16Z -UUID: afaba7f6635b3e058888692841848e5514357315be9528474b23f5dcccb82b13 -Body: { - "HashedRekordObj": { - "data": { - "hash": { - "algorithm": "sha256", - "value": "40e137b9b9b8204d672642fd1e181c6d5ccb50cfc5cc7fcbb06a8c2c78f44aff" - } - }, - "signature": { - "content": "MEUCIQCSER3mGj+j5Pr2kOXTlCIHQC3gT30I7qkLr9Awt6eUUQIgcLUKRIlY50UN8JGwVeNgkBZyYD8HMxwC/LFRWoMn180=", - "publicKey": { - "content": "LS0tLS1CRUdJTiBQVUJMSUMgS0VZLS0tLS0KTUZrd0V3WUhLb1pJemowQ0FRWUlLb1pJemowREFRY0RRZ0FFZjhGMWhwbXdFK1lDRlh6akd0YVFjckw2WFpWVApKbUVlNWlTTHZHMVN5UVNBZXc3V2RNS0Y2bzl0OGUyVEZ1Q2t6bE9oaGx3czJPSFdiaUZabkZXQ0Z3PT0KLS0tLS1FTkQgUFVCTElDIEtFWS0tLS0tCg==" - } - } - } -} -``` - -The field `publicKey` should contain Edgeless Systems' public key in Base64 encoding. - -You can get an exhaustive list of artifact signatures issued by Edgeless Systems via the following command: - -```bash -rekor-cli search --public-key https://edgeless.systems/es.pub --pki-format x509 -``` - -Edgeless Systems monitors this list to detect potential unauthorized use of its private key. - -## Verify the provenance - -Provenance attests that a software artifact was produced by a specific repository and build system invocation. For more information on provenance visit [slsa.dev](https://slsa.dev/provenance/v0.2) and learn about the [adoption of SLSA for Constellation](../reference/slsa.md). - -Just as checking its signature proves that the CLI hasn't been manipulated, checking the provenance proves that the artifact was produced by the expected build process and hasn't been tampered with. - -To verify the provenance, first install the [slsa-verifier](https://github.com/slsa-framework/slsa-verifier). Then make sure you have the provenance file (`constellation.intoto.jsonl`) and Constellation CLI downloaded. Both are available on the [GitHub release page](https://github.com/edgelesssys/constellation/releases). - -:::info -The same provenance file is valid for all Constellation CLI executables of a given version independent of the target platform. -::: - -Use the verifier to perform the check: - -```shell-session -$ slsa-verifier verify-artifact constellation-linux-amd64 \ - --provenance-path constellation.intoto.jsonl \ - --source-uri github.com/edgelesssys/constellation - -Verified signature against tlog entry index 7771317 at URL: https://rekor.sigstore.dev/api/v1/log/entries/24296fb24b8ad77af2c04c8b4ae0d5bc5... 
-Verified build using builder https://github.com/slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@refs/tags/v1.2.2 at commit 18e9924b416323c37b9cdfd6cc728de8a947424a -PASSED: Verified SLSA provenance -``` diff --git a/docs/versioned_docs/version-2.23/workflows/verify-cluster.md b/docs/versioned_docs/version-2.23/workflows/verify-cluster.md deleted file mode 100644 index b6595ebf2..000000000 --- a/docs/versioned_docs/version-2.23/workflows/verify-cluster.md +++ /dev/null @@ -1,97 +0,0 @@ -# Verify your cluster - -Constellation's [attestation feature](../architecture/attestation.md) allows you, or a third party, to verify the integrity and confidentiality of your Constellation cluster. - -## Fetch measurements - -To verify the integrity of Constellation you need trusted measurements to verify against. For each node image released by Edgeless Systems, there are signed measurements, which you can download using the CLI: - -```bash -constellation config fetch-measurements -``` - -This command performs the following steps: - -1. Download the signed measurements for the configured image. By default, this will use Edgeless Systems' public measurement registry. -2. Verify the signature of the measurements. This will use Edgeless Systems' [public key](https://edgeless.systems/es.pub). -3. Write measurements into configuration file. - -The configuration file then contains a list of `measurements` similar to the following: - -```yaml -# ... -measurements: - 0: - expected: "0f35c214608d93c7a6e68ae7359b4a8be5a0e99eea9107ece427c4dea4e439cf" - warnOnly: false - 4: - expected: "02c7a67c01ec70ffaf23d73a12f749ab150a8ac6dc529bda2fe1096a98bf42ea" - warnOnly: false - 5: - expected: "e6949026b72e5045706cd1318889b3874480f7a3f7c5c590912391a2d15e6975" - warnOnly: true - 8: - expected: "0000000000000000000000000000000000000000000000000000000000000000" - warnOnly: false - 9: - expected: "f0a6e8601b00e2fdc57195686cd4ef45eb43a556ac1209b8e25d993213d68384" - warnOnly: false - 11: - expected: "0000000000000000000000000000000000000000000000000000000000000000" - warnOnly: false - 12: - expected: "da99eb6cf7c7fbb692067c87fd5ca0b7117dc293578e4fea41f95d3d3d6af5e2" - warnOnly: false - 13: - expected: "0000000000000000000000000000000000000000000000000000000000000000" - warnOnly: false - 14: - expected: "d7c4cc7ff7933022f013e03bdee875b91720b5b86cf1753cad830f95e791926f" - warnOnly: true - 15: - expected: "0000000000000000000000000000000000000000000000000000000000000000" - warnOnly: false -# ... -``` - -Each entry specifies the expected value of the Constellation node, and whether the measurement should be enforced (`warnOnly: false`), or only a warning should be logged (`warnOnly: true`). -By default, the subset of the [available measurements](../architecture/attestation.md#runtime-measurements) that can be locally reproduced and verified is enforced. - -During attestation, the validating side (CLI or [join service](../architecture/microservices.md#joinservice)) compares each measurement reported by the issuing side (first node or joining node) individually. -For mismatching measurements that have set `warnOnly` to `true` only a warning is emitted. -For mismatching measurements that have set `warnOnly` to `false` an error is emitted and attestation fails. -If attestation fails for a new node, it isn't permitted to join the cluster. - -## The *verify* command - -:::note -The steps below are purely optional. They're automatically executed by `constellation apply` when you initialize your cluster. 
The `constellation verify` command mostly has an illustrative purpose. -::: - -The `verify` command obtains and verifies an attestation statement from a running Constellation cluster. - -```bash -constellation verify [--cluster-id ...] -``` - -From the attestation statement, the command verifies the following properties: - -* The cluster is using the correct Confidential VM (CVM) type. -* Inside the CVMs, the correct node images are running. The node images are identified through the measurements obtained in the previous step. -* The unique ID of the cluster matches the one from your `constellation-state.yaml` file or passed in via `--cluster-id`. - -Once the above properties are verified, you know that you are talking to the right Constellation cluster and it's in a good and trustworthy shape. - -### Custom arguments - -The `verify` command also allows you to verify any Constellation deployment that you have network access to. For this you need the following: - -* The IP address of a running Constellation cluster's [VerificationService](../architecture/microservices.md#verificationservice). The `VerificationService` is exposed via a `NodePort` service using the external IP address of your cluster. Run `kubectl get nodes -o wide` and look for `EXTERNAL-IP`. -* The cluster's *clusterID*. See [cluster identity](../architecture/keys.md#cluster-identity) for more details. -* A `constellation-conf.yaml` file with the expected measurements of the cluster in your working directory. - -For example: - -```shell-session -constellation verify -e 192.0.2.1 --cluster-id Q29uc3RlbGxhdGlvbkRvY3VtZW50YXRpb25TZWNyZXQ= -``` diff --git a/docs/versioned_docs/version-2.3/architecture/attestation.md b/docs/versioned_docs/version-2.3/architecture/attestation.md index 28e8e62cf..f335038f6 100644 --- a/docs/versioned_docs/version-2.3/architecture/attestation.md +++ b/docs/versioned_docs/version-2.3/architecture/attestation.md @@ -121,8 +121,8 @@ Constellation allows to specify in the config which measurements should be enfor Enforcing non-reproducible measurements controlled by the cloud provider means that changes in these values require manual updates to the cluster's config. By default, Constellation only enforces measurements that are stable values produced by the infrastructure or by Constellation directly. - - + + Constellation uses the [vTPM](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch#vtpm) feature of Azure CVMs for runtime measurements. This vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. @@ -152,8 +152,8 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + Constellation uses the [vTPM](https://cloud.google.com/compute/confidential-vm/docs/about-cvm) feature of CVMs on GCP for runtime measurements. Note that this vTPM doesn't run inside the hardware-protected CVM context, but is emulated by the hypervisor. @@ -185,8 +185,8 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + Constellation uses the [vTPM](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html) (NitroTPM) feature of the [AWS Nitro System](http://aws.amazon.com/ec2/nitro/) on AWS for runtime measurements. 
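As an illustrative aside (not part of the official workflow), if you had shell access to a node and the `tpm2-tools` package available, you could inspect the vTPM's PCR values directly; the selection below mirrors the measurements listed above:

```bash
# Sketch: read selected SHA-256 PCRs from the node's vTPM (assumes tpm2-tools is installed)
tpm2_pcrread sha256:0,4,8,9,11,12,13,15
```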
@@ -217,8 +217,8 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + ## Cluster attestation diff --git a/docs/versioned_docs/version-2.3/architecture/keys.md b/docs/versioned_docs/version-2.3/architecture/keys.md index b7d7ef6f5..aa4e35496 100644 --- a/docs/versioned_docs/version-2.3/architecture/keys.md +++ b/docs/versioned_docs/version-2.3/architecture/keys.md @@ -105,7 +105,7 @@ Initially, it will support the following KMSs: * [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/#product-overview) * [KMIP-compatible KMS](https://www.oasis-open.org/committees/tc_home.php?wg_abbrev=kmip) -Storing the keys in Cloud KMS of AWS, Azure, or GCP binds the key usage to the particular cloud identity access management (IAM). +Storing the keys in Cloud KMS of AWS, GCP, or Azure binds the key usage to the particular cloud identity access management (IAM). In the future, Constellation will support remote attestation-based access policies for Cloud KMS once available. Note that using a Cloud KMS limits the isolation and protection to the guarantees of the particular offering. diff --git a/docs/versioned_docs/version-2.3/getting-started/first-steps.md b/docs/versioned_docs/version-2.3/getting-started/first-steps.md index d1cd06cf6..a749ca6a9 100644 --- a/docs/versioned_docs/version-2.3/getting-started/first-steps.md +++ b/docs/versioned_docs/version-2.3/getting-started/first-steps.md @@ -11,29 +11,29 @@ If you don't have a cloud subscription, check out [MiniConstellation](first-step 1. Create the configuration file for your selected cloud provider. - - + + ```bash constellation config generate azure ``` - - + + ```bash constellation config generate gcp ``` - - + + ```bash constellation config generate aws ``` - - + + This creates the file `constellation-conf.yaml` in your current working directory. @@ -41,9 +41,9 @@ If you don't have a cloud subscription, check out [MiniConstellation](first-step First you need to create an [IAM configuration](../workflows/config.md#creating-an-iam-configuration). The easiest way to do this is the following CLI command: - + - + ```bash constellation iam create azure --region=westus --resourceGroup=constellTest --servicePrincipal=spTest @@ -57,21 +57,21 @@ If you don't have a cloud subscription, check out [MiniConstellation](first-step * `northeurope` * `westeurope` - + - + ```bash - constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --serviceAccountID=constell-test + constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west2-a --serviceAccountID=constell-test ``` - This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. + This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west2-a` creating a new service account `constell-test`. Note that only regions offering CVMs of the `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `N2D`. 
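For example, to see which zones offer `N2D` machines, you could run the following sketch (assumes an authenticated `gcloud` CLI):

```bash
# Sketch: list zones that offer N2D machine types
gcloud compute machine-types list --filter="name~n2d" --format="value(zone)" | sort -u
```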
- + - + ```bash constellation iam create aws --zone=eu-central-1a --prefix=constellTest @@ -88,8 +88,8 @@ If you don't have a cloud subscription, check out [MiniConstellation](first-step You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). - - + + Now, fill the output values of the command into the corresponding fields of the `constellation-conf.yaml` file. diff --git a/docs/versioned_docs/version-2.3/getting-started/install.md b/docs/versioned_docs/version-2.3/getting-started/install.md index 36d8f541a..91c4bb14e 100644 --- a/docs/versioned_docs/version-2.3/getting-started/install.md +++ b/docs/versioned_docs/version-2.3/getting-started/install.md @@ -11,15 +11,15 @@ Make sure the following requirements are met: - Your machine is running Linux or macOS - You have admin rights on your machine - [kubectl](https://kubernetes.io/docs/tasks/tools/) is installed -- Your CSP is Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) +- Your CSP is Microsoft Azure, Google Cloud Platform (GCP), or Amazon Web Services (AWS) ## Install the Constellation CLI The CLI executable is available at [GitHub](https://github.com/edgelesssys/constellation/releases). Install it with the following commands: - - + + 1. Download the CLI: @@ -35,8 +35,8 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-linux-amd64 /usr/local/bin/constellation ``` - - + + 1. Download the CLI: @@ -52,9 +52,10 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-linux-arm64 /usr/local/bin/constellation ``` - - + + + 1. Download the CLI: @@ -70,9 +71,11 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-darwin-arm64 /usr/local/bin/constellation ``` - - + + + + 1. Download the CLI: @@ -88,8 +91,8 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-darwin-amd64 /usr/local/bin/constellation ``` - - + + :::tip The CLI supports autocompletion for various shells. To set it up, run `constellation completion` and follow the given steps. @@ -105,15 +108,14 @@ If you don't have a cloud subscription, you can try [MiniConstellation](first-st ### Required permissions - - + + The following [resource providers need to be registered](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/resource-providers-and-types#register-resource-provider) in your subscription: - -- `Microsoft.Compute` -- `Microsoft.ManagedIdentity` -- `Microsoft.Network` -- `microsoft.insights` +* `Microsoft.Compute` +* `Microsoft.ManagedIdentity` +* `Microsoft.Network` +* `microsoft.insights` By default, Constellation tries to register these automatically if they haven't been registered before. @@ -125,8 +127,8 @@ You need the following permissions for your user account: If you don't have these permissions with scope *subscription*, ask your administrator to [create the service account and a resource group for your Constellation cluster](first-steps.md). Your user account needs the `Contributor` permission scoped to this resource group. - - + + Create a new project for Constellation or use an existing one. Enable the [Compute Engine API](https://console.cloud.google.com/apis/library/compute.googleapis.com) on it. 
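If you prefer the command line over the console, the API can also be enabled with `gcloud`; this is an illustrative sketch using a placeholder project ID:

```bash
# Sketch: enable the Compute Engine API on the project
gcloud services enable compute.googleapis.com --project=yourproject-12345
```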
@@ -138,8 +140,8 @@ You need the following permissions on this project: Follow Google's guide on [understanding](https://cloud.google.com/iam/docs/understanding-roles) and [assigning roles](https://cloud.google.com/iam/docs/granting-changing-revoking-access). - - + + To set up a Constellation cluster, you need to perform two tasks that require permissions: create the infrastructure and create roles for cluster nodes. Both of these actions can be performed by different users, e.g., an administrator to create roles and a DevOps engineer to create the infrastructure. @@ -270,8 +272,8 @@ such as `PowerUserAccess`, or use the following minimal set of permissions: Follow Amazon's guide on [understanding](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) and [managing policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html). - - + + ### Authentication @@ -281,8 +283,8 @@ You need to authenticate with your CSP. The following lists the required steps f The steps for a *testing* environment are simpler. However, they may expose secrets to the CSP. If in doubt, follow the *production* steps. ::: - - + + **Testing** @@ -298,8 +300,8 @@ az login Other options are described in Azure's [authentication guide](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli). - - + + **Testing** @@ -322,8 +324,8 @@ Use one of the following options on a trusted machine: Follow [Google's guide](https://cloud.google.com/docs/authentication/production#manually) for setting up your credentials. - - + + **Testing** @@ -339,9 +341,10 @@ aws configure Options and first steps are described in the [AWS CLI documentation](https://docs.aws.amazon.com/cli/index.html). - + - + + ## Next steps diff --git a/docs/versioned_docs/version-2.3/overview/clouds.md b/docs/versioned_docs/version-2.3/overview/clouds.md index c48f23cf0..01e7a00c5 100644 --- a/docs/versioned_docs/version-2.3/overview/clouds.md +++ b/docs/versioned_docs/version-2.3/overview/clouds.md @@ -24,11 +24,11 @@ The following table summarizes the state of features for different infrastructur With its [CVM offering](https://docs.microsoft.com/en-us/azure/confidential-computing/confidential-vm-overview), Azure provides the best foundations for Constellation. Regarding (3), Azure provides direct access to remote-attestation statements. However, regarding (4), the standard CVMs still include closed-source firmware running in VM Privilege Level (VMPL) 0. This firmware is signed by Azure. The signature is reflected in the remote-attestation statements of CVMs. Thus, the Azure closed-source firmware becomes part of Constellation's trusted computing base (TCB). -\* Recently, [Azure announced the open source paravisor OpenHCL](https://techcommunity.microsoft.com/blog/windowsosplatform/openhcl-the-new-open-source-paravisor/4273172). It's the foundation for fully open source and verifiable CVM firmware. Once Azure provides their CVM firmware with reproducible builds based on OpenHCL, (4) switches from *No* to *Yes*. Constellation will support OpenHCL based firmware on Azure in the future. +Recently, Azure [announced](https://techcommunity.microsoft.com/t5/azure-confidential-computing/azure-confidential-vms-using-sev-snp-dcasv5-ecasv5-are-now/ba-p/3573747) the *limited preview* of CVMs with customizable firmware. With this CVM type, (4) switches from *No* to *Yes*. Constellation will support customizable firmware on Azure in the future. 
## Google Cloud Platform (GCP) -The [CVMs available in GCP](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview#amd_sev) are based on AMD SEV but don't have SNP features enabled. This impacts attestation capabilities. Currently, GCP doesn't offer CVM-based attestation at all. Instead, GCP provides attestation statements based on its regular [vTPM](https://cloud.google.com/blog/products/identity-security/virtual-trusted-platform-module-for-shielded-vms-security-in-plaintext), which is managed by the hypervisor. On GCP, the hypervisor is thus currently part of Constellation's TCB. +The [CVMs available in GCP](https://cloud.google.com/compute/confidential-vm/docs/create-confidential-vm-instance) are based on AMD SEV but don't have SNP features enabled. This impacts attestation capabilities. Currently, GCP doesn't offer CVM-based attestation at all. Instead, GCP provides attestation statements based on its regular [vTPM](https://cloud.google.com/blog/products/identity-security/virtual-trusted-platform-module-for-shielded-vms-security-in-plaintext), which is managed by the hypervisor. On GCP, the hypervisor is thus currently part of Constellation's TCB. ## Amazon Web Services (AWS) diff --git a/docs/versioned_docs/version-2.3/overview/confidential-kubernetes.md b/docs/versioned_docs/version-2.3/overview/confidential-kubernetes.md index 1441c833a..2b6c6ed17 100644 --- a/docs/versioned_docs/version-2.3/overview/confidential-kubernetes.md +++ b/docs/versioned_docs/version-2.3/overview/confidential-kubernetes.md @@ -23,9 +23,9 @@ With the above, Constellation wraps an entire cluster into one coherent and veri ![Confidential Kubernetes](../_media/concept-constellation.svg) -## Comparison: Managed Kubernetes with CVMs +## Contrast: Managed Kubernetes with CVMs -In comparison, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. Consequently, this approach leaves a large attack surface, as is depicted in the following. +In contrast, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. 
Consequently, this approach leaves a large attack surface, as is depicted in the following. ![Concept: Managed Kubernetes plus CVMs](../_media/concept-managed.svg) diff --git a/docs/versioned_docs/version-2.3/overview/product.md b/docs/versioned_docs/version-2.3/overview/product.md index e42596fcc..ba7181aa9 100644 --- a/docs/versioned_docs/version-2.3/overview/product.md +++ b/docs/versioned_docs/version-2.3/overview/product.md @@ -6,6 +6,6 @@ From a security perspective, Constellation implements the [Confidential Kubernet From an operational perspective, Constellation provides the following key features: -* **Native support for different clouds**: Constellation works on Amazon Web Services (AWS), Microsoft Azure, and Google Cloud Platform (GCP). Support for OpenStack-based environments is coming with a future release. Constellation securely interfaces with the cloud infrastructure to provide [cluster autoscaling](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), [dynamic persistent volumes](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/), and [service load balancing](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). +* **Native support for different clouds**: Constellation works on Microsoft Azure, Google Cloud Platform (GCP), and Amazon Web Services (AWS). Support for OpenStack-based environments is coming with a future release. Constellation securely interfaces with the cloud infrastructure to provide [cluster autoscaling](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), [dynamic persistent volumes](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/), and [service load balancing](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). * **High availability**: Constellation uses a [multi-master architecture](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/) with a [stacked etcd topology](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/ha-topology/#stacked-etcd-topology) to ensure high availability. * **Integrated Day-2 operations**: Constellation lets you securely [upgrade](../workflows/upgrade.md) your cluster to a new release. It also lets you securely [recover](../workflows/recovery.md) a failed cluster. Both with a single command. diff --git a/docs/versioned_docs/version-2.3/workflows/config.md b/docs/versioned_docs/version-2.3/workflows/config.md index 20aa0dada..afd53812e 100644 --- a/docs/versioned_docs/version-2.3/workflows/config.md +++ b/docs/versioned_docs/version-2.3/workflows/config.md @@ -6,62 +6,62 @@ Before you can create your cluster, you need to configure the identity and acces You can generate a configuration file for your CSP by using the following CLI command: - - + + ```bash constellation config generate azure ``` - - + + ```bash constellation config generate gcp ``` - - + + ```bash constellation config generate aws ``` - - + + This creates the file `constellation-conf.yaml` in the current directory. ## Choosing a VM type Constellation supports the following VM types: - - + + By default, Constellation uses `Standard_DC4as_v5` CVMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. 
For CVMs, any VM type with a minimum of 4 vCPUs from the [DCasv5 & DCadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series) or [ECasv5 & ECadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/ecasv5-ecadsv5-series) families is supported. You can also run `constellation config instance-types` to get the list of all supported options. - - + + By default, Constellation uses `n2d-standard-4` VMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. Supported are all machines with a minimum of 4 vCPUs from the N2D family. Refer to [N2D machine series](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines) or run `constellation config instance-types` to get the list of all supported options. - - + + By default, Constellation uses `m6a.xlarge` VMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. Supported are all nitroTPM-enabled machines with a minimum of 4 vCPUs (`xlarge` or larger). Refer to the [list of nitroTPM-enabled instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enable-nitrotpm-prerequisites.html) or run `constellation config instance-types` to get the list of all supported options. - - + + Fill the desired VM type into the **instanceType** field in the `constellation-conf.yml` file. ## Creating an IAM configuration You can create an IAM configuration for your cluster automatically using the `constellation iam create` command. - - + + You must be authenticated with the [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli) in the shell session. @@ -84,23 +84,23 @@ Paste the output into the corresponding fields of the `constellation-conf.yaml` Since `clientSecretValue` is a sensitive value, you can leave it empty in the configuration file and pass it via an environment variable instead. To this end, create the environment variable `CONSTELL_AZURE_CLIENT_SECRET_VALUE` and set it to the secret value. ::: - - + + You must be authenticated with the [GCP CLI](https://cloud.google.com/sdk/gcloud) in the shell session. ```bash -constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --serviceAccountID=constell-test +constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west2-a --serviceAccountID=constell-test ``` -This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. +This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west2-a` creating a new service account `constell-test`. Note that only regions offering CVMs of the `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `N2D`. Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - + + You must be authenticated with the [AWS CLI](https://aws.amazon.com/en/cli/) in the shell session. @@ -122,16 +122,16 @@ You can find a list of all [regions in AWS's documentation](https://docs.aws.ama Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - + +
Alternatively, you can manually create the IAM configuration on your CSP. The following describes the configuration fields and how you obtain the required information or create the required resources. - - + + * **subscription**: The UUID of your Azure subscription, e.g., `8b8bd01f-efd9-4113-9bd1-c82137c32da7`. @@ -175,19 +175,19 @@ The following describes the configuration fields and how you obtain the required Since this is a sensitive value, alternatively you can leave `clientSecretValue` empty in the configuration file and pass it via an environment variable instead. To this end, create the environment variable `CONSTELL_AZURE_CLIENT_SECRET_VALUE` and set it to the secret value. ::: - + - + * **project**: The ID of your GCP project, e.g., `constellation-129857`. You can find it on the [welcome screen of your GCP project](https://console.cloud.google.com/welcome). For more information refer to [Google's documentation](https://support.google.com/googleapi/answer/7014113). -* **region**: The GCP region you want to deploy your cluster in, e.g., `us-central1`. +* **region**: The GCP region you want to deploy your cluster in, e.g., `us-west1`. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). -* **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-central1-a`. +* **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-west1-a`. You can find a [list of all zones in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). @@ -201,9 +201,9 @@ The following describes the configuration fields and how you obtain the required Afterward, create and download a new JSON key for this service account. Place the downloaded file in your Constellation workspace, and set the config parameter to the filename, e.g., `constellation-129857-15343dba46cb.json`. - + - + * **region**: The name of your chosen AWS data center region, e.g., `us-east-2`. @@ -232,9 +232,9 @@ The following describes the configuration fields and how you obtain the required Alternatively, you can create the AWS profile with a tool of your choice. Use the JSON policy in [main.tf](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam/main.tf) in the resource `aws_iam_policy.worker_node_policy`. - + - +
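For instance, after saving that JSON policy to a local file, you could create it with the AWS CLI. This is only a sketch; the file and policy names are illustrative:

```bash
# Sketch: create the worker node policy from the JSON document taken from main.tf
# (file and policy names are illustrative)
aws iam create-policy \
  --policy-name constellation-worker-node-policy \
  --policy-document file://worker-node-policy.json
```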
Now that you've configured your CSP, you can [create your cluster](./create.md). diff --git a/docs/versioned_docs/version-2.3/workflows/recovery.md b/docs/versioned_docs/version-2.3/workflows/recovery.md index 0fd171036..fd610fc67 100644 --- a/docs/versioned_docs/version-2.3/workflows/recovery.md +++ b/docs/versioned_docs/version-2.3/workflows/recovery.md @@ -16,8 +16,8 @@ You can check the health status of the nodes via the cloud service provider (CSP Constellation provides logging information on the boot process and status via [cloud logging](troubleshooting.md#cloud-logging). In the following, you'll find detailed descriptions for identifying clusters stuck in recovery for each CSP. - - + + In the Azure portal, find the cluster's resource group. Inside the resource group, open the control plane *Virtual machine scale set* `constellation-scale-set-controlplanes-`. @@ -51,8 +51,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. - - + + First, check that the control plane *Instance Group* has enough members in a *Ready* state. In the GCP Console, go to **Instance Groups** and check the group for the cluster's control plane `-control-plane-`. @@ -87,8 +87,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. - - + + First, open the AWS console to view all Auto Scaling Groups (ASGs) in the region of your cluster. Select the ASG of the control plane `--control-plane` and check that enough members are in a *Running* state. @@ -118,8 +118,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. - - + + ## Recover a cluster diff --git a/docs/versioned_docs/version-2.3/workflows/sbom.md b/docs/versioned_docs/version-2.3/workflows/sbom.md index e8ba25a64..ec9834b4f 100644 --- a/docs/versioned_docs/version-2.3/workflows/sbom.md +++ b/docs/versioned_docs/version-2.3/workflows/sbom.md @@ -15,7 +15,7 @@ JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== -----END PUBLIC KEY----- ``` -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). +The public key is also available for download at and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). Make sure the key is available in a file named `cosign.pub` to execute the following examples. ::: @@ -36,7 +36,7 @@ cosign verify-blob --key cosign.pub --signature constellation.spdx.sbom.sig cons ### Container Images -SBOMs for container images are [attached to the image using Cosign](https://docs.sigstore.dev/cosign/signing/other_types/#sboms-software-bill-of-materials) and uploaded to the same registry. +SBOMs for container images are [attached to the image using Cosign](https://docs.sigstore.dev/signing/other_types#sboms-software-bill-of-materials) and uploaded to the same registry. As a consumer, use cosign to download and verify the SBOM: diff --git a/docs/versioned_docs/version-2.3/workflows/scale.md b/docs/versioned_docs/version-2.3/workflows/scale.md index bce045c66..3b7c0d479 100644 --- a/docs/versioned_docs/version-2.3/workflows/scale.md +++ b/docs/versioned_docs/version-2.3/workflows/scale.md @@ -48,23 +48,23 @@ kubectl -n kube-system get nodes Alternatively, you can manually scale your cluster up or down: - - + + 1. 
Find your Constellation resource group. 2. Select the `scale-set-workers`. 3. Go to **settings** and **scaling**. 4. Set the new **instance count** and **save**. - - + + 1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). 2. **Edit** the **worker** instance group. 3. Set the new **number of instances** and **save**. - - + + :::caution @@ -72,8 +72,8 @@ Scaling isn't yet implemented for AWS. If you require this feature, [let us know ::: - - + + ## Control-plane node scaling @@ -81,24 +81,24 @@ Control-plane nodes can **only be scaled manually and only scaled up**! To increase the number of control-plane nodes, follow these steps: - + - + 1. Find your Constellation resource group. 2. Select the `scale-set-controlplanes`. 3. Go to **settings** and **scaling**. 4. Set the new (increased) **instance count** and **save**. - - + + 1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). 2. **Edit** the **control-plane** instance group. 3. Set the new (increased) **number of instances** and **save**. - - + + :::caution @@ -106,7 +106,7 @@ Scaling isn't yet implemented for AWS. If you require this feature, [let us know ::: - - + + If you scale down the number of control-planes nodes, the removed nodes won't be able to exit the `etcd` cluster correctly. This will endanger the quorum that's required to run a stable Kubernetes control plane. diff --git a/docs/versioned_docs/version-2.3/workflows/storage.md b/docs/versioned_docs/version-2.3/workflows/storage.md index be9998676..d0e5b188f 100644 --- a/docs/versioned_docs/version-2.3/workflows/storage.md +++ b/docs/versioned_docs/version-2.3/workflows/storage.md @@ -21,14 +21,14 @@ For more details see [encrypted persistent storage](../architecture/encrypted-st Constellation supports the following drivers, which offer node-level encryption and optional integrity protection. - - + + **Constellation CSI driver for Azure Disk**: Mount Azure [Disk Storage](https://azure.microsoft.com/en-us/services/storage/disks/#overview) into your Constellation cluster. See the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-azuredisk-csi-driver) for more information. Since Azure Disks are mounted as ReadWriteOnce, they're only available to a single pod. - - + + **Constellation CSI driver for GCP Persistent Disk**: Mount [Persistent Disk](https://cloud.google.com/persistent-disk) block storage into your Constellation cluster. @@ -36,8 +36,8 @@ This includes support for [volume snapshots](https://cloud.google.com/kubernetes You can use them to bring a volume back to a prior state or provision new volumes. Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-gcp-compute-persistent-disk-csi-driver) for information about the configuration. - - + + :::caution @@ -47,8 +47,8 @@ You may use other (non-confidential) CSI drivers that are compatible with Kubern ::: - - + + Note that in case the options above aren't a suitable solution for you, Constellation is compatible with all other CSI-based storage options. For example, you can use [Azure Files](https://docs.microsoft.com/en-us/azure/storage/files/storage-files-introduction) or [GCP Filestore](https://cloud.google.com/filestore) with Constellation out of the box. 
Constellation is just not providing transparent encryption on the node level for these storage types yet. @@ -57,8 +57,8 @@ Note that in case the options above aren't a suitable solution for you, Constell The Constellation CLI automatically installs Constellation's CSI driver for the selected CSP in your cluster. If you don't need a CSI driver or wish to deploy your own, you can disable the automatic installation by setting `deployCSIDriver` to `false` in your Constellation config file. - - + + Azure comes with two storage classes by default. @@ -86,8 +86,8 @@ Note that volume expansion isn't supported for integrity-protected disks. ::: - - + + GCP comes with two storage classes by default. @@ -115,8 +115,8 @@ Note that volume expansion isn't supported for integrity-protected disks. ::: - - + + :::caution @@ -126,8 +126,8 @@ You may use other (non-confidential) CSI drivers that are compatible with Kubern ::: - - + + 1. Create a [persistent volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) @@ -186,8 +186,8 @@ The default storage class is responsible for all persistent volume claims that d Constellation creates a storage class with encryption enabled and sets this as the default class. In case you wish to change it, follow the steps below: - - + + 1. List the storage classes in your cluster: @@ -233,8 +233,8 @@ In case you wish to change it, follow the steps below: integrity-encrypted-rwo (default) azuredisk.csi.confidential.cloud Delete Immediate false 1d ``` - - + + 1. List the storage classes in your cluster: @@ -280,8 +280,8 @@ In case you wish to change it, follow the steps below: integrity-encrypted-rwo (default) gcp.csi.confidential.cloud Delete Immediate false 1d ``` - - + + :::caution @@ -291,5 +291,5 @@ You may use other (non-confidential) CSI drivers that are compatible with Kubern ::: - - + + diff --git a/docs/versioned_docs/version-2.3/workflows/troubleshooting.md b/docs/versioned_docs/version-2.3/workflows/troubleshooting.md index f948f5d06..3a28c9cd0 100644 --- a/docs/versioned_docs/version-2.3/workflows/troubleshooting.md +++ b/docs/versioned_docs/version-2.3/workflows/troubleshooting.md @@ -5,7 +5,6 @@ This section aids you in finding problems when working with Constellation. ## Azure: Resource Providers can't be registered On Azure, you may receive the following error when running `create` or `terminate` with limited IAM permissions: - ```shell-session Error: Error ensuring Resource Providers are registered. @@ -22,13 +21,11 @@ To continue, please ensure that the [required resource providers](../getting-sta Afterward, set `ARM_SKIP_PROVIDER_REGISTRATION=true` as an environment variable and either run `create` or `terminate` again. For example: - ```bash ARM_SKIP_PROVIDER_REGISTRATION=true constellation create --control-plane-nodes 1 --worker-nodes 2 -y ``` Or alternatively, for `terminate`: - ```bash ARM_SKIP_PROVIDER_REGISTRATION=true constellation terminate ``` @@ -39,8 +36,8 @@ To provide information during early stages of the node's boot process, Constella You can view these information in the follow places: - - + + 1. In your Azure subscription find the Constellation resource group. 2. Inside the resource group find the Application Insights resource called `constellation-insights-*`. @@ -50,8 +47,8 @@ You can view these information in the follow places: To **find the disk UUIDs** use the following query: `traces | where message contains "Disk UUID"` - - + + 1. Select the project that hosts Constellation. 2. Go to the `Compute Engine` service. 
@@ -66,16 +63,16 @@ Constellation uses the default bucket to store logs. Its [default retention peri ::: - - + + 1. Open [AWS CloudWatch](https://console.aws.amazon.com/cloudwatch/home) 2. Select [Log Groups](https://console.aws.amazon.com/cloudwatch/home#logsV2:log-groups) 3. Select the log group that matches the name of your cluster. 4. Select the log stream for control or worker type nodes. - - + + ## Connect to nodes diff --git a/docs/versioned_docs/version-2.3/workflows/trusted-launch.md b/docs/versioned_docs/version-2.3/workflows/trusted-launch.md index 11d0a096c..13bd63ba6 100644 --- a/docs/versioned_docs/version-2.3/workflows/trusted-launch.md +++ b/docs/versioned_docs/version-2.3/workflows/trusted-launch.md @@ -14,7 +14,7 @@ Constellation supports trusted launch VMs with instance types `Standard_D*_v4` a Azure currently doesn't support [community galleries for trusted launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/share-gallery-community). Thus, you need to manually import the Constellation node image into your cloud subscription. -The latest image is available at `https://cdn.confidential.cloud/constellation/images/azure/trusted-launch/v2.2.0/constellation.img`. Simply adjust the version number to download a newer version. +The latest image is available at . Simply adjust the version number to download a newer version. After you've downloaded the image, create a resource group `constellation-images` in your Azure subscription and import the image. You can use a script to do this: @@ -26,7 +26,6 @@ AZURE_IMAGE_VERSION=2.2.0 AZURE_RESOURCE_GROUP_NAME=constellation-images AZURE_I ``` The script creates the following resources: - 1. A new image gallery with the default name `constellation-import` 2. A new image definition with the default name `constellation` 3. The actual image with the provided version. In this case `2.2.0` diff --git a/docs/versioned_docs/version-2.3/workflows/verify-cli.md b/docs/versioned_docs/version-2.3/workflows/verify-cli.md index 01a2583d6..4f6008cd0 100644 --- a/docs/versioned_docs/version-2.3/workflows/verify-cli.md +++ b/docs/versioned_docs/version-2.3/workflows/verify-cli.md @@ -1,6 +1,6 @@ # Verify the CLI -Edgeless Systems uses [sigstore](https://www.sigstore.dev/) and [SLSA](https://slsa.dev) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/cosign/signing/overview/), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at `https://rekor.sigstore.dev`. +Edgeless Systems uses [sigstore](https://www.sigstore.dev/) and [SLSA](https://slsa.dev) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/signing/quickstart), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at . 
:::note The public key for Edgeless Systems' long-term code-signing key is: @@ -12,7 +12,7 @@ JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== -----END PUBLIC KEY----- ``` -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). +The public key is also available for download at and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). ::: The Rekor transparency log is a public append-only ledger that verifies and records signatures and associated metadata. The Rekor transparency log enables everyone to observe the sequence of (software) signatures issued by Edgeless Systems and many other parties. The transparency log allows for the public identification of dubious or malicious signatures. @@ -25,7 +25,7 @@ You don't need to verify the Constellation node images. This is done automatical ## Verify the signature -First, [install the Cosign CLI](https://docs.sigstore.dev/cosign/system_config/installation/). Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example: +First, [install the Cosign CLI](https://docs.sigstore.dev/system_config/installation). Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example: ```shell-session $ cosign verify-blob --key https://edgeless.systems/es.pub --signature constellation-linux-amd64.sig constellation-linux-amd64 diff --git a/docs/versioned_docs/version-2.4/architecture/attestation.md b/docs/versioned_docs/version-2.4/architecture/attestation.md index 28e8e62cf..f335038f6 100644 --- a/docs/versioned_docs/version-2.4/architecture/attestation.md +++ b/docs/versioned_docs/version-2.4/architecture/attestation.md @@ -121,8 +121,8 @@ Constellation allows to specify in the config which measurements should be enfor Enforcing non-reproducible measurements controlled by the cloud provider means that changes in these values require manual updates to the cluster's config. By default, Constellation only enforces measurements that are stable values produced by the infrastructure or by Constellation directly. - - + + Constellation uses the [vTPM](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch#vtpm) feature of Azure CVMs for runtime measurements. This vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. @@ -152,8 +152,8 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + Constellation uses the [vTPM](https://cloud.google.com/compute/confidential-vm/docs/about-cvm) feature of CVMs on GCP for runtime measurements. Note that this vTPM doesn't run inside the hardware-protected CVM context, but is emulated by the hypervisor. @@ -185,8 +185,8 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + Constellation uses the [vTPM](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html) (NitroTPM) feature of the [AWS Nitro System](http://aws.amazon.com/ec2/nitro/) on AWS for runtime measurements. 
@@ -217,8 +217,8 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + ## Cluster attestation diff --git a/docs/versioned_docs/version-2.4/architecture/keys.md b/docs/versioned_docs/version-2.4/architecture/keys.md index b7d7ef6f5..aa4e35496 100644 --- a/docs/versioned_docs/version-2.4/architecture/keys.md +++ b/docs/versioned_docs/version-2.4/architecture/keys.md @@ -105,7 +105,7 @@ Initially, it will support the following KMSs: * [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/#product-overview) * [KMIP-compatible KMS](https://www.oasis-open.org/committees/tc_home.php?wg_abbrev=kmip) -Storing the keys in Cloud KMS of AWS, Azure, or GCP binds the key usage to the particular cloud identity access management (IAM). +Storing the keys in Cloud KMS of AWS, GCP, or Azure binds the key usage to the particular cloud identity access management (IAM). In the future, Constellation will support remote attestation-based access policies for Cloud KMS once available. Note that using a Cloud KMS limits the isolation and protection to the guarantees of the particular offering. diff --git a/docs/versioned_docs/version-2.4/getting-started/first-steps.md b/docs/versioned_docs/version-2.4/getting-started/first-steps.md index 44e66ea95..768e8dfcd 100644 --- a/docs/versioned_docs/version-2.4/getting-started/first-steps.md +++ b/docs/versioned_docs/version-2.4/getting-started/first-steps.md @@ -11,29 +11,29 @@ If you don't have a cloud subscription, check out [MiniConstellation](first-step 1. Create the configuration file for your selected cloud provider. - - + + ```bash constellation config generate azure ``` - - + + ```bash constellation config generate gcp ``` - - + + ```bash constellation config generate aws ``` - - + + This creates the file `constellation-conf.yaml` in your current working directory. @@ -41,9 +41,9 @@ If you don't have a cloud subscription, check out [MiniConstellation](first-step First you need to create an [IAM configuration](../workflows/config.md#creating-an-iam-configuration). The easiest way to do this is the following CLI command: - + - + ```bash constellation iam create azure --region=westus --resourceGroup=constellTest --servicePrincipal=spTest @@ -57,21 +57,21 @@ If you don't have a cloud subscription, check out [MiniConstellation](first-step * `northeurope` * `westeurope` - + - + ```bash - constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --serviceAccountID=constell-test + constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west2-a --serviceAccountID=constell-test ``` - This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. + This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west2-a` creating a new service account `constell-test`. Note that only regions offering CVMs of the `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `N2D`. 
- + - + ```bash constellation iam create aws --zone=eu-central-1a --prefix=constellTest @@ -88,8 +88,8 @@ If you don't have a cloud subscription, check out [MiniConstellation](first-step You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). - - + + Now, fill the output values of the command into the corresponding fields of the `constellation-conf.yaml` file. diff --git a/docs/versioned_docs/version-2.4/getting-started/install.md b/docs/versioned_docs/version-2.4/getting-started/install.md index 36d8f541a..91c4bb14e 100644 --- a/docs/versioned_docs/version-2.4/getting-started/install.md +++ b/docs/versioned_docs/version-2.4/getting-started/install.md @@ -11,15 +11,15 @@ Make sure the following requirements are met: - Your machine is running Linux or macOS - You have admin rights on your machine - [kubectl](https://kubernetes.io/docs/tasks/tools/) is installed -- Your CSP is Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) +- Your CSP is Microsoft Azure, Google Cloud Platform (GCP), or Amazon Web Services (AWS) ## Install the Constellation CLI The CLI executable is available at [GitHub](https://github.com/edgelesssys/constellation/releases). Install it with the following commands: - - + + 1. Download the CLI: @@ -35,8 +35,8 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-linux-amd64 /usr/local/bin/constellation ``` - - + + 1. Download the CLI: @@ -52,9 +52,10 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-linux-arm64 /usr/local/bin/constellation ``` - - + + + 1. Download the CLI: @@ -70,9 +71,11 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-darwin-arm64 /usr/local/bin/constellation ``` - - + + + + 1. Download the CLI: @@ -88,8 +91,8 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-darwin-amd64 /usr/local/bin/constellation ``` - - + + :::tip The CLI supports autocompletion for various shells. To set it up, run `constellation completion` and follow the given steps. @@ -105,15 +108,14 @@ If you don't have a cloud subscription, you can try [MiniConstellation](first-st ### Required permissions - - + + The following [resource providers need to be registered](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/resource-providers-and-types#register-resource-provider) in your subscription: - -- `Microsoft.Compute` -- `Microsoft.ManagedIdentity` -- `Microsoft.Network` -- `microsoft.insights` +* `Microsoft.Compute` +* `Microsoft.ManagedIdentity` +* `Microsoft.Network` +* `microsoft.insights` By default, Constellation tries to register these automatically if they haven't been registered before. @@ -125,8 +127,8 @@ You need the following permissions for your user account: If you don't have these permissions with scope *subscription*, ask your administrator to [create the service account and a resource group for your Constellation cluster](first-steps.md). Your user account needs the `Contributor` permission scoped to this resource group. - - + + Create a new project for Constellation or use an existing one. Enable the [Compute Engine API](https://console.cloud.google.com/apis/library/compute.googleapis.com) on it. 
@@ -138,8 +140,8 @@ You need the following permissions on this project: Follow Google's guide on [understanding](https://cloud.google.com/iam/docs/understanding-roles) and [assigning roles](https://cloud.google.com/iam/docs/granting-changing-revoking-access). - - + + To set up a Constellation cluster, you need to perform two tasks that require permissions: create the infrastructure and create roles for cluster nodes. Both of these actions can be performed by different users, e.g., an administrator to create roles and a DevOps engineer to create the infrastructure. @@ -270,8 +272,8 @@ such as `PowerUserAccess`, or use the following minimal set of permissions: Follow Amazon's guide on [understanding](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) and [managing policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html). - - + + ### Authentication @@ -281,8 +283,8 @@ You need to authenticate with your CSP. The following lists the required steps f The steps for a *testing* environment are simpler. However, they may expose secrets to the CSP. If in doubt, follow the *production* steps. ::: - - + + **Testing** @@ -298,8 +300,8 @@ az login Other options are described in Azure's [authentication guide](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli). - - + + **Testing** @@ -322,8 +324,8 @@ Use one of the following options on a trusted machine: Follow [Google's guide](https://cloud.google.com/docs/authentication/production#manually) for setting up your credentials. - - + + **Testing** @@ -339,9 +341,10 @@ aws configure Options and first steps are described in the [AWS CLI documentation](https://docs.aws.amazon.com/cli/index.html). - + - + + ## Next steps diff --git a/docs/versioned_docs/version-2.4/overview/clouds.md b/docs/versioned_docs/version-2.4/overview/clouds.md index c48f23cf0..01e7a00c5 100644 --- a/docs/versioned_docs/version-2.4/overview/clouds.md +++ b/docs/versioned_docs/version-2.4/overview/clouds.md @@ -24,11 +24,11 @@ The following table summarizes the state of features for different infrastructur With its [CVM offering](https://docs.microsoft.com/en-us/azure/confidential-computing/confidential-vm-overview), Azure provides the best foundations for Constellation. Regarding (3), Azure provides direct access to remote-attestation statements. However, regarding (4), the standard CVMs still include closed-source firmware running in VM Privilege Level (VMPL) 0. This firmware is signed by Azure. The signature is reflected in the remote-attestation statements of CVMs. Thus, the Azure closed-source firmware becomes part of Constellation's trusted computing base (TCB). -\* Recently, [Azure announced the open source paravisor OpenHCL](https://techcommunity.microsoft.com/blog/windowsosplatform/openhcl-the-new-open-source-paravisor/4273172). It's the foundation for fully open source and verifiable CVM firmware. Once Azure provides their CVM firmware with reproducible builds based on OpenHCL, (4) switches from *No* to *Yes*. Constellation will support OpenHCL based firmware on Azure in the future. +Recently, Azure [announced](https://techcommunity.microsoft.com/t5/azure-confidential-computing/azure-confidential-vms-using-sev-snp-dcasv5-ecasv5-are-now/ba-p/3573747) the *limited preview* of CVMs with customizable firmware. With this CVM type, (4) switches from *No* to *Yes*. Constellation will support customizable firmware on Azure in the future. 
## Google Cloud Platform (GCP) -The [CVMs available in GCP](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview#amd_sev) are based on AMD SEV but don't have SNP features enabled. This impacts attestation capabilities. Currently, GCP doesn't offer CVM-based attestation at all. Instead, GCP provides attestation statements based on its regular [vTPM](https://cloud.google.com/blog/products/identity-security/virtual-trusted-platform-module-for-shielded-vms-security-in-plaintext), which is managed by the hypervisor. On GCP, the hypervisor is thus currently part of Constellation's TCB. +The [CVMs available in GCP](https://cloud.google.com/compute/confidential-vm/docs/create-confidential-vm-instance) are based on AMD SEV but don't have SNP features enabled. This impacts attestation capabilities. Currently, GCP doesn't offer CVM-based attestation at all. Instead, GCP provides attestation statements based on its regular [vTPM](https://cloud.google.com/blog/products/identity-security/virtual-trusted-platform-module-for-shielded-vms-security-in-plaintext), which is managed by the hypervisor. On GCP, the hypervisor is thus currently part of Constellation's TCB. ## Amazon Web Services (AWS) diff --git a/docs/versioned_docs/version-2.4/overview/confidential-kubernetes.md b/docs/versioned_docs/version-2.4/overview/confidential-kubernetes.md index 1441c833a..2b6c6ed17 100644 --- a/docs/versioned_docs/version-2.4/overview/confidential-kubernetes.md +++ b/docs/versioned_docs/version-2.4/overview/confidential-kubernetes.md @@ -23,9 +23,9 @@ With the above, Constellation wraps an entire cluster into one coherent and veri ![Confidential Kubernetes](../_media/concept-constellation.svg) -## Comparison: Managed Kubernetes with CVMs +## Contrast: Managed Kubernetes with CVMs -In comparison, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. Consequently, this approach leaves a large attack surface, as is depicted in the following. +In contrast, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. 
Consequently, this approach leaves a large attack surface, as is depicted in the following. ![Concept: Managed Kubernetes plus CVMs](../_media/concept-managed.svg) diff --git a/docs/versioned_docs/version-2.4/overview/product.md b/docs/versioned_docs/version-2.4/overview/product.md index e42596fcc..ba7181aa9 100644 --- a/docs/versioned_docs/version-2.4/overview/product.md +++ b/docs/versioned_docs/version-2.4/overview/product.md @@ -6,6 +6,6 @@ From a security perspective, Constellation implements the [Confidential Kubernet From an operational perspective, Constellation provides the following key features: -* **Native support for different clouds**: Constellation works on Amazon Web Services (AWS), Microsoft Azure, and Google Cloud Platform (GCP). Support for OpenStack-based environments is coming with a future release. Constellation securely interfaces with the cloud infrastructure to provide [cluster autoscaling](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), [dynamic persistent volumes](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/), and [service load balancing](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). +* **Native support for different clouds**: Constellation works on Microsoft Azure, Google Cloud Platform (GCP), and Amazon Web Services (AWS). Support for OpenStack-based environments is coming with a future release. Constellation securely interfaces with the cloud infrastructure to provide [cluster autoscaling](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), [dynamic persistent volumes](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/), and [service load balancing](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). * **High availability**: Constellation uses a [multi-master architecture](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/) with a [stacked etcd topology](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/ha-topology/#stacked-etcd-topology) to ensure high availability. * **Integrated Day-2 operations**: Constellation lets you securely [upgrade](../workflows/upgrade.md) your cluster to a new release. It also lets you securely [recover](../workflows/recovery.md) a failed cluster. Both with a single command. diff --git a/docs/versioned_docs/version-2.4/workflows/config.md b/docs/versioned_docs/version-2.4/workflows/config.md index 20aa0dada..afd53812e 100644 --- a/docs/versioned_docs/version-2.4/workflows/config.md +++ b/docs/versioned_docs/version-2.4/workflows/config.md @@ -6,62 +6,62 @@ Before you can create your cluster, you need to configure the identity and acces You can generate a configuration file for your CSP by using the following CLI command: - - + + ```bash constellation config generate azure ``` - - + + ```bash constellation config generate gcp ``` - - + + ```bash constellation config generate aws ``` - - + + This creates the file `constellation-conf.yaml` in the current directory. ## Choosing a VM type Constellation supports the following VM types: - - + + By default, Constellation uses `Standard_DC4as_v5` CVMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. 
For CVMs, any VM type with a minimum of 4 vCPUs from the [DCasv5 & DCadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series) or [ECasv5 & ECadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/ecasv5-ecadsv5-series) families is supported. You can also run `constellation config instance-types` to get the list of all supported options. - - + + By default, Constellation uses `n2d-standard-4` VMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. Supported are all machines with a minimum of 4 vCPUs from the N2D family. Refer to [N2D machine series](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines) or run `constellation config instance-types` to get the list of all supported options. - - + + By default, Constellation uses `m6a.xlarge` VMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. Supported are all nitroTPM-enabled machines with a minimum of 4 vCPUs (`xlarge` or larger). Refer to the [list of nitroTPM-enabled instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enable-nitrotpm-prerequisites.html) or run `constellation config instance-types` to get the list of all supported options. - - + + Fill the desired VM type into the **instanceType** field in the `constellation-conf.yml` file. ## Creating an IAM configuration You can create an IAM configuration for your cluster automatically using the `constellation iam create` command. - - + + You must be authenticated with the [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli) in the shell session. @@ -84,23 +84,23 @@ Paste the output into the corresponding fields of the `constellation-conf.yaml` Since `clientSecretValue` is a sensitive value, you can leave it empty in the configuration file and pass it via an environment variable instead. To this end, create the environment variable `CONSTELL_AZURE_CLIENT_SECRET_VALUE` and set it to the secret value. ::: - - + + You must be authenticated with the [GCP CLI](https://cloud.google.com/sdk/gcloud) in the shell session. ```bash -constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --serviceAccountID=constell-test +constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west2-a --serviceAccountID=constell-test ``` -This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. +This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west2-a` creating a new service account `constell-test`. Note that only regions offering CVMs of the `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `N2D`. Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - + + You must be authenticated with the [AWS CLI](https://aws.amazon.com/en/cli/) in the shell session. @@ -122,16 +122,16 @@ You can find a list of all [regions in AWS's documentation](https://docs.aws.ama Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - + +
Alternatively, you can manually create the IAM configuration on your CSP. The following describes the configuration fields and how you obtain the required information or create the required resources. - - + + * **subscription**: The UUID of your Azure subscription, e.g., `8b8bd01f-efd9-4113-9bd1-c82137c32da7`. @@ -175,19 +175,19 @@ The following describes the configuration fields and how you obtain the required Since this is a sensitive value, alternatively you can leave `clientSecretValue` empty in the configuration file and pass it via an environment variable instead. To this end, create the environment variable `CONSTELL_AZURE_CLIENT_SECRET_VALUE` and set it to the secret value. ::: - + - + * **project**: The ID of your GCP project, e.g., `constellation-129857`. You can find it on the [welcome screen of your GCP project](https://console.cloud.google.com/welcome). For more information refer to [Google's documentation](https://support.google.com/googleapi/answer/7014113). -* **region**: The GCP region you want to deploy your cluster in, e.g., `us-central1`. +* **region**: The GCP region you want to deploy your cluster in, e.g., `us-west1`. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). -* **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-central1-a`. +* **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-west1-a`. You can find a [list of all zones in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). @@ -201,9 +201,9 @@ The following describes the configuration fields and how you obtain the required Afterward, create and download a new JSON key for this service account. Place the downloaded file in your Constellation workspace, and set the config parameter to the filename, e.g., `constellation-129857-15343dba46cb.json`. - + - + * **region**: The name of your chosen AWS data center region, e.g., `us-east-2`. @@ -232,9 +232,9 @@ The following describes the configuration fields and how you obtain the required Alternatively, you can create the AWS profile with a tool of your choice. Use the JSON policy in [main.tf](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam/main.tf) in the resource `aws_iam_policy.worker_node_policy`. - + - +
Now that you've configured your CSP, you can [create your cluster](./create.md). diff --git a/docs/versioned_docs/version-2.4/workflows/recovery.md b/docs/versioned_docs/version-2.4/workflows/recovery.md index 0fd171036..fd610fc67 100644 --- a/docs/versioned_docs/version-2.4/workflows/recovery.md +++ b/docs/versioned_docs/version-2.4/workflows/recovery.md @@ -16,8 +16,8 @@ You can check the health status of the nodes via the cloud service provider (CSP Constellation provides logging information on the boot process and status via [cloud logging](troubleshooting.md#cloud-logging). In the following, you'll find detailed descriptions for identifying clusters stuck in recovery for each CSP. - - + + In the Azure portal, find the cluster's resource group. Inside the resource group, open the control plane *Virtual machine scale set* `constellation-scale-set-controlplanes-`. @@ -51,8 +51,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. - - + + First, check that the control plane *Instance Group* has enough members in a *Ready* state. In the GCP Console, go to **Instance Groups** and check the group for the cluster's control plane `-control-plane-`. @@ -87,8 +87,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. - - + + First, open the AWS console to view all Auto Scaling Groups (ASGs) in the region of your cluster. Select the ASG of the control plane `--control-plane` and check that enough members are in a *Running* state. @@ -118,8 +118,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. - - + + ## Recover a cluster diff --git a/docs/versioned_docs/version-2.4/workflows/sbom.md b/docs/versioned_docs/version-2.4/workflows/sbom.md index e8ba25a64..ec9834b4f 100644 --- a/docs/versioned_docs/version-2.4/workflows/sbom.md +++ b/docs/versioned_docs/version-2.4/workflows/sbom.md @@ -15,7 +15,7 @@ JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== -----END PUBLIC KEY----- ``` -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). +The public key is also available for download at <https://edgeless.systems/es.pub> and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). Make sure the key is available in a file named `cosign.pub` to execute the following examples. ::: @@ -36,7 +36,7 @@ cosign verify-blob --key cosign.pub --signature constellation.spdx.sbom.sig cons ### Container Images -SBOMs for container images are [attached to the image using Cosign](https://docs.sigstore.dev/cosign/signing/other_types/#sboms-software-bill-of-materials) and uploaded to the same registry. +SBOMs for container images are [attached to the image using Cosign](https://docs.sigstore.dev/signing/other_types#sboms-software-bill-of-materials) and uploaded to the same registry. As a consumer, use cosign to download and verify the SBOM: diff --git a/docs/versioned_docs/version-2.4/workflows/scale.md b/docs/versioned_docs/version-2.4/workflows/scale.md index bce045c66..3b7c0d479 100644 --- a/docs/versioned_docs/version-2.4/workflows/scale.md +++ b/docs/versioned_docs/version-2.4/workflows/scale.md @@ -48,23 +48,23 @@ kubectl -n kube-system get nodes Alternatively, you can manually scale your cluster up or down: - - + + 1.
Find your Constellation resource group. 2. Select the `scale-set-workers`. 3. Go to **settings** and **scaling**. 4. Set the new **instance count** and **save**. - - + + 1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). 2. **Edit** the **worker** instance group. 3. Set the new **number of instances** and **save**. - - + + :::caution @@ -72,8 +72,8 @@ Scaling isn't yet implemented for AWS. If you require this feature, [let us know ::: - - + + ## Control-plane node scaling @@ -81,24 +81,24 @@ Control-plane nodes can **only be scaled manually and only scaled up**! To increase the number of control-plane nodes, follow these steps: - + - + 1. Find your Constellation resource group. 2. Select the `scale-set-controlplanes`. 3. Go to **settings** and **scaling**. 4. Set the new (increased) **instance count** and **save**. - - + + 1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). 2. **Edit** the **control-plane** instance group. 3. Set the new (increased) **number of instances** and **save**. - - + + :::caution @@ -106,7 +106,7 @@ Scaling isn't yet implemented for AWS. If you require this feature, [let us know ::: - - + + If you scale down the number of control-planes nodes, the removed nodes won't be able to exit the `etcd` cluster correctly. This will endanger the quorum that's required to run a stable Kubernetes control plane. diff --git a/docs/versioned_docs/version-2.4/workflows/storage.md b/docs/versioned_docs/version-2.4/workflows/storage.md index be9998676..d0e5b188f 100644 --- a/docs/versioned_docs/version-2.4/workflows/storage.md +++ b/docs/versioned_docs/version-2.4/workflows/storage.md @@ -21,14 +21,14 @@ For more details see [encrypted persistent storage](../architecture/encrypted-st Constellation supports the following drivers, which offer node-level encryption and optional integrity protection. - - + + **Constellation CSI driver for Azure Disk**: Mount Azure [Disk Storage](https://azure.microsoft.com/en-us/services/storage/disks/#overview) into your Constellation cluster. See the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-azuredisk-csi-driver) for more information. Since Azure Disks are mounted as ReadWriteOnce, they're only available to a single pod. - - + + **Constellation CSI driver for GCP Persistent Disk**: Mount [Persistent Disk](https://cloud.google.com/persistent-disk) block storage into your Constellation cluster. @@ -36,8 +36,8 @@ This includes support for [volume snapshots](https://cloud.google.com/kubernetes You can use them to bring a volume back to a prior state or provision new volumes. Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-gcp-compute-persistent-disk-csi-driver) for information about the configuration. - - + + :::caution @@ -47,8 +47,8 @@ You may use other (non-confidential) CSI drivers that are compatible with Kubern ::: - - + + Note that in case the options above aren't a suitable solution for you, Constellation is compatible with all other CSI-based storage options. For example, you can use [Azure Files](https://docs.microsoft.com/en-us/azure/storage/files/storage-files-introduction) or [GCP Filestore](https://cloud.google.com/filestore) with Constellation out of the box. 
Constellation is just not providing transparent encryption on the node level for these storage types yet. @@ -57,8 +57,8 @@ Note that in case the options above aren't a suitable solution for you, Constell The Constellation CLI automatically installs Constellation's CSI driver for the selected CSP in your cluster. If you don't need a CSI driver or wish to deploy your own, you can disable the automatic installation by setting `deployCSIDriver` to `false` in your Constellation config file. - - + + Azure comes with two storage classes by default. @@ -86,8 +86,8 @@ Note that volume expansion isn't supported for integrity-protected disks. ::: - - + + GCP comes with two storage classes by default. @@ -115,8 +115,8 @@ Note that volume expansion isn't supported for integrity-protected disks. ::: - - + + :::caution @@ -126,8 +126,8 @@ You may use other (non-confidential) CSI drivers that are compatible with Kubern ::: - - + + 1. Create a [persistent volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) @@ -186,8 +186,8 @@ The default storage class is responsible for all persistent volume claims that d Constellation creates a storage class with encryption enabled and sets this as the default class. In case you wish to change it, follow the steps below: - - + + 1. List the storage classes in your cluster: @@ -233,8 +233,8 @@ In case you wish to change it, follow the steps below: integrity-encrypted-rwo (default) azuredisk.csi.confidential.cloud Delete Immediate false 1d ``` - - + + 1. List the storage classes in your cluster: @@ -280,8 +280,8 @@ In case you wish to change it, follow the steps below: integrity-encrypted-rwo (default) gcp.csi.confidential.cloud Delete Immediate false 1d ``` - - + + :::caution @@ -291,5 +291,5 @@ You may use other (non-confidential) CSI drivers that are compatible with Kubern ::: - - + + diff --git a/docs/versioned_docs/version-2.4/workflows/troubleshooting.md b/docs/versioned_docs/version-2.4/workflows/troubleshooting.md index f948f5d06..3a28c9cd0 100644 --- a/docs/versioned_docs/version-2.4/workflows/troubleshooting.md +++ b/docs/versioned_docs/version-2.4/workflows/troubleshooting.md @@ -5,7 +5,6 @@ This section aids you in finding problems when working with Constellation. ## Azure: Resource Providers can't be registered On Azure, you may receive the following error when running `create` or `terminate` with limited IAM permissions: - ```shell-session Error: Error ensuring Resource Providers are registered. @@ -22,13 +21,11 @@ To continue, please ensure that the [required resource providers](../getting-sta Afterward, set `ARM_SKIP_PROVIDER_REGISTRATION=true` as an environment variable and either run `create` or `terminate` again. For example: - ```bash ARM_SKIP_PROVIDER_REGISTRATION=true constellation create --control-plane-nodes 1 --worker-nodes 2 -y ``` Or alternatively, for `terminate`: - ```bash ARM_SKIP_PROVIDER_REGISTRATION=true constellation terminate ``` @@ -39,8 +36,8 @@ To provide information during early stages of the node's boot process, Constella You can view these information in the follow places: - - + + 1. In your Azure subscription find the Constellation resource group. 2. Inside the resource group find the Application Insights resource called `constellation-insights-*`. @@ -50,8 +47,8 @@ You can view these information in the follow places: To **find the disk UUIDs** use the following query: `traces | where message contains "Disk UUID"` - - + + 1. Select the project that hosts Constellation. 2. Go to the `Compute Engine` service. 
@@ -66,16 +63,16 @@ Constellation uses the default bucket to store logs. Its [default retention peri ::: - - + + 1. Open [AWS CloudWatch](https://console.aws.amazon.com/cloudwatch/home) 2. Select [Log Groups](https://console.aws.amazon.com/cloudwatch/home#logsV2:log-groups) 3. Select the log group that matches the name of your cluster. 4. Select the log stream for control or worker type nodes. - - + + ## Connect to nodes diff --git a/docs/versioned_docs/version-2.4/workflows/trusted-launch.md b/docs/versioned_docs/version-2.4/workflows/trusted-launch.md index 11d0a096c..13bd63ba6 100644 --- a/docs/versioned_docs/version-2.4/workflows/trusted-launch.md +++ b/docs/versioned_docs/version-2.4/workflows/trusted-launch.md @@ -14,7 +14,7 @@ Constellation supports trusted launch VMs with instance types `Standard_D*_v4` a Azure currently doesn't support [community galleries for trusted launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/share-gallery-community). Thus, you need to manually import the Constellation node image into your cloud subscription. -The latest image is available at `https://cdn.confidential.cloud/constellation/images/azure/trusted-launch/v2.2.0/constellation.img`. Simply adjust the version number to download a newer version. +The latest image is available at <https://cdn.confidential.cloud/constellation/images/azure/trusted-launch/v2.2.0/constellation.img>. Simply adjust the version number to download a newer version. After you've downloaded the image, create a resource group `constellation-images` in your Azure subscription and import the image. You can use a script to do this: @@ -26,7 +26,6 @@ AZURE_IMAGE_VERSION=2.2.0 AZURE_RESOURCE_GROUP_NAME=constellation-images AZURE_I ``` The script creates the following resources: - 1. A new image gallery with the default name `constellation-import` 2. A new image definition with the default name `constellation` 3. The actual image with the provided version. In this case `2.2.0` diff --git a/docs/versioned_docs/version-2.4/workflows/verify-cli.md b/docs/versioned_docs/version-2.4/workflows/verify-cli.md index 01a2583d6..4f6008cd0 100644 --- a/docs/versioned_docs/version-2.4/workflows/verify-cli.md +++ b/docs/versioned_docs/version-2.4/workflows/verify-cli.md @@ -1,6 +1,6 @@ # Verify the CLI -Edgeless Systems uses [sigstore](https://www.sigstore.dev/) and [SLSA](https://slsa.dev) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/cosign/signing/overview/), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at `https://rekor.sigstore.dev`. +Edgeless Systems uses [sigstore](https://www.sigstore.dev/) and [SLSA](https://slsa.dev) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/signing/quickstart), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at <https://rekor.sigstore.dev>.
:::note The public key for Edgeless Systems' long-term code-signing key is: @@ -12,7 +12,7 @@ JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== -----END PUBLIC KEY----- ``` -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). +The public key is also available for download at <https://edgeless.systems/es.pub> and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). ::: The Rekor transparency log is a public append-only ledger that verifies and records signatures and associated metadata. The Rekor transparency log enables everyone to observe the sequence of (software) signatures issued by Edgeless Systems and many other parties. The transparency log allows for the public identification of dubious or malicious signatures. @@ -25,7 +25,7 @@ You don't need to verify the Constellation node images. This is done automatical ## Verify the signature -First, [install the Cosign CLI](https://docs.sigstore.dev/cosign/system_config/installation/). Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example: +First, [install the Cosign CLI](https://docs.sigstore.dev/system_config/installation). Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example: ```shell-session $ cosign verify-blob --key https://edgeless.systems/es.pub --signature constellation-linux-amd64.sig constellation-linux-amd64 diff --git a/docs/versioned_docs/version-2.5/architecture/attestation.md b/docs/versioned_docs/version-2.5/architecture/attestation.md index 28e8e62cf..f335038f6 100644 --- a/docs/versioned_docs/version-2.5/architecture/attestation.md +++ b/docs/versioned_docs/version-2.5/architecture/attestation.md @@ -121,8 +121,8 @@ Constellation allows to specify in the config which measurements should be enfor Enforcing non-reproducible measurements controlled by the cloud provider means that changes in these values require manual updates to the cluster's config. By default, Constellation only enforces measurements that are stable values produced by the infrastructure or by Constellation directly. - - + + Constellation uses the [vTPM](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch#vtpm) feature of Azure CVMs for runtime measurements. This vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. @@ -152,8 +152,8 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + Constellation uses the [vTPM](https://cloud.google.com/compute/confidential-vm/docs/about-cvm) feature of CVMs on GCP for runtime measurements. Note that this vTPM doesn't run inside the hardware-protected CVM context, but is emulated by the hypervisor. @@ -185,8 +185,8 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + Constellation uses the [vTPM](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html) (NitroTPM) feature of the [AWS Nitro System](http://aws.amazon.com/ec2/nitro/) on AWS for runtime measurements.
@@ -217,8 +217,8 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + ## Cluster attestation diff --git a/docs/versioned_docs/version-2.5/architecture/keys.md b/docs/versioned_docs/version-2.5/architecture/keys.md index b7d7ef6f5..aa4e35496 100644 --- a/docs/versioned_docs/version-2.5/architecture/keys.md +++ b/docs/versioned_docs/version-2.5/architecture/keys.md @@ -105,7 +105,7 @@ Initially, it will support the following KMSs: * [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/#product-overview) * [KMIP-compatible KMS](https://www.oasis-open.org/committees/tc_home.php?wg_abbrev=kmip) -Storing the keys in Cloud KMS of AWS, Azure, or GCP binds the key usage to the particular cloud identity access management (IAM). +Storing the keys in Cloud KMS of AWS, GCP, or Azure binds the key usage to the particular cloud identity access management (IAM). In the future, Constellation will support remote attestation-based access policies for Cloud KMS once available. Note that using a Cloud KMS limits the isolation and protection to the guarantees of the particular offering. diff --git a/docs/versioned_docs/version-2.5/getting-started/first-steps.md b/docs/versioned_docs/version-2.5/getting-started/first-steps.md index 9ce1d6be2..4e89bb0f2 100644 --- a/docs/versioned_docs/version-2.5/getting-started/first-steps.md +++ b/docs/versioned_docs/version-2.5/getting-started/first-steps.md @@ -13,9 +13,9 @@ If you don't have a cloud subscription, check out [MiniConstellation](first-step First, you need to create a [configuration file](../workflows/config.md) and an [IAM configuration](../workflows/config.md#creating-an-iam-configuration). The easiest way to do this is the following CLI command: - + - + ```bash constellation iam create azure --region=westus --resourceGroup=constellTest --servicePrincipal=spTest --generate-config @@ -29,21 +29,21 @@ If you don't have a cloud subscription, check out [MiniConstellation](first-step * `northeurope` * `westeurope` - + - + ```bash - constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --serviceAccountID=constell-test --generate-config + constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west2-a --serviceAccountID=constell-test --generate-config ``` - This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. It also creates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. + This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west2-a` creating a new service account `constell-test`. It also creates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. Note that only regions offering CVMs of the `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `N2D`. 
- + - + ```bash constellation iam create aws --zone=eu-central-1a --prefix=constellTest --generate-config @@ -60,8 +60,8 @@ If you don't have a cloud subscription, check out [MiniConstellation](first-step You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). - - + + :::tip To learn about all options you have for managing IAM resources and Constellation configuration, see the [Configuration workflow](../workflows/config.md). diff --git a/docs/versioned_docs/version-2.5/getting-started/install.md b/docs/versioned_docs/version-2.5/getting-started/install.md index 36d8f541a..91c4bb14e 100644 --- a/docs/versioned_docs/version-2.5/getting-started/install.md +++ b/docs/versioned_docs/version-2.5/getting-started/install.md @@ -11,15 +11,15 @@ Make sure the following requirements are met: - Your machine is running Linux or macOS - You have admin rights on your machine - [kubectl](https://kubernetes.io/docs/tasks/tools/) is installed -- Your CSP is Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) +- Your CSP is Microsoft Azure, Google Cloud Platform (GCP), or Amazon Web Services (AWS) ## Install the Constellation CLI The CLI executable is available at [GitHub](https://github.com/edgelesssys/constellation/releases). Install it with the following commands: - - + + 1. Download the CLI: @@ -35,8 +35,8 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-linux-amd64 /usr/local/bin/constellation ``` - - + + 1. Download the CLI: @@ -52,9 +52,10 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-linux-arm64 /usr/local/bin/constellation ``` - - + + + 1. Download the CLI: @@ -70,9 +71,11 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-darwin-arm64 /usr/local/bin/constellation ``` - - + + + + 1. Download the CLI: @@ -88,8 +91,8 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-darwin-amd64 /usr/local/bin/constellation ``` - - + + :::tip The CLI supports autocompletion for various shells. To set it up, run `constellation completion` and follow the given steps. @@ -105,15 +108,14 @@ If you don't have a cloud subscription, you can try [MiniConstellation](first-st ### Required permissions - - + + The following [resource providers need to be registered](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/resource-providers-and-types#register-resource-provider) in your subscription: - -- `Microsoft.Compute` -- `Microsoft.ManagedIdentity` -- `Microsoft.Network` -- `microsoft.insights` +* `Microsoft.Compute` +* `Microsoft.ManagedIdentity` +* `Microsoft.Network` +* `microsoft.insights` By default, Constellation tries to register these automatically if they haven't been registered before. @@ -125,8 +127,8 @@ You need the following permissions for your user account: If you don't have these permissions with scope *subscription*, ask your administrator to [create the service account and a resource group for your Constellation cluster](first-steps.md). Your user account needs the `Contributor` permission scoped to this resource group. - - + + Create a new project for Constellation or use an existing one. Enable the [Compute Engine API](https://console.cloud.google.com/apis/library/compute.googleapis.com) on it. 
@@ -138,8 +140,8 @@ You need the following permissions on this project: Follow Google's guide on [understanding](https://cloud.google.com/iam/docs/understanding-roles) and [assigning roles](https://cloud.google.com/iam/docs/granting-changing-revoking-access). - - + + To set up a Constellation cluster, you need to perform two tasks that require permissions: create the infrastructure and create roles for cluster nodes. Both of these actions can be performed by different users, e.g., an administrator to create roles and a DevOps engineer to create the infrastructure. @@ -270,8 +272,8 @@ such as `PowerUserAccess`, or use the following minimal set of permissions: Follow Amazon's guide on [understanding](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) and [managing policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html). - - + + ### Authentication @@ -281,8 +283,8 @@ You need to authenticate with your CSP. The following lists the required steps f The steps for a *testing* environment are simpler. However, they may expose secrets to the CSP. If in doubt, follow the *production* steps. ::: - - + + **Testing** @@ -298,8 +300,8 @@ az login Other options are described in Azure's [authentication guide](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli). - - + + **Testing** @@ -322,8 +324,8 @@ Use one of the following options on a trusted machine: Follow [Google's guide](https://cloud.google.com/docs/authentication/production#manually) for setting up your credentials. - - + + **Testing** @@ -339,9 +341,10 @@ aws configure Options and first steps are described in the [AWS CLI documentation](https://docs.aws.amazon.com/cli/index.html). - + - + + ## Next steps diff --git a/docs/versioned_docs/version-2.5/overview/clouds.md b/docs/versioned_docs/version-2.5/overview/clouds.md index c48f23cf0..dd31f866f 100644 --- a/docs/versioned_docs/version-2.5/overview/clouds.md +++ b/docs/versioned_docs/version-2.5/overview/clouds.md @@ -24,11 +24,11 @@ The following table summarizes the state of features for different infrastructur With its [CVM offering](https://docs.microsoft.com/en-us/azure/confidential-computing/confidential-vm-overview), Azure provides the best foundations for Constellation. Regarding (3), Azure provides direct access to remote-attestation statements. However, regarding (4), the standard CVMs still include closed-source firmware running in VM Privilege Level (VMPL) 0. This firmware is signed by Azure. The signature is reflected in the remote-attestation statements of CVMs. Thus, the Azure closed-source firmware becomes part of Constellation's trusted computing base (TCB). -\* Recently, [Azure announced the open source paravisor OpenHCL](https://techcommunity.microsoft.com/blog/windowsosplatform/openhcl-the-new-open-source-paravisor/4273172). It's the foundation for fully open source and verifiable CVM firmware. Once Azure provides their CVM firmware with reproducible builds based on OpenHCL, (4) switches from *No* to *Yes*. Constellation will support OpenHCL based firmware on Azure in the future. +\* Recently, Azure [announced](https://techcommunity.microsoft.com/t5/azure-confidential-computing/azure-confidential-vms-using-sev-snp-dcasv5-ecasv5-are-now/ba-p/3573747) the *limited preview* of CVMs with customizable firmware. With this CVM type, (4) switches from *No* to *Yes*. Constellation will support customizable firmware on Azure in the future. 
## Google Cloud Platform (GCP) -The [CVMs available in GCP](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview#amd_sev) are based on AMD SEV but don't have SNP features enabled. This impacts attestation capabilities. Currently, GCP doesn't offer CVM-based attestation at all. Instead, GCP provides attestation statements based on its regular [vTPM](https://cloud.google.com/blog/products/identity-security/virtual-trusted-platform-module-for-shielded-vms-security-in-plaintext), which is managed by the hypervisor. On GCP, the hypervisor is thus currently part of Constellation's TCB. +The [CVMs available in GCP](https://cloud.google.com/compute/confidential-vm/docs/create-confidential-vm-instance) are based on AMD SEV but don't have SNP features enabled. This impacts attestation capabilities. Currently, GCP doesn't offer CVM-based attestation at all. Instead, GCP provides attestation statements based on its regular [vTPM](https://cloud.google.com/blog/products/identity-security/virtual-trusted-platform-module-for-shielded-vms-security-in-plaintext), which is managed by the hypervisor. On GCP, the hypervisor is thus currently part of Constellation's TCB. ## Amazon Web Services (AWS) diff --git a/docs/versioned_docs/version-2.5/overview/confidential-kubernetes.md b/docs/versioned_docs/version-2.5/overview/confidential-kubernetes.md index 1441c833a..2b6c6ed17 100644 --- a/docs/versioned_docs/version-2.5/overview/confidential-kubernetes.md +++ b/docs/versioned_docs/version-2.5/overview/confidential-kubernetes.md @@ -23,9 +23,9 @@ With the above, Constellation wraps an entire cluster into one coherent and veri ![Confidential Kubernetes](../_media/concept-constellation.svg) -## Comparison: Managed Kubernetes with CVMs +## Contrast: Managed Kubernetes with CVMs -In comparison, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. Consequently, this approach leaves a large attack surface, as is depicted in the following. +In contrast, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. 
Consequently, this approach leaves a large attack surface, as is depicted in the following. ![Concept: Managed Kubernetes plus CVMs](../_media/concept-managed.svg) diff --git a/docs/versioned_docs/version-2.5/overview/product.md b/docs/versioned_docs/version-2.5/overview/product.md index e42596fcc..ba7181aa9 100644 --- a/docs/versioned_docs/version-2.5/overview/product.md +++ b/docs/versioned_docs/version-2.5/overview/product.md @@ -6,6 +6,6 @@ From a security perspective, Constellation implements the [Confidential Kubernet From an operational perspective, Constellation provides the following key features: -* **Native support for different clouds**: Constellation works on Amazon Web Services (AWS), Microsoft Azure, and Google Cloud Platform (GCP). Support for OpenStack-based environments is coming with a future release. Constellation securely interfaces with the cloud infrastructure to provide [cluster autoscaling](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), [dynamic persistent volumes](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/), and [service load balancing](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). +* **Native support for different clouds**: Constellation works on Microsoft Azure, Google Cloud Platform (GCP), and Amazon Web Services (AWS). Support for OpenStack-based environments is coming with a future release. Constellation securely interfaces with the cloud infrastructure to provide [cluster autoscaling](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), [dynamic persistent volumes](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/), and [service load balancing](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). * **High availability**: Constellation uses a [multi-master architecture](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/) with a [stacked etcd topology](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/ha-topology/#stacked-etcd-topology) to ensure high availability. * **Integrated Day-2 operations**: Constellation lets you securely [upgrade](../workflows/upgrade.md) your cluster to a new release. It also lets you securely [recover](../workflows/recovery.md) a failed cluster. Both with a single command. diff --git a/docs/versioned_docs/version-2.5/workflows/config.md b/docs/versioned_docs/version-2.5/workflows/config.md index 71f2d019d..100dedc8c 100644 --- a/docs/versioned_docs/version-2.5/workflows/config.md +++ b/docs/versioned_docs/version-2.5/workflows/config.md @@ -6,29 +6,29 @@ Before you can create your cluster, you need to configure the identity and acces You can generate a configuration file for your CSP by using the following CLI command: - - + + ```bash constellation config generate azure ``` - - + + ```bash constellation config generate gcp ``` - - + + ```bash constellation config generate aws ``` - - + + This creates the file `constellation-conf.yaml` in the current directory. @@ -39,25 +39,25 @@ You can also automatically generate a configuration file by adding the `--genera ## Choosing a VM type Constellation supports the following VM types: - - + + By default, Constellation uses `Standard_DC4as_v5` CVMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. 
For CVMs, any VM type with a minimum of 4 vCPUs from the [DCasv5 & DCadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series) or [ECasv5 & ECadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/ecasv5-ecadsv5-series) families is supported. You can also run `constellation config instance-types` to get the list of all supported options. - - + + By default, Constellation uses `n2d-standard-4` VMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. Supported are all machines with a minimum of 4 vCPUs from the N2D family. Refer to [N2D machine series](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines) or run `constellation config instance-types` to get the list of all supported options. - - + + By default, Constellation uses `m6a.xlarge` VMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. Supported are all nitroTPM-enabled machines with a minimum of 4 vCPUs (`xlarge` or larger). Refer to the [list of nitroTPM-enabled instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enable-nitrotpm-prerequisites.html) or run `constellation config instance-types` to get the list of all supported options. - - + + Fill the desired VM type into the **instanceType** field in the `constellation-conf.yml` file. @@ -66,8 +66,8 @@ Fill the desired VM type into the **instanceType** field in the `constellation-c You can create an IAM configuration for your cluster automatically using the `constellation iam create` command. If you haven't generated a configuration file yet, you can do so by adding the `--generate-config` flag to the command. This creates a configuration file and populates it with the created IAM values. - - + + You must be authenticated with the [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli) in the shell session. @@ -90,23 +90,23 @@ Paste the output into the corresponding fields of the `constellation-conf.yaml` Since `clientSecretValue` is a sensitive value, you can leave it empty in the configuration file and pass it via an environment variable instead. To this end, create the environment variable `CONSTELL_AZURE_CLIENT_SECRET_VALUE` and set it to the secret value. ::: - - + + You must be authenticated with the [GCP CLI](https://cloud.google.com/sdk/gcloud) in the shell session. ```bash -constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --serviceAccountID=constell-test +constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west2-a --serviceAccountID=constell-test ``` -This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. +This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west2-a` creating a new service account `constell-test`. Note that only regions offering CVMs of the `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `N2D`. Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - + + You must be authenticated with the [AWS CLI](https://aws.amazon.com/en/cli/) in the shell session. 
@@ -128,16 +128,16 @@ You can find a list of all [regions in AWS's documentation](https://docs.aws.ama Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - + +
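For reference, a typical invocation of the automatic AWS IAM creation looks like the following sketch; the zone and prefix values mirror the example used in the first-steps guide and are placeholders for your own environment:

```bash
# Create the IAM resources Constellation needs in the given AWS availability zone.
# --zone and --prefix are illustrative values; adjust them to your setup.
constellation iam create aws --zone=eu-central-1a --prefix=constellTest
```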
Alternatively, you can manually create the IAM configuration on your CSP. The following describes the configuration fields and how you obtain the required information or create the required resources. - - + + * **subscription**: The UUID of your Azure subscription, e.g., `8b8bd01f-efd9-4113-9bd1-c82137c32da7`. @@ -181,19 +181,19 @@ The following describes the configuration fields and how you obtain the required Since this is a sensitive value, alternatively you can leave `clientSecretValue` empty in the configuration file and pass it via an environment variable instead. To this end, create the environment variable `CONSTELL_AZURE_CLIENT_SECRET_VALUE` and set it to the secret value. ::: - + - + * **project**: The ID of your GCP project, e.g., `constellation-129857`. You can find it on the [welcome screen of your GCP project](https://console.cloud.google.com/welcome). For more information refer to [Google's documentation](https://support.google.com/googleapi/answer/7014113). -* **region**: The GCP region you want to deploy your cluster in, e.g., `us-central1`. +* **region**: The GCP region you want to deploy your cluster in, e.g., `us-west1`. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). -* **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-central1-a`. +* **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-west1-a`. You can find a [list of all zones in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). @@ -207,9 +207,9 @@ The following describes the configuration fields and how you obtain the required Afterward, create and download a new JSON key for this service account. Place the downloaded file in your Constellation workspace, and set the config parameter to the filename, e.g., `constellation-129857-15343dba46cb.json`. - + - + * **region**: The name of your chosen AWS data center region, e.g., `us-east-2`. @@ -238,9 +238,9 @@ The following describes the configuration fields and how you obtain the required Alternatively, you can create the AWS profile with a tool of your choice. Use the JSON policy in [main.tf](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam/main.tf) in the resource `aws_iam_policy.worker_node_policy`. - + - +
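For the GCP step described above (creating and downloading a JSON key for the service account), a minimal sketch using the standard gcloud command might look like this; the service account e-mail and output filename are illustrative placeholders, and the downloaded file is what you reference in the key-file parameter of the configuration:

```bash
# Create and download a JSON key for the Constellation service account.
# Account e-mail and file name are placeholders; place the file in your Constellation workspace.
gcloud iam service-accounts keys create constellation-129857-key.json \
  --iam-account=constell-test@constellation-129857.iam.gserviceaccount.com
```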
Now that you've configured your CSP, you can [create your cluster](./create.md). diff --git a/docs/versioned_docs/version-2.5/workflows/create.md b/docs/versioned_docs/version-2.5/workflows/create.md index 115effbdf..7fc991fd7 100644 --- a/docs/versioned_docs/version-2.5/workflows/create.md +++ b/docs/versioned_docs/version-2.5/workflows/create.md @@ -18,8 +18,8 @@ Before you create the cluster, make sure to have a [valid configuration file](./ ### Create - - + + Choose the initial size of your cluster. The following command creates a cluster with one control-plane and two worker nodes: @@ -32,8 +32,8 @@ For details on the flags, consult the command help via `constellation create -h` *create* stores your cluster's state into a [`terraform.tfstate`](../architecture/orchestration.md#cluster-creation-process) file in your workspace. - - + + Constellation supports managing the infrastructure via Terraform. This allows for an easier GitOps integration as well as meeting regulatory requirements. Since the Constellation CLI also uses Terraform under the hood, you can reuse the same Terraform files. @@ -68,8 +68,8 @@ CONSTELL_CSP=$(cat constellation-conf.yaml | yq ".provider | keys | .[0]") jq --null-input --arg cloudprovider "$CONSTELL_CSP" --arg ip "$CONSTELL_IP" --arg initsecret "$CONSTELL_INIT_SECRET" '{"cloudprovider":$cloudprovider,"ip":$ip,"initsecret":$initsecret}' > constellation-id.json ``` - - + + ## The *init* step diff --git a/docs/versioned_docs/version-2.5/workflows/recovery.md b/docs/versioned_docs/version-2.5/workflows/recovery.md index 0fd171036..fd610fc67 100644 --- a/docs/versioned_docs/version-2.5/workflows/recovery.md +++ b/docs/versioned_docs/version-2.5/workflows/recovery.md @@ -16,8 +16,8 @@ You can check the health status of the nodes via the cloud service provider (CSP Constellation provides logging information on the boot process and status via [cloud logging](troubleshooting.md#cloud-logging). In the following, you'll find detailed descriptions for identifying clusters stuck in recovery for each CSP. - - + + In the Azure portal, find the cluster's resource group. Inside the resource group, open the control plane *Virtual machine scale set* `constellation-scale-set-controlplanes-`. @@ -51,8 +51,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. - - + + First, check that the control plane *Instance Group* has enough members in a *Ready* state. In the GCP Console, go to **Instance Groups** and check the group for the cluster's control plane `-control-plane-`. @@ -87,8 +87,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. - - + + First, open the AWS console to view all Auto Scaling Groups (ASGs) in the region of your cluster. Select the ASG of the control plane `--control-plane` and check that enough members are in a *Running* state. @@ -118,8 +118,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. 
- - + + ## Recover a cluster diff --git a/docs/versioned_docs/version-2.5/workflows/sbom.md b/docs/versioned_docs/version-2.5/workflows/sbom.md index e8ba25a64..ec9834b4f 100644 --- a/docs/versioned_docs/version-2.5/workflows/sbom.md +++ b/docs/versioned_docs/version-2.5/workflows/sbom.md @@ -15,7 +15,7 @@ JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== -----END PUBLIC KEY----- ``` -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). +The public key is also available for download at <https://edgeless.systems/es.pub> and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). Make sure the key is available in a file named `cosign.pub` to execute the following examples. ::: @@ -36,7 +36,7 @@ cosign verify-blob --key cosign.pub --signature constellation.spdx.sbom.sig cons ### Container Images -SBOMs for container images are [attached to the image using Cosign](https://docs.sigstore.dev/cosign/signing/other_types/#sboms-software-bill-of-materials) and uploaded to the same registry. +SBOMs for container images are [attached to the image using Cosign](https://docs.sigstore.dev/signing/other_types#sboms-software-bill-of-materials) and uploaded to the same registry. As a consumer, use cosign to download and verify the SBOM: diff --git a/docs/versioned_docs/version-2.5/workflows/scale.md b/docs/versioned_docs/version-2.5/workflows/scale.md index bce045c66..3b7c0d479 100644 --- a/docs/versioned_docs/version-2.5/workflows/scale.md +++ b/docs/versioned_docs/version-2.5/workflows/scale.md @@ -48,23 +48,23 @@ kubectl -n kube-system get nodes Alternatively, you can manually scale your cluster up or down: - - + + 1. Find your Constellation resource group. 2. Select the `scale-set-workers`. 3. Go to **settings** and **scaling**. 4. Set the new **instance count** and **save**. - - + + 1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). 2. **Edit** the **worker** instance group. 3. Set the new **number of instances** and **save**. - - + + :::caution @@ -72,8 +72,8 @@ Scaling isn't yet implemented for AWS. If you require this feature, [let us know ::: - - + + ## Control-plane node scaling @@ -81,24 +81,24 @@ Control-plane nodes can **only be scaled manually and only scaled up**! To increase the number of control-plane nodes, follow these steps: - + - + 1. Find your Constellation resource group. 2. Select the `scale-set-controlplanes`. 3. Go to **settings** and **scaling**. 4. Set the new (increased) **instance count** and **save**. - - + + 1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). 2. **Edit** the **control-plane** instance group. 3. Set the new (increased) **number of instances** and **save**. - - + + :::caution @@ -106,7 +106,7 @@ Scaling isn't yet implemented for AWS. If you require this feature, [let us know ::: - - + + If you scale down the number of control-plane nodes, the removed nodes won't be able to exit the `etcd` cluster correctly. This will endanger the quorum that's required to run a stable Kubernetes control plane.
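After manually changing an instance count as described above, you can confirm that the new nodes registered with the cluster; a minimal check (nothing Constellation-specific, just standard kubectl) is:

```bash
# List all nodes and their readiness; newly added worker or control-plane nodes
# should appear here and eventually reach the Ready state.
kubectl get nodes
```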
diff --git a/docs/versioned_docs/version-2.5/workflows/storage.md b/docs/versioned_docs/version-2.5/workflows/storage.md index be9998676..d0e5b188f 100644 --- a/docs/versioned_docs/version-2.5/workflows/storage.md +++ b/docs/versioned_docs/version-2.5/workflows/storage.md @@ -21,14 +21,14 @@ For more details see [encrypted persistent storage](../architecture/encrypted-st Constellation supports the following drivers, which offer node-level encryption and optional integrity protection. - - + + **Constellation CSI driver for Azure Disk**: Mount Azure [Disk Storage](https://azure.microsoft.com/en-us/services/storage/disks/#overview) into your Constellation cluster. See the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-azuredisk-csi-driver) for more information. Since Azure Disks are mounted as ReadWriteOnce, they're only available to a single pod. - - + + **Constellation CSI driver for GCP Persistent Disk**: Mount [Persistent Disk](https://cloud.google.com/persistent-disk) block storage into your Constellation cluster. @@ -36,8 +36,8 @@ This includes support for [volume snapshots](https://cloud.google.com/kubernetes You can use them to bring a volume back to a prior state or provision new volumes. Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-gcp-compute-persistent-disk-csi-driver) for information about the configuration. - - + + :::caution @@ -47,8 +47,8 @@ You may use other (non-confidential) CSI drivers that are compatible with Kubern ::: - - + + Note that in case the options above aren't a suitable solution for you, Constellation is compatible with all other CSI-based storage options. For example, you can use [Azure Files](https://docs.microsoft.com/en-us/azure/storage/files/storage-files-introduction) or [GCP Filestore](https://cloud.google.com/filestore) with Constellation out of the box. Constellation is just not providing transparent encryption on the node level for these storage types yet. @@ -57,8 +57,8 @@ Note that in case the options above aren't a suitable solution for you, Constell The Constellation CLI automatically installs Constellation's CSI driver for the selected CSP in your cluster. If you don't need a CSI driver or wish to deploy your own, you can disable the automatic installation by setting `deployCSIDriver` to `false` in your Constellation config file. - - + + Azure comes with two storage classes by default. @@ -86,8 +86,8 @@ Note that volume expansion isn't supported for integrity-protected disks. ::: - - + + GCP comes with two storage classes by default. @@ -115,8 +115,8 @@ Note that volume expansion isn't supported for integrity-protected disks. ::: - - + + :::caution @@ -126,8 +126,8 @@ You may use other (non-confidential) CSI drivers that are compatible with Kubern ::: - - + + 1. Create a [persistent volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) @@ -186,8 +186,8 @@ The default storage class is responsible for all persistent volume claims that d Constellation creates a storage class with encryption enabled and sets this as the default class. In case you wish to change it, follow the steps below: - - + + 1. 
List the storage classes in your cluster: @@ -233,8 +233,8 @@ In case you wish to change it, follow the steps below: integrity-encrypted-rwo (default) azuredisk.csi.confidential.cloud Delete Immediate false 1d ``` - - + + 1. List the storage classes in your cluster: @@ -280,8 +280,8 @@ In case you wish to change it, follow the steps below: integrity-encrypted-rwo (default) gcp.csi.confidential.cloud Delete Immediate false 1d ``` - - + + :::caution @@ -291,5 +291,5 @@ You may use other (non-confidential) CSI drivers that are compatible with Kubern ::: - - + + diff --git a/docs/versioned_docs/version-2.5/workflows/terminate.md b/docs/versioned_docs/version-2.5/workflows/terminate.md index ee64a2784..a2556fe95 100644 --- a/docs/versioned_docs/version-2.5/workflows/terminate.md +++ b/docs/versioned_docs/version-2.5/workflows/terminate.md @@ -8,8 +8,8 @@ All ephemeral storage and state of your cluster will be lost. Make sure any data ::: - - + + Terminate the cluster by running: ```bash @@ -32,8 +32,8 @@ resources manually. Just run the `terminate` command again afterward to continue ::: - - + + Terminate the cluster by running: ```bash @@ -48,5 +48,5 @@ rm constellation-id.json constellation-admin.conf Only the `constellation-mastersecret.json` and the configuration file remain. - - + + diff --git a/docs/versioned_docs/version-2.5/workflows/troubleshooting.md b/docs/versioned_docs/version-2.5/workflows/troubleshooting.md index f948f5d06..3a28c9cd0 100644 --- a/docs/versioned_docs/version-2.5/workflows/troubleshooting.md +++ b/docs/versioned_docs/version-2.5/workflows/troubleshooting.md @@ -5,7 +5,6 @@ This section aids you in finding problems when working with Constellation. ## Azure: Resource Providers can't be registered On Azure, you may receive the following error when running `create` or `terminate` with limited IAM permissions: - ```shell-session Error: Error ensuring Resource Providers are registered. @@ -22,13 +21,11 @@ To continue, please ensure that the [required resource providers](../getting-sta Afterward, set `ARM_SKIP_PROVIDER_REGISTRATION=true` as an environment variable and either run `create` or `terminate` again. For example: - ```bash ARM_SKIP_PROVIDER_REGISTRATION=true constellation create --control-plane-nodes 1 --worker-nodes 2 -y ``` Or alternatively, for `terminate`: - ```bash ARM_SKIP_PROVIDER_REGISTRATION=true constellation terminate ``` @@ -39,8 +36,8 @@ To provide information during early stages of the node's boot process, Constella You can view these information in the follow places: - - + + 1. In your Azure subscription find the Constellation resource group. 2. Inside the resource group find the Application Insights resource called `constellation-insights-*`. @@ -50,8 +47,8 @@ You can view these information in the follow places: To **find the disk UUIDs** use the following query: `traces | where message contains "Disk UUID"` - - + + 1. Select the project that hosts Constellation. 2. Go to the `Compute Engine` service. @@ -66,16 +63,16 @@ Constellation uses the default bucket to store logs. Its [default retention peri ::: - - + + 1. Open [AWS CloudWatch](https://console.aws.amazon.com/cloudwatch/home) 2. Select [Log Groups](https://console.aws.amazon.com/cloudwatch/home#logsV2:log-groups) 3. Select the log group that matches the name of your cluster. 4. Select the log stream for control or worker type nodes. 
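If you prefer the terminal over the console for the CloudWatch steps above, the AWS CLI (v2) can stream the same logs; the log group name below is a placeholder for the group that matches your cluster:

```bash
# Stream boot logs from the cluster's CloudWatch log group (name is a placeholder).
aws logs tail "<your-cluster-log-group>" --follow
```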
- - + + ## Connect to nodes diff --git a/docs/versioned_docs/version-2.5/workflows/trusted-launch.md b/docs/versioned_docs/version-2.5/workflows/trusted-launch.md index 11d0a096c..13bd63ba6 100644 --- a/docs/versioned_docs/version-2.5/workflows/trusted-launch.md +++ b/docs/versioned_docs/version-2.5/workflows/trusted-launch.md @@ -14,7 +14,7 @@ Constellation supports trusted launch VMs with instance types `Standard_D*_v4` a Azure currently doesn't support [community galleries for trusted launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/share-gallery-community). Thus, you need to manually import the Constellation node image into your cloud subscription. -The latest image is available at `https://cdn.confidential.cloud/constellation/images/azure/trusted-launch/v2.2.0/constellation.img`. Simply adjust the version number to download a newer version. +The latest image is available at <https://cdn.confidential.cloud/constellation/images/azure/trusted-launch/v2.2.0/constellation.img>. Simply adjust the version number to download a newer version. After you've downloaded the image, create a resource group `constellation-images` in your Azure subscription and import the image. You can use a script to do this: @@ -26,7 +26,6 @@ AZURE_IMAGE_VERSION=2.2.0 AZURE_RESOURCE_GROUP_NAME=constellation-images AZURE_I ``` The script creates the following resources: - 1. A new image gallery with the default name `constellation-import` 2. A new image definition with the default name `constellation` 3. The actual image with the provided version. In this case `2.2.0` diff --git a/docs/versioned_docs/version-2.5/workflows/verify-cli.md b/docs/versioned_docs/version-2.5/workflows/verify-cli.md index 01a2583d6..4f6008cd0 100644 --- a/docs/versioned_docs/version-2.5/workflows/verify-cli.md +++ b/docs/versioned_docs/version-2.5/workflows/verify-cli.md @@ -1,6 +1,6 @@ # Verify the CLI -Edgeless Systems uses [sigstore](https://www.sigstore.dev/) and [SLSA](https://slsa.dev) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/cosign/signing/overview/), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at `https://rekor.sigstore.dev`. +Edgeless Systems uses [sigstore](https://www.sigstore.dev/) and [SLSA](https://slsa.dev) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/signing/quickstart), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at <https://rekor.sigstore.dev>. :::note The public key for Edgeless Systems' long-term code-signing key is: @@ -12,7 +12,7 @@ JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== -----END PUBLIC KEY----- ``` -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). +The public key is also available for download at <https://edgeless.systems/es.pub> and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). ::: The Rekor transparency log is a public append-only ledger that verifies and records signatures and associated metadata.
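To make the log inspection described here concrete, a hedged sketch using the `rekor-cli` tool (assuming it's installed; the artifact name matches the CLI binary that's verified below) might be:

```bash
# Look up transparency-log entries for the downloaded CLI binary.
# Prints the UUIDs of any matching Rekor entries.
rekor-cli search --artifact constellation-linux-amd64
```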
The Rekor transparency log enables everyone to observe the sequence of (software) signatures issued by Edgeless Systems and many other parties. The transparency log allows for the public identification of dubious or malicious signatures. @@ -25,7 +25,7 @@ You don't need to verify the Constellation node images. This is done automatical ## Verify the signature -First, [install the Cosign CLI](https://docs.sigstore.dev/cosign/system_config/installation/). Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example: +First, [install the Cosign CLI](https://docs.sigstore.dev/system_config/installation). Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example: ```shell-session $ cosign verify-blob --key https://edgeless.systems/es.pub --signature constellation-linux-amd64.sig constellation-linux-amd64 diff --git a/docs/versioned_docs/version-2.6/architecture/attestation.md b/docs/versioned_docs/version-2.6/architecture/attestation.md index 20f9909fd..0c7a1487b 100644 --- a/docs/versioned_docs/version-2.6/architecture/attestation.md +++ b/docs/versioned_docs/version-2.6/architecture/attestation.md @@ -121,8 +121,8 @@ Constellation allows to specify in the config which measurements should be enfor Enforcing non-reproducible measurements controlled by the cloud provider means that changes in these values require manual updates to the cluster's config. By default, Constellation only enforces measurements that are stable values produced by the infrastructure or by Constellation directly. - - + + Constellation uses the [vTPM](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch#vtpm) feature of Azure CVMs for runtime measurements. This vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. @@ -152,8 +152,8 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + Constellation uses the [vTPM](https://cloud.google.com/compute/confidential-vm/docs/about-cvm) feature of CVMs on GCP for runtime measurements. Note that this vTPM doesn't run inside the hardware-protected CVM context, but is emulated by the hypervisor. @@ -185,8 +185,8 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + Constellation uses the [vTPM](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html) (NitroTPM) feature of the [AWS Nitro System](http://aws.amazon.com/ec2/nitro/) on AWS for runtime measurements. 
@@ -217,8 +217,8 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + ## Cluster attestation diff --git a/docs/versioned_docs/version-2.6/architecture/keys.md b/docs/versioned_docs/version-2.6/architecture/keys.md index 553d9d4e2..f2c8c3fba 100644 --- a/docs/versioned_docs/version-2.6/architecture/keys.md +++ b/docs/versioned_docs/version-2.6/architecture/keys.md @@ -105,7 +105,7 @@ Initially, it will support the following KMSs: * [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/#product-overview) * [KMIP-compatible KMS](https://www.oasis-open.org/committees/tc_home.php?wg_abbrev=kmip) -Storing the keys in Cloud KMS of AWS, Azure, or GCP binds the key usage to the particular cloud identity access management (IAM). +Storing the keys in Cloud KMS of AWS, GCP, or Azure binds the key usage to the particular cloud identity access management (IAM). In the future, Constellation will support remote attestation-based access policies for Cloud KMS once available. Note that using a Cloud KMS limits the isolation and protection to the guarantees of the particular offering. diff --git a/docs/versioned_docs/version-2.6/getting-started/first-steps.md b/docs/versioned_docs/version-2.6/getting-started/first-steps.md index df489f52a..ad89d89c8 100644 --- a/docs/versioned_docs/version-2.6/getting-started/first-steps.md +++ b/docs/versioned_docs/version-2.6/getting-started/first-steps.md @@ -17,9 +17,9 @@ If you encounter any problem with the following steps, make sure to use the [lat First, you need to create a [configuration file](../workflows/config.md) and an [IAM configuration](../workflows/config.md#creating-an-iam-configuration). The easiest way to do this is the following CLI command: - + - + ```bash constellation iam create azure --region=westus --resourceGroup=constellTest --servicePrincipal=spTest --generate-config @@ -33,21 +33,21 @@ If you encounter any problem with the following steps, make sure to use the [lat * `northeurope` * `westeurope` - + - + ```bash - constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --serviceAccountID=constell-test --generate-config + constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west2-a --serviceAccountID=constell-test --generate-config ``` - This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. It also creates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. + This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west2-a` creating a new service account `constell-test`. It also creates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `C2D` or `N2D`. 
- + - + ```bash constellation iam create aws --zone=eu-central-1a --prefix=constellTest --generate-config @@ -64,8 +64,8 @@ If you encounter any problem with the following steps, make sure to use the [lat You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). - - + + :::tip To learn about all options you have for managing IAM resources and Constellation configuration, see the [Configuration workflow](../workflows/config.md). diff --git a/docs/versioned_docs/version-2.6/getting-started/install.md b/docs/versioned_docs/version-2.6/getting-started/install.md index 36d8f541a..91c4bb14e 100644 --- a/docs/versioned_docs/version-2.6/getting-started/install.md +++ b/docs/versioned_docs/version-2.6/getting-started/install.md @@ -11,15 +11,15 @@ Make sure the following requirements are met: - Your machine is running Linux or macOS - You have admin rights on your machine - [kubectl](https://kubernetes.io/docs/tasks/tools/) is installed -- Your CSP is Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) +- Your CSP is Microsoft Azure, Google Cloud Platform (GCP), or Amazon Web Services (AWS) ## Install the Constellation CLI The CLI executable is available at [GitHub](https://github.com/edgelesssys/constellation/releases). Install it with the following commands: - - + + 1. Download the CLI: @@ -35,8 +35,8 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-linux-amd64 /usr/local/bin/constellation ``` - - + + 1. Download the CLI: @@ -52,9 +52,10 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-linux-arm64 /usr/local/bin/constellation ``` - - + + + 1. Download the CLI: @@ -70,9 +71,11 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-darwin-arm64 /usr/local/bin/constellation ``` - - + + + + 1. Download the CLI: @@ -88,8 +91,8 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-darwin-amd64 /usr/local/bin/constellation ``` - - + + :::tip The CLI supports autocompletion for various shells. To set it up, run `constellation completion` and follow the given steps. @@ -105,15 +108,14 @@ If you don't have a cloud subscription, you can try [MiniConstellation](first-st ### Required permissions - - + + The following [resource providers need to be registered](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/resource-providers-and-types#register-resource-provider) in your subscription: - -- `Microsoft.Compute` -- `Microsoft.ManagedIdentity` -- `Microsoft.Network` -- `microsoft.insights` +* `Microsoft.Compute` +* `Microsoft.ManagedIdentity` +* `Microsoft.Network` +* `microsoft.insights` By default, Constellation tries to register these automatically if they haven't been registered before. @@ -125,8 +127,8 @@ You need the following permissions for your user account: If you don't have these permissions with scope *subscription*, ask your administrator to [create the service account and a resource group for your Constellation cluster](first-steps.md). Your user account needs the `Contributor` permission scoped to this resource group. - - + + Create a new project for Constellation or use an existing one. Enable the [Compute Engine API](https://console.cloud.google.com/apis/library/compute.googleapis.com) on it. 
@@ -138,8 +140,8 @@ You need the following permissions on this project: Follow Google's guide on [understanding](https://cloud.google.com/iam/docs/understanding-roles) and [assigning roles](https://cloud.google.com/iam/docs/granting-changing-revoking-access). - - + + To set up a Constellation cluster, you need to perform two tasks that require permissions: create the infrastructure and create roles for cluster nodes. Both of these actions can be performed by different users, e.g., an administrator to create roles and a DevOps engineer to create the infrastructure. @@ -270,8 +272,8 @@ such as `PowerUserAccess`, or use the following minimal set of permissions: Follow Amazon's guide on [understanding](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) and [managing policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html). - - + + ### Authentication @@ -281,8 +283,8 @@ You need to authenticate with your CSP. The following lists the required steps f The steps for a *testing* environment are simpler. However, they may expose secrets to the CSP. If in doubt, follow the *production* steps. ::: - - + + **Testing** @@ -298,8 +300,8 @@ az login Other options are described in Azure's [authentication guide](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli). - - + + **Testing** @@ -322,8 +324,8 @@ Use one of the following options on a trusted machine: Follow [Google's guide](https://cloud.google.com/docs/authentication/production#manually) for setting up your credentials. - - + + **Testing** @@ -339,9 +341,10 @@ aws configure Options and first steps are described in the [AWS CLI documentation](https://docs.aws.amazon.com/cli/index.html). - + - + + ## Next steps diff --git a/docs/versioned_docs/version-2.6/overview/clouds.md b/docs/versioned_docs/version-2.6/overview/clouds.md index c48f23cf0..dd31f866f 100644 --- a/docs/versioned_docs/version-2.6/overview/clouds.md +++ b/docs/versioned_docs/version-2.6/overview/clouds.md @@ -24,11 +24,11 @@ The following table summarizes the state of features for different infrastructur With its [CVM offering](https://docs.microsoft.com/en-us/azure/confidential-computing/confidential-vm-overview), Azure provides the best foundations for Constellation. Regarding (3), Azure provides direct access to remote-attestation statements. However, regarding (4), the standard CVMs still include closed-source firmware running in VM Privilege Level (VMPL) 0. This firmware is signed by Azure. The signature is reflected in the remote-attestation statements of CVMs. Thus, the Azure closed-source firmware becomes part of Constellation's trusted computing base (TCB). -\* Recently, [Azure announced the open source paravisor OpenHCL](https://techcommunity.microsoft.com/blog/windowsosplatform/openhcl-the-new-open-source-paravisor/4273172). It's the foundation for fully open source and verifiable CVM firmware. Once Azure provides their CVM firmware with reproducible builds based on OpenHCL, (4) switches from *No* to *Yes*. Constellation will support OpenHCL based firmware on Azure in the future. +\* Recently, Azure [announced](https://techcommunity.microsoft.com/t5/azure-confidential-computing/azure-confidential-vms-using-sev-snp-dcasv5-ecasv5-are-now/ba-p/3573747) the *limited preview* of CVMs with customizable firmware. With this CVM type, (4) switches from *No* to *Yes*. Constellation will support customizable firmware on Azure in the future. 
## Google Cloud Platform (GCP) -The [CVMs available in GCP](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview#amd_sev) are based on AMD SEV but don't have SNP features enabled. This impacts attestation capabilities. Currently, GCP doesn't offer CVM-based attestation at all. Instead, GCP provides attestation statements based on its regular [vTPM](https://cloud.google.com/blog/products/identity-security/virtual-trusted-platform-module-for-shielded-vms-security-in-plaintext), which is managed by the hypervisor. On GCP, the hypervisor is thus currently part of Constellation's TCB. +The [CVMs available in GCP](https://cloud.google.com/compute/confidential-vm/docs/create-confidential-vm-instance) are based on AMD SEV but don't have SNP features enabled. This impacts attestation capabilities. Currently, GCP doesn't offer CVM-based attestation at all. Instead, GCP provides attestation statements based on its regular [vTPM](https://cloud.google.com/blog/products/identity-security/virtual-trusted-platform-module-for-shielded-vms-security-in-plaintext), which is managed by the hypervisor. On GCP, the hypervisor is thus currently part of Constellation's TCB. ## Amazon Web Services (AWS) diff --git a/docs/versioned_docs/version-2.6/overview/confidential-kubernetes.md b/docs/versioned_docs/version-2.6/overview/confidential-kubernetes.md index 1441c833a..2b6c6ed17 100644 --- a/docs/versioned_docs/version-2.6/overview/confidential-kubernetes.md +++ b/docs/versioned_docs/version-2.6/overview/confidential-kubernetes.md @@ -23,9 +23,9 @@ With the above, Constellation wraps an entire cluster into one coherent and veri ![Confidential Kubernetes](../_media/concept-constellation.svg) -## Comparison: Managed Kubernetes with CVMs +## Contrast: Managed Kubernetes with CVMs -In comparison, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. Consequently, this approach leaves a large attack surface, as is depicted in the following. +In contrast, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. 
Consequently, this approach leaves a large attack surface, as is depicted in the following. ![Concept: Managed Kubernetes plus CVMs](../_media/concept-managed.svg) diff --git a/docs/versioned_docs/version-2.6/overview/performance.md b/docs/versioned_docs/version-2.6/overview/performance.md index aef594c46..54f31019a 100644 --- a/docs/versioned_docs/version-2.6/overview/performance.md +++ b/docs/versioned_docs/version-2.6/overview/performance.md @@ -63,6 +63,7 @@ The following infrastructure configurations was used: - CVM: `false` - Zone: `europe-west3-b` + ### Results #### Network @@ -70,7 +71,7 @@ The following infrastructure configurations was used: This section gives a thorough analysis of the network performance of Constellation, specifically focusing on measuring TCP and UDP bandwidth. The benchmark measured the bandwidth of pod-to-pod and pod-to-service connections between two different nodes using [`iperf`](https://iperf.fr/). -GKE and Constellation on GCP had a maximum network bandwidth of [10 Gbps](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines). +GKE and Constellation on GCP had a maximum network bandwidth of [10 Gbps](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines). AKS with `Standard_D4as_v5` machines had a maximum network bandwidth of [12.5 Gbps](https://learn.microsoft.com/en-us/azure/virtual-machines/dasv5-dadsv5-series#dasv5-series). The Confidential VM equivalent `Standard_DC4as_v5` currently has a network bandwidth of [1.25 Gbps](https://learn.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series#dcasv5-series-products). Therefore, to make the test comparable, both AKS and Constellation on Azure were running with `Standard_DC4as_v5` machines and 1.25 Gbps bandwidth. @@ -78,10 +79,11 @@ Therefore, to make the test comparable, both AKS and Constellation on Azure were Constellation on Azure and AKS used an MTU of 1500. Constellation on GCP used an MTU of 8896. GKE used an MTU of 1450. + The difference in network bandwidth can largely be attributed to two factors. -- Constellation's [network encryption](../architecture/networking.md) via Cilium and WireGuard, which protects data in-transit. -- [AMD SEV using SWIOTLB bounce buffers](https://lore.kernel.org/all/20200204193500.GA15564@ashkalra_ubuntu_server/T/) for all DMA including network I/O. +* Constellation's [network encryption](../architecture/networking.md) via Cilium and WireGuard, which protects data in-transit. +* [AMD SEV using SWIOTLB bounce buffers](https://lore.kernel.org/all/20200204193500.GA15564@ashkalra_ubuntu_server/T/) for all DMA including network I/O. ##### Pod-to-Pod @@ -132,7 +134,6 @@ The results for "Pod-to-Pod" on GCP are as follows: In our recent comparison of Constellation on GCP with GKE, Constellation has 58% less TCP bandwidth. However, UDP bandwidth was slightly better with Constellation, thanks to its higher MTU. Similarly, when comparing Constellation on Azure with AKS using CVMs, Constellation achieved approximately 10% less TCP and 40% less UDP bandwidth. - #### Storage I/O Azure and GCP offer persistent storage for their Kubernetes services AKS and GKE via the Container Storage Interface (CSI). CSI storage in Kubernetes is available via `PersistentVolumes` (PV) and consumed via `PersistentVolumeClaims` (PVC).
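As a minimal illustration of such a PVC request in a Constellation cluster, the sketch below assumes the default encrypted storage class name `integrity-encrypted-rwo` from the storage docs and the 400 GiB volume size used in this benchmark; both values are illustrative:

```bash
# Request a 400 GiB volume via the default encrypted storage class (name and size are illustrative).
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: benchmark-pvc
spec:
  accessModes: ["ReadWriteOnce"]
  storageClassName: integrity-encrypted-rwo
  resources:
    requests:
      storage: 400Gi
EOF
```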
@@ -142,25 +143,21 @@ Similarly, upon a PVC request, Constellation will provision a PV via a default s For Constellation on Azure and AKS, the benchmark ran with Azure Disk storage [Standard SSD](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#standard-ssds) of 400 GiB size. The [DC4as machine type](https://learn.microsoft.com/en-us/azure/virtual-machines/dasv5-dadsv5-series#dasv5-series) with four cores provides the following maximum performance: - - 6400 (20000 burst) IOPS - 144 MB/s (600 MB/s burst) throughput However, the performance is bound by the capabilities of the [512 GiB Standard SSD size](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#standard-ssds) (the size class of 400 GiB volumes): - - 500 (600 burst) IOPS - 60 MB/s (150 MB/s burst) throughput For Constellation on GCP and GKE, the benchmark ran with Compute Engine Persistent Disk Storage [pd-balanced](https://cloud.google.com/compute/docs/disks) of 400 GiB size. The N2D machine type with four cores and pd-balanced provides the following [maximum performance](https://cloud.google.com/compute/docs/disks/performance#n2d_vms): - - 3,000 read IOPS - 15,000 write IOPS - 240 MB/s read throughput - 240 MB/s write throughput However, the performance is bound by the capabilities of a [`Zonal balanced PD`](https://cloud.google.com/compute/docs/disks/performance#zonal-persistent-disks) with 400 GiB size: - - 2400 read IOPS - 2400 write IOPS - 112 MB/s read throughput @@ -181,7 +178,8 @@ The following `fio` settings were used: - IOPS: 4 KB blocks and 128 iodepth - Bandwidth: 1024 KB blocks and 128 iodepth -For more details, see the [`fio` test configuration](https://github.com/edgelesssys/constellation/blob/main/.github/actions/e2e_benchmark/fio.ini). +For more details, see the [`fio` test configuration](../../../../.github/actions/e2e_benchmark/fio.ini). + The results for IOPS on Azure are as follows: diff --git a/docs/versioned_docs/version-2.6/overview/product.md b/docs/versioned_docs/version-2.6/overview/product.md index e42596fcc..ba7181aa9 100644 --- a/docs/versioned_docs/version-2.6/overview/product.md +++ b/docs/versioned_docs/version-2.6/overview/product.md @@ -6,6 +6,6 @@ From a security perspective, Constellation implements the [Confidential Kubernet From an operational perspective, Constellation provides the following key features: -* **Native support for different clouds**: Constellation works on Amazon Web Services (AWS), Microsoft Azure, and Google Cloud Platform (GCP). Support for OpenStack-based environments is coming with a future release. Constellation securely interfaces with the cloud infrastructure to provide [cluster autoscaling](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), [dynamic persistent volumes](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/), and [service load balancing](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). +* **Native support for different clouds**: Constellation works on Microsoft Azure, Google Cloud Platform (GCP), and Amazon Web Services (AWS). Support for OpenStack-based environments is coming with a future release. 
Constellation securely interfaces with the cloud infrastructure to provide [cluster autoscaling](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), [dynamic persistent volumes](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/), and [service load balancing](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). * **High availability**: Constellation uses a [multi-master architecture](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/) with a [stacked etcd topology](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/ha-topology/#stacked-etcd-topology) to ensure high availability. * **Integrated Day-2 operations**: Constellation lets you securely [upgrade](../workflows/upgrade.md) your cluster to a new release. It also lets you securely [recover](../workflows/recovery.md) a failed cluster. Both with a single command. diff --git a/docs/versioned_docs/version-2.6/workflows/config.md b/docs/versioned_docs/version-2.6/workflows/config.md index 56979ee13..5da01beeb 100644 --- a/docs/versioned_docs/version-2.6/workflows/config.md +++ b/docs/versioned_docs/version-2.6/workflows/config.md @@ -4,7 +4,7 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + --- @@ -14,29 +14,29 @@ Before you can create your cluster, you need to configure the identity and acces You can generate a configuration file for your CSP by using the following CLI command: - - + + ```bash constellation config generate azure ``` - - + + ```bash constellation config generate gcp ``` - - + + ```bash constellation config generate aws ``` - - + + This creates the file `constellation-conf.yaml` in the current directory. @@ -47,25 +47,25 @@ You can also automatically generate a configuration file by adding the `--genera ## Choosing a VM type Constellation supports the following VM types: - - + + By default, Constellation uses `Standard_DC4as_v5` CVMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. For CVMs, any VM type with a minimum of 4 vCPUs from the [DCasv5 & DCadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series) or [ECasv5 & ECadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/ecasv5-ecadsv5-series) families is supported. You can also run `constellation config instance-types` to get the list of all supported options. - - + + By default, Constellation uses `n2d-standard-4` VMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. Supported are all machines with a minimum of 4 vCPUs from the [C2D](https://cloud.google.com/compute/docs/compute-optimized-machines#c2d_machine_types) or [N2D](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines) family. You can run `constellation config instance-types` to get the list of all supported options. - - + + By default, Constellation uses `m6a.xlarge` VMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. Supported are all nitroTPM-enabled machines with a minimum of 4 vCPUs (`xlarge` or larger). 
Refer to the [list of nitroTPM-enabled instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enable-nitrotpm-prerequisites.html) or run `constellation config instance-types` to get the list of all supported options. - - + + Fill the desired VM type into the **instanceType** field in the `constellation-conf.yml` file. @@ -79,8 +79,8 @@ See also Constellation's [Kubernetes support policy](../architecture/versions.md You can create an IAM configuration for your cluster automatically using the `constellation iam create` command. If you haven't generated a configuration file yet, you can do so by adding the `--generate-config` flag to the command. This creates a configuration file and populates it with the created IAM values. - - + + You must be authenticated with the [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli) in the shell session. @@ -103,23 +103,23 @@ Paste the output into the corresponding fields of the `constellation-conf.yaml` Since `clientSecretValue` is a sensitive value, you can leave it empty in the configuration file and pass it via an environment variable instead. To this end, create the environment variable `CONSTELL_AZURE_CLIENT_SECRET_VALUE` and set it to the secret value. ::: - - + + You must be authenticated with the [GCP CLI](https://cloud.google.com/sdk/gcloud) in the shell session. ```bash -constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --serviceAccountID=constell-test +constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west2-a --serviceAccountID=constell-test ``` -This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. +This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west2-a` creating a new service account `constell-test`. Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `N2D`. Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - + + You must be authenticated with the [AWS CLI](https://aws.amazon.com/en/cli/) in the shell session. @@ -141,16 +141,16 @@ You can find a list of all [regions in AWS's documentation](https://docs.aws.ama Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - + +
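As noted in the Azure section above, the client secret can be supplied via an environment variable instead of the configuration file; a minimal sketch (the value is a placeholder) is:

```bash
# Keep the sensitive client secret out of constellation-conf.yaml (placeholder value).
export CONSTELL_AZURE_CLIENT_SECRET_VALUE='<your-client-secret-value>'
```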
Alternatively, you can manually create the IAM configuration on your CSP. The following describes the configuration fields and how you obtain the required information or create the required resources. - - + + * **subscription**: The UUID of your Azure subscription, e.g., `8b8bd01f-efd9-4113-9bd1-c82137c32da7`. @@ -194,19 +194,19 @@ The following describes the configuration fields and how you obtain the required Since this is a sensitive value, alternatively you can leave `clientSecretValue` empty in the configuration file and pass it via an environment variable instead. To this end, create the environment variable `CONSTELL_AZURE_CLIENT_SECRET_VALUE` and set it to the secret value. ::: - + - + * **project**: The ID of your GCP project, e.g., `constellation-129857`. You can find it on the [welcome screen of your GCP project](https://console.cloud.google.com/welcome). For more information refer to [Google's documentation](https://support.google.com/googleapi/answer/7014113). -* **region**: The GCP region you want to deploy your cluster in, e.g., `us-central1`. +* **region**: The GCP region you want to deploy your cluster in, e.g., `us-west1`. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). -* **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-central1-a`. +* **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-west1-a`. You can find a [list of all zones in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). @@ -220,9 +220,9 @@ The following describes the configuration fields and how you obtain the required Afterward, create and download a new JSON key for this service account. Place the downloaded file in your Constellation workspace, and set the config parameter to the filename, e.g., `constellation-129857-15343dba46cb.json`. - + - + * **region**: The name of your chosen AWS data center region, e.g., `us-east-2`. @@ -251,9 +251,9 @@ The following describes the configuration fields and how you obtain the required Alternatively, you can create the AWS profile with a tool of your choice. Use the JSON policy in [main.tf](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam/main.tf) in the resource `aws_iam_policy.worker_node_policy`. - + - +
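One way to create the AWS profile referenced above is the standard interactive AWS CLI prompt; the profile name shown is an illustrative choice:

```bash
# Interactively create a named AWS profile holding the credentials for the Constellation IAM user.
aws configure --profile constellation
```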
Now that you've configured your CSP, you can [create your cluster](./create.md). diff --git a/docs/versioned_docs/version-2.6/workflows/create.md b/docs/versioned_docs/version-2.6/workflows/create.md index 81ed55582..d527618b0 100644 --- a/docs/versioned_docs/version-2.6/workflows/create.md +++ b/docs/versioned_docs/version-2.6/workflows/create.md @@ -4,7 +4,7 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + --- @@ -26,8 +26,8 @@ Before you create the cluster, make sure to have a [valid configuration file](./ ### Create - - + + Choose the initial size of your cluster. The following command creates a cluster with one control-plane and two worker nodes: @@ -40,8 +40,8 @@ For details on the flags, consult the command help via `constellation create -h` *create* stores your cluster's state in a [`constellation-terraform`](../architecture/orchestration.md#cluster-creation-process) directory in your workspace. - - + + Terraform allows for an easier GitOps integration as well as meeting regulatory requirements. Since the Constellation CLI also uses Terraform under the hood, you can reuse the same Terraform files. @@ -80,8 +80,8 @@ CONSTELL_CSP=$(cat constellation-conf.yaml | yq ".provider | keys | .[0]") jq --null-input --arg cloudprovider "$CONSTELL_CSP" --arg ip "$CONSTELL_IP" --arg initsecret "$CONSTELL_INIT_SECRET" '{"cloudprovider":$cloudprovider,"ip":$ip,"initsecret":$initsecret}' > constellation-id.json ``` - - + + ## The *init* step diff --git a/docs/versioned_docs/version-2.6/workflows/recovery.md b/docs/versioned_docs/version-2.6/workflows/recovery.md index 35596b8c9..c26fb32eb 100644 --- a/docs/versioned_docs/version-2.6/workflows/recovery.md +++ b/docs/versioned_docs/version-2.6/workflows/recovery.md @@ -16,8 +16,8 @@ You can check the health status of the nodes via the cloud service provider (CSP Constellation provides logging information on the boot process and status via [cloud logging](troubleshooting.md#cloud-logging). In the following, you'll find detailed descriptions for identifying clusters stuck in recovery for each CSP. - - + + In the Azure portal, find the cluster's resource group. Inside the resource group, open the control plane *Virtual machine scale set* `constellation-scale-set-controlplanes-`. @@ -51,8 +51,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. - - + + First, check that the control plane *Instance Group* has enough members in a *Ready* state. In the GCP Console, go to **Instance Groups** and check the group for the cluster's control plane `-control-plane-`. @@ -87,8 +87,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. - - + + First, open the AWS console to view all Auto Scaling Groups (ASGs) in the region of your cluster. Select the ASG of the control plane `--control-plane` and check that enough members are in a *Running* state. @@ -118,8 +118,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. 
- - + + ## Recover a cluster diff --git a/docs/versioned_docs/version-2.6/workflows/sbom.md b/docs/versioned_docs/version-2.6/workflows/sbom.md index 92550c182..44b347a55 100644 --- a/docs/versioned_docs/version-2.6/workflows/sbom.md +++ b/docs/versioned_docs/version-2.6/workflows/sbom.md @@ -1,6 +1,6 @@ # Consume software bill of materials (SBOMs) - + --- @@ -19,7 +19,7 @@ JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== -----END PUBLIC KEY----- ``` -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). +The public key is also available for download at and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). Make sure the key is available in a file named `cosign.pub` to execute the following examples. ::: @@ -40,7 +40,7 @@ cosign verify-blob --key cosign.pub --signature constellation.spdx.sbom.sig cons ### Container Images -SBOMs for container images are [attached to the image using Cosign](https://docs.sigstore.dev/cosign/signing/other_types/#sboms-software-bill-of-materials) and uploaded to the same registry. +SBOMs for container images are [attached to the image using Cosign](https://docs.sigstore.dev/signing/other_types#sboms-software-bill-of-materials) and uploaded to the same registry. As a consumer, use cosign to download and verify the SBOM: diff --git a/docs/versioned_docs/version-2.6/workflows/scale.md b/docs/versioned_docs/version-2.6/workflows/scale.md index bce045c66..3b7c0d479 100644 --- a/docs/versioned_docs/version-2.6/workflows/scale.md +++ b/docs/versioned_docs/version-2.6/workflows/scale.md @@ -48,23 +48,23 @@ kubectl -n kube-system get nodes Alternatively, you can manually scale your cluster up or down: - - + + 1. Find your Constellation resource group. 2. Select the `scale-set-workers`. 3. Go to **settings** and **scaling**. 4. Set the new **instance count** and **save**. - - + + 1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). 2. **Edit** the **worker** instance group. 3. Set the new **number of instances** and **save**. - - + + :::caution @@ -72,8 +72,8 @@ Scaling isn't yet implemented for AWS. If you require this feature, [let us know ::: - - + + ## Control-plane node scaling @@ -81,24 +81,24 @@ Control-plane nodes can **only be scaled manually and only scaled up**! To increase the number of control-plane nodes, follow these steps: - + - + 1. Find your Constellation resource group. 2. Select the `scale-set-controlplanes`. 3. Go to **settings** and **scaling**. 4. Set the new (increased) **instance count** and **save**. - - + + 1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). 2. **Edit** the **control-plane** instance group. 3. Set the new (increased) **number of instances** and **save**. - - + + :::caution @@ -106,7 +106,7 @@ Scaling isn't yet implemented for AWS. If you require this feature, [let us know ::: - - + + If you scale down the number of control-planes nodes, the removed nodes won't be able to exit the `etcd` cluster correctly. This will endanger the quorum that's required to run a stable Kubernetes control plane. 
diff --git a/docs/versioned_docs/version-2.6/workflows/storage.md b/docs/versioned_docs/version-2.6/workflows/storage.md index be9998676..d0e5b188f 100644 --- a/docs/versioned_docs/version-2.6/workflows/storage.md +++ b/docs/versioned_docs/version-2.6/workflows/storage.md @@ -21,14 +21,14 @@ For more details see [encrypted persistent storage](../architecture/encrypted-st Constellation supports the following drivers, which offer node-level encryption and optional integrity protection. - - + + **Constellation CSI driver for Azure Disk**: Mount Azure [Disk Storage](https://azure.microsoft.com/en-us/services/storage/disks/#overview) into your Constellation cluster. See the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-azuredisk-csi-driver) for more information. Since Azure Disks are mounted as ReadWriteOnce, they're only available to a single pod. - - + + **Constellation CSI driver for GCP Persistent Disk**: Mount [Persistent Disk](https://cloud.google.com/persistent-disk) block storage into your Constellation cluster. @@ -36,8 +36,8 @@ This includes support for [volume snapshots](https://cloud.google.com/kubernetes You can use them to bring a volume back to a prior state or provision new volumes. Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-gcp-compute-persistent-disk-csi-driver) for information about the configuration. - - + + :::caution @@ -47,8 +47,8 @@ You may use other (non-confidential) CSI drivers that are compatible with Kubern ::: - - + + Note that in case the options above aren't a suitable solution for you, Constellation is compatible with all other CSI-based storage options. For example, you can use [Azure Files](https://docs.microsoft.com/en-us/azure/storage/files/storage-files-introduction) or [GCP Filestore](https://cloud.google.com/filestore) with Constellation out of the box. Constellation is just not providing transparent encryption on the node level for these storage types yet. @@ -57,8 +57,8 @@ Note that in case the options above aren't a suitable solution for you, Constell The Constellation CLI automatically installs Constellation's CSI driver for the selected CSP in your cluster. If you don't need a CSI driver or wish to deploy your own, you can disable the automatic installation by setting `deployCSIDriver` to `false` in your Constellation config file. - - + + Azure comes with two storage classes by default. @@ -86,8 +86,8 @@ Note that volume expansion isn't supported for integrity-protected disks. ::: - - + + GCP comes with two storage classes by default. @@ -115,8 +115,8 @@ Note that volume expansion isn't supported for integrity-protected disks. ::: - - + + :::caution @@ -126,8 +126,8 @@ You may use other (non-confidential) CSI drivers that are compatible with Kubern ::: - - + + 1. Create a [persistent volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) @@ -186,8 +186,8 @@ The default storage class is responsible for all persistent volume claims that d Constellation creates a storage class with encryption enabled and sets this as the default class. In case you wish to change it, follow the steps below: - - + + 1. 
List the storage classes in your cluster: @@ -233,8 +233,8 @@ In case you wish to change it, follow the steps below: integrity-encrypted-rwo (default) azuredisk.csi.confidential.cloud Delete Immediate false 1d ``` - - + + 1. List the storage classes in your cluster: @@ -280,8 +280,8 @@ In case you wish to change it, follow the steps below: integrity-encrypted-rwo (default) gcp.csi.confidential.cloud Delete Immediate false 1d ``` - - + + :::caution @@ -291,5 +291,5 @@ You may use other (non-confidential) CSI drivers that are compatible with Kubern ::: - - + + diff --git a/docs/versioned_docs/version-2.6/workflows/terminate.md b/docs/versioned_docs/version-2.6/workflows/terminate.md index f33489ca5..647eadb42 100644 --- a/docs/versioned_docs/version-2.6/workflows/terminate.md +++ b/docs/versioned_docs/version-2.6/workflows/terminate.md @@ -4,7 +4,7 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + --- @@ -16,8 +16,8 @@ All ephemeral storage and state of your cluster will be lost. Make sure any data ::: - - + + Terminate the cluster by running: ```bash @@ -40,8 +40,8 @@ resources manually. Just run the `terminate` command again afterward to continue ::: - - + + Terminate the cluster by running: ```bash @@ -56,5 +56,5 @@ rm constellation-id.json constellation-admin.conf Only the `constellation-mastersecret.json` and the configuration file remain. - - + + diff --git a/docs/versioned_docs/version-2.6/workflows/troubleshooting.md b/docs/versioned_docs/version-2.6/workflows/troubleshooting.md index 6bdf1d75c..801bb995a 100644 --- a/docs/versioned_docs/version-2.6/workflows/troubleshooting.md +++ b/docs/versioned_docs/version-2.6/workflows/troubleshooting.md @@ -10,7 +10,6 @@ If something doesn't work, check out the [known issues](https://github.com/edgel ## Azure: Resource Providers can't be registered On Azure, you may receive the following error when running `create` or `terminate` with limited IAM permissions: - ```shell-session Error: Error ensuring Resource Providers are registered. @@ -27,13 +26,11 @@ To continue, please ensure that the [required resource providers](../getting-sta Afterward, set `ARM_SKIP_PROVIDER_REGISTRATION=true` as an environment variable and either run `create` or `terminate` again. For example: - ```bash ARM_SKIP_PROVIDER_REGISTRATION=true constellation create --control-plane-nodes 1 --worker-nodes 2 -y ``` Or alternatively, for `terminate`: - ```bash ARM_SKIP_PROVIDER_REGISTRATION=true constellation terminate ``` @@ -44,8 +41,8 @@ To provide information during early stages of the node's boot process, Constella You can view these information in the follow places: - - + + 1. In your Azure subscription find the Constellation resource group. 2. Inside the resource group find the Application Insights resource called `constellation-insights-*`. @@ -55,8 +52,8 @@ You can view these information in the follow places: To **find the disk UUIDs** use the following query: `traces | where message contains "Disk UUID"` - - + + 1. Select the project that hosts Constellation. 2. Go to the `Compute Engine` service. @@ -71,16 +68,16 @@ Constellation uses the default bucket to store logs. Its [default retention peri ::: - - + + 1. Open [AWS CloudWatch](https://console.aws.amazon.com/cloudwatch/home) 2. Select [Log Groups](https://console.aws.amazon.com/cloudwatch/home#logsV2:log-groups) 3. Select the log group that matches the name of your cluster. 4. Select the log stream for control or worker type nodes. 
- - + + ## Connect to nodes diff --git a/docs/versioned_docs/version-2.6/workflows/trusted-launch.md b/docs/versioned_docs/version-2.6/workflows/trusted-launch.md index 11d0a096c..13bd63ba6 100644 --- a/docs/versioned_docs/version-2.6/workflows/trusted-launch.md +++ b/docs/versioned_docs/version-2.6/workflows/trusted-launch.md @@ -14,7 +14,7 @@ Constellation supports trusted launch VMs with instance types `Standard_D*_v4` a Azure currently doesn't support [community galleries for trusted launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/share-gallery-community). Thus, you need to manually import the Constellation node image into your cloud subscription. -The latest image is available at `https://cdn.confidential.cloud/constellation/images/azure/trusted-launch/v2.2.0/constellation.img`. Simply adjust the version number to download a newer version. +The latest image is available at . Simply adjust the version number to download a newer version. After you've downloaded the image, create a resource group `constellation-images` in your Azure subscription and import the image. You can use a script to do this: @@ -26,7 +26,6 @@ AZURE_IMAGE_VERSION=2.2.0 AZURE_RESOURCE_GROUP_NAME=constellation-images AZURE_I ``` The script creates the following resources: - 1. A new image gallery with the default name `constellation-import` 2. A new image definition with the default name `constellation` 3. The actual image with the provided version. In this case `2.2.0` diff --git a/docs/versioned_docs/version-2.6/workflows/verify-cli.md b/docs/versioned_docs/version-2.6/workflows/verify-cli.md index aa2df4be4..1280c51b0 100644 --- a/docs/versioned_docs/version-2.6/workflows/verify-cli.md +++ b/docs/versioned_docs/version-2.6/workflows/verify-cli.md @@ -4,11 +4,11 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + --- -Edgeless Systems uses [sigstore](https://www.sigstore.dev/) and [SLSA](https://slsa.dev) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/cosign/signing/overview/), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at `https://rekor.sigstore.dev`. +Edgeless Systems uses [sigstore](https://www.sigstore.dev/) and [SLSA](https://slsa.dev) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/signing/quickstart), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at . :::note The public key for Edgeless Systems' long-term code-signing key is: @@ -20,7 +20,7 @@ JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== -----END PUBLIC KEY----- ``` -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). +The public key is also available for download at and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). ::: The Rekor transparency log is a public append-only ledger that verifies and records signatures and associated metadata. 
The Rekor transparency log enables everyone to observe the sequence of (software) signatures issued by Edgeless Systems and many other parties. The transparency log allows for the public identification of dubious or malicious signatures. @@ -33,7 +33,7 @@ You don't need to verify the Constellation node images. This is done automatical ## Verify the signature -First, [install the Cosign CLI](https://docs.sigstore.dev/cosign/system_config/installation/). Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example: +First, [install the Cosign CLI](https://docs.sigstore.dev/system_config/installation). Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example: ```shell-session $ cosign verify-blob --key https://edgeless.systems/es.pub --signature constellation-linux-amd64.sig constellation-linux-amd64 diff --git a/docs/versioned_docs/version-2.7/architecture/attestation.md b/docs/versioned_docs/version-2.7/architecture/attestation.md index 20f9909fd..0c7a1487b 100644 --- a/docs/versioned_docs/version-2.7/architecture/attestation.md +++ b/docs/versioned_docs/version-2.7/architecture/attestation.md @@ -121,8 +121,8 @@ Constellation allows to specify in the config which measurements should be enfor Enforcing non-reproducible measurements controlled by the cloud provider means that changes in these values require manual updates to the cluster's config. By default, Constellation only enforces measurements that are stable values produced by the infrastructure or by Constellation directly. - - + + Constellation uses the [vTPM](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch#vtpm) feature of Azure CVMs for runtime measurements. This vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. @@ -152,8 +152,8 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + Constellation uses the [vTPM](https://cloud.google.com/compute/confidential-vm/docs/about-cvm) feature of CVMs on GCP for runtime measurements. Note that this vTPM doesn't run inside the hardware-protected CVM context, but is emulated by the hypervisor. @@ -185,8 +185,8 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + Constellation uses the [vTPM](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html) (NitroTPM) feature of the [AWS Nitro System](http://aws.amazon.com/ec2/nitro/) on AWS for runtime measurements. 
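To make the runtime-measurement tables above concrete: on a TPM 2.0 system, the referenced PCR values can be dumped with tpm2-tools. This is purely an illustrative sketch; node shell access and the tpm2-tools package aren't part of the documented workflow:

```bash
# Dump the SHA-256 PCR banks that the attestation tables above reference.
# Assumes tpm2-tools is installed and a (v)TPM device is accessible.
tpm2_pcrread sha256:0,4,8,9,11,12,13,15
```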
@@ -217,8 +217,8 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + ## Cluster attestation diff --git a/docs/versioned_docs/version-2.7/architecture/keys.md b/docs/versioned_docs/version-2.7/architecture/keys.md index 553d9d4e2..f2c8c3fba 100644 --- a/docs/versioned_docs/version-2.7/architecture/keys.md +++ b/docs/versioned_docs/version-2.7/architecture/keys.md @@ -105,7 +105,7 @@ Initially, it will support the following KMSs: * [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/#product-overview) * [KMIP-compatible KMS](https://www.oasis-open.org/committees/tc_home.php?wg_abbrev=kmip) -Storing the keys in Cloud KMS of AWS, Azure, or GCP binds the key usage to the particular cloud identity access management (IAM). +Storing the keys in Cloud KMS of AWS, GCP, or Azure binds the key usage to the particular cloud identity access management (IAM). In the future, Constellation will support remote attestation-based access policies for Cloud KMS once available. Note that using a Cloud KMS limits the isolation and protection to the guarantees of the particular offering. diff --git a/docs/versioned_docs/version-2.7/getting-started/first-steps-local.md b/docs/versioned_docs/version-2.7/getting-started/first-steps-local.md index 81d8e141d..707074bb9 100644 --- a/docs/versioned_docs/version-2.7/getting-started/first-steps-local.md +++ b/docs/versioned_docs/version-2.7/getting-started/first-steps-local.md @@ -30,8 +30,8 @@ Both options use virtualization to create a local cluster with control-plane nod ## Create a cluster - - + + With the `constellation mini` command, you can deploy and test Constellation locally. This mode is called MiniConstellation. Conceptually, MiniConstellation is similar to [MicroK8s](https://microk8s.io/), [K3s](https://k3s.io/), and [minikube](https://minikube.sigs.k8s.io/docs/). @@ -59,8 +59,8 @@ constellation mini up This will configure your current directory as the [workspace](../architecture/orchestration.md#workspaces) for this cluster. All `constellation` commands concerning this cluster need to be issued from this directory. - - + + With the QEMU provider, you can create a local Constellation cluster as if it were in the cloud. The provider uses [QEMU](https://www.qemu.org/) to create multiple VMs for the cluster nodes, which interact with each other. @@ -138,8 +138,8 @@ attaching persistent storage, or autoscaling aren't available. export KUBECONFIG="$PWD/constellation-admin.conf" ``` - - + + ## Connect to the cluster @@ -192,8 +192,8 @@ worker-0 Ready 32s v1.24.6 ## Terminate your cluster - - + + Once you are done, you can clean up the created resources using the following command: @@ -204,8 +204,8 @@ constellation mini down This will destroy your cluster and clean up your workspace. The VM image and cluster configuration file (`constellation-conf.yaml`) will be kept and may be reused to create new clusters. - - + + Once you are done, you can clean up the created resources using the following command: @@ -233,8 +233,8 @@ Your Constellation cluster was terminated successfully. This will destroy your cluster and clean up your workspace. The VM image and cluster configuration file (`constellation-conf.yaml`) will be kept and may be reused to create new clusters. 
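Because the VM image and `constellation-conf.yaml` are kept, re-creating the MiniConstellation flavor is quick. A minimal round-trip using only commands that appear earlier on this page:

```bash
# Spin the local cluster back up from the kept configuration.
constellation mini up

# Point kubectl at the new cluster and check that the nodes are Ready.
export KUBECONFIG="$PWD/constellation-admin.conf"
kubectl get nodes
```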
- - + + ## Troubleshooting diff --git a/docs/versioned_docs/version-2.7/getting-started/first-steps.md b/docs/versioned_docs/version-2.7/getting-started/first-steps.md index c1c3a3fe0..1569afc14 100644 --- a/docs/versioned_docs/version-2.7/getting-started/first-steps.md +++ b/docs/versioned_docs/version-2.7/getting-started/first-steps.md @@ -17,9 +17,9 @@ If you encounter any problem with the following steps, make sure to use the [lat First, you need to create a [configuration file](../workflows/config.md) and an [IAM configuration](../workflows/config.md#creating-an-iam-configuration). The easiest way to do this is the following CLI command: - + - + ```bash constellation iam create azure --region=westus --resourceGroup=constellTest --servicePrincipal=spTest --generate-config @@ -33,21 +33,21 @@ If you encounter any problem with the following steps, make sure to use the [lat * `northeurope` * `westeurope` - + - + ```bash - constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --serviceAccountID=constell-test --generate-config + constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west2-a --serviceAccountID=constell-test --generate-config ``` - This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. It also creates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. + This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west2-a` creating a new service account `constell-test`. It also creates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `C2D` or `N2D`. - + - + ```bash constellation iam create aws --zone=eu-central-1a --prefix=constellTest --generate-config @@ -66,8 +66,8 @@ If you encounter any problem with the following steps, make sure to use the [lat You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). - - + + :::tip To learn about all options you have for managing IAM resources and Constellation configuration, see the [Configuration workflow](../workflows/config.md). diff --git a/docs/versioned_docs/version-2.7/getting-started/install.md b/docs/versioned_docs/version-2.7/getting-started/install.md index ac0ef3da8..9ba727d81 100644 --- a/docs/versioned_docs/version-2.7/getting-started/install.md +++ b/docs/versioned_docs/version-2.7/getting-started/install.md @@ -11,15 +11,15 @@ Make sure the following requirements are met: - Your machine is running Linux or macOS - You have admin rights on your machine - [kubectl](https://kubernetes.io/docs/tasks/tools/) is installed -- Your CSP is Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) +- Your CSP is Microsoft Azure, Google Cloud Platform (GCP), or Amazon Web Services (AWS) ## Install the Constellation CLI The CLI executable is available at [GitHub](https://github.com/edgelesssys/constellation/releases). Install it with the following commands: - - + + 1. 
Download the CLI: @@ -35,8 +35,8 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-linux-amd64 /usr/local/bin/constellation ``` - - + + 1. Download the CLI: @@ -52,9 +52,10 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-linux-arm64 /usr/local/bin/constellation ``` - - + + + 1. Download the CLI: @@ -70,9 +71,11 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-darwin-arm64 /usr/local/bin/constellation ``` - - + + + + 1. Download the CLI: @@ -88,8 +91,8 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-darwin-amd64 /usr/local/bin/constellation ``` - - + + :::tip The CLI supports autocompletion for various shells. To set it up, run `constellation completion` and follow the given steps. @@ -105,41 +108,38 @@ If you don't have a cloud subscription, you can try [MiniConstellation](first-st ### Required permissions - - + + The following [resource providers need to be registered](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/resource-providers-and-types#register-resource-provider) in your subscription: - -- `Microsoft.Attestation` -- `Microsoft.Compute` -- `Microsoft.Insights` -- `Microsoft.ManagedIdentity` -- `Microsoft.Network` +* `Microsoft.Attestation` \[2] +* `Microsoft.Compute` +* `Microsoft.Insights` +* `Microsoft.ManagedIdentity` +* `Microsoft.Network` By default, Constellation tries to register these automatically if they haven't been registered before. To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: - -- `*/register/action` \[1] -- `Microsoft.Authorization/roleAssignments/*` -- `Microsoft.Authorization/roleDefinitions/*` -- `Microsoft.ManagedIdentity/userAssignedIdentities/*` -- `Microsoft.Resources/subscriptions/resourcegroups/*` +* `*/register/action` \[1] +* `Microsoft.Authorization/roleAssignments/*` +* `Microsoft.Authorization/roleDefinitions/*` +* `Microsoft.ManagedIdentity/userAssignedIdentities/*` +* `Microsoft.Resources/subscriptions/resourcegroups/*` The built-in `Owner` role is a superset of these permissions. To [create a Constellation cluster](../workflows/create.md#the-create-step), you need the following permissions: - -- `Microsoft.Attestation/attestationProviders/*` -- `Microsoft.Compute/virtualMachineScaleSets/*` -- `Microsoft.Insights/components/*` -- `Microsoft.ManagedIdentity/userAssignedIdentities/*` -- `Microsoft.Network/loadBalancers/*` -- `Microsoft.Network/loadBalancers/backendAddressPools/*` -- `Microsoft.Network/networkSecurityGroups/*` -- `Microsoft.Network/publicIPAddresses/*` -- `Microsoft.Network/virtualNetworks/*` -- `Microsoft.Network/virtualNetworks/subnets/*` +* `Microsoft.Attestation/attestationProviders/*` \[2] +* `Microsoft.Compute/virtualMachineScaleSets/*` +* `Microsoft.Insights/components/*` +* `Microsoft.ManagedIdentity/userAssignedIdentities/*` +* `Microsoft.Network/loadBalancers/*` +* `Microsoft.Network/loadBalancers/backendAddressPools/*` +* `Microsoft.Network/networkSecurityGroups/*` +* `Microsoft.Network/publicIPAddresses/*` +* `Microsoft.Network/virtualNetworks/*` +* `Microsoft.Network/virtualNetworks/subnets/*` The built-in `Contributor` role is a superset of these permissions. 
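Whether the listed resource providers are already registered can be checked up front with the Azure CLI. A sketch, not something these docs prescribe:

```bash
# Show the registration state of each resource provider Constellation needs
# and register any that are still missing.
for ns in Microsoft.Attestation Microsoft.Compute Microsoft.Insights Microsoft.ManagedIdentity Microsoft.Network; do
  state=$(az provider show --namespace "$ns" --query registrationState -o tsv)
  echo "$ns: $state"
  if [ "$state" != "Registered" ]; then
    az provider register --namespace "$ns"
  fi
done
```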
@@ -147,91 +147,91 @@ Follow Microsoft's guide on [understanding](https://learn.microsoft.com/en-us/az 1: You can omit `*/register/Action` if the resource providers mentioned above are already registered and the `ARM_SKIP_PROVIDER_REGISTRATION` environment variable is set to `true` when creating the IAM configuration. - - +2: You can omit `Microsoft.Attestation/attestationProviders/*` and the registration of `Microsoft.Attestation` if `EnforceIDKeyDigest` isn't set to `MAAFallback` in the [config file](../workflows/config.md#configure-your-cluster). + + + Create a new project for Constellation or use an existing one. Enable the [Compute Engine API](https://console.cloud.google.com/apis/library/compute.googleapis.com) on it. To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: - -- `iam.serviceAccountKeys.create` -- `iam.serviceAccountKeys.delete` -- `iam.serviceAccountKeys.get` -- `iam.serviceAccounts.create` -- `iam.serviceAccounts.delete` -- `iam.serviceAccounts.get` -- `resourcemanager.projects.getIamPolicy` -- `resourcemanager.projects.setIamPolicy` +* `iam.serviceAccountKeys.create` +* `iam.serviceAccountKeys.delete` +* `iam.serviceAccountKeys.get` +* `iam.serviceAccounts.create` +* `iam.serviceAccounts.delete` +* `iam.serviceAccounts.get` +* `resourcemanager.projects.getIamPolicy` +* `resourcemanager.projects.setIamPolicy` Together, the built-in roles `roles/editor` and `roles/resourcemanager.projectIamAdmin` form a superset of these permissions. To [create a Constellation cluster](../workflows/create.md#the-create-step), you need the following permissions: - -- `compute.addresses.createInternal` -- `compute.addresses.deleteInternal` -- `compute.addresses.get` -- `compute.addresses.useInternal` -- `compute.backendServices.create` -- `compute.backendServices.delete` -- `compute.backendServices.get` -- `compute.backendServices.use` -- `compute.disks.create` -- `compute.firewalls.create` -- `compute.firewalls.delete` -- `compute.firewalls.get` -- `compute.globalAddresses.create` -- `compute.globalAddresses.delete` -- `compute.globalAddresses.get` -- `compute.globalAddresses.use` -- `compute.globalForwardingRules.create` -- `compute.globalForwardingRules.delete` -- `compute.globalForwardingRules.get` -- `compute.globalForwardingRules.setLabels` -- `compute.globalOperations.get` -- `compute.healthChecks.create` -- `compute.healthChecks.delete` -- `compute.healthChecks.get` -- `compute.healthChecks.useReadOnly` -- `compute.instanceGroupManagers.create` -- `compute.instanceGroupManagers.delete` -- `compute.instanceGroupManagers.get` -- `compute.instanceGroups.create` -- `compute.instanceGroups.delete` -- `compute.instanceGroups.get` -- `compute.instanceGroups.use` -- `compute.instances.create` -- `compute.instances.setLabels` -- `compute.instances.setMetadata` -- `compute.instances.setTags` -- `compute.instanceTemplates.create` -- `compute.instanceTemplates.delete` -- `compute.instanceTemplates.get` -- `compute.instanceTemplates.useReadOnly` -- `compute.networks.create` -- `compute.networks.delete` -- `compute.networks.get` -- `compute.networks.updatePolicy` -- `compute.routers.create` -- `compute.routers.delete` -- `compute.routers.get` -- `compute.routers.update` -- `compute.subnetworks.create` -- `compute.subnetworks.delete` -- `compute.subnetworks.get` -- `compute.subnetworks.use` -- `compute.targetTcpProxies.create` -- `compute.targetTcpProxies.delete` -- `compute.targetTcpProxies.get` -- 
`compute.targetTcpProxies.use` -- `iam.serviceAccounts.actAs` +* `compute.addresses.createInternal` +* `compute.addresses.deleteInternal` +* `compute.addresses.get` +* `compute.addresses.useInternal` +* `compute.backendServices.create` +* `compute.backendServices.delete` +* `compute.backendServices.get` +* `compute.backendServices.use` +* `compute.disks.create` +* `compute.firewalls.create` +* `compute.firewalls.delete` +* `compute.firewalls.get` +* `compute.globalAddresses.create` +* `compute.globalAddresses.delete` +* `compute.globalAddresses.get` +* `compute.globalAddresses.use` +* `compute.globalForwardingRules.create` +* `compute.globalForwardingRules.delete` +* `compute.globalForwardingRules.get` +* `compute.globalForwardingRules.setLabels` +* `compute.globalOperations.get` +* `compute.healthChecks.create` +* `compute.healthChecks.delete` +* `compute.healthChecks.get` +* `compute.healthChecks.useReadOnly` +* `compute.instanceGroupManagers.create` +* `compute.instanceGroupManagers.delete` +* `compute.instanceGroupManagers.get` +* `compute.instanceGroups.create` +* `compute.instanceGroups.delete` +* `compute.instanceGroups.get` +* `compute.instanceGroups.use` +* `compute.instances.create` +* `compute.instances.setLabels` +* `compute.instances.setMetadata` +* `compute.instances.setTags` +* `compute.instanceTemplates.create` +* `compute.instanceTemplates.delete` +* `compute.instanceTemplates.get` +* `compute.instanceTemplates.useReadOnly` +* `compute.networks.create` +* `compute.networks.delete` +* `compute.networks.get` +* `compute.networks.updatePolicy` +* `compute.routers.create` +* `compute.routers.delete` +* `compute.routers.get` +* `compute.routers.update` +* `compute.subnetworks.create` +* `compute.subnetworks.delete` +* `compute.subnetworks.get` +* `compute.subnetworks.use` +* `compute.targetTcpProxies.create` +* `compute.targetTcpProxies.delete` +* `compute.targetTcpProxies.get` +* `compute.targetTcpProxies.use` +* `iam.serviceAccounts.actAs` Together, the built-in roles `roles/editor`, `roles/compute.instanceAdmin` and `roles/resourcemanager.projectIamAdmin` form a superset of these permissions. Follow Google's guide on [understanding](https://cloud.google.com/iam/docs/understanding-roles) and [assigning roles](https://cloud.google.com/iam/docs/granting-changing-revoking-access). - - + + To set up a Constellation cluster, you need to perform two tasks that require permissions: create the infrastructure and create roles for cluster nodes. Both of these actions can be performed by different users, e.g., an administrator to create roles and a DevOps engineer to create the infrastructure. @@ -366,8 +366,8 @@ The built-in `PowerUserAccess` policy is a superset of these permissions. Follow Amazon's guide on [understanding](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) and [managing policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html). - - + + ### Authentication @@ -377,8 +377,8 @@ You need to authenticate with your CSP. The following lists the required steps f The steps for a *testing* environment are simpler. However, they may expose secrets to the CSP. If in doubt, follow the *production* steps. ::: - - + + **Testing** @@ -394,8 +394,8 @@ az login Other options are described in Azure's [authentication guide](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli). 
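After `az login`, a quick way to confirm which subscription and tenant the CLI will act on. A sketch; the query fields are standard Azure CLI output, not something these docs require:

```bash
# Print the subscription and tenant that az is currently authenticated against.
az account show --query '{subscription:id, tenant:tenantId}' --output json
```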
- - + + **Testing** @@ -418,8 +418,8 @@ Use one of the following options on a trusted machine: Follow [Google's guide](https://cloud.google.com/docs/authentication/production#manually) for setting up your credentials. - - + + **Testing** @@ -435,9 +435,10 @@ aws configure Options and first steps are described in the [AWS CLI documentation](https://docs.aws.amazon.com/cli/index.html). - + - + + ## Next steps diff --git a/docs/versioned_docs/version-2.7/overview/clouds.md b/docs/versioned_docs/version-2.7/overview/clouds.md index 30995a012..c95b3508a 100644 --- a/docs/versioned_docs/version-2.7/overview/clouds.md +++ b/docs/versioned_docs/version-2.7/overview/clouds.md @@ -24,23 +24,24 @@ The following table summarizes the state of features for different infrastructur With its [CVM offering](https://docs.microsoft.com/en-us/azure/confidential-computing/confidential-vm-overview), Azure provides the best foundations for Constellation. Regarding (3), Azure provides direct access to remote-attestation statements. However, regarding (4), the standard CVMs still include closed-source firmware running in VM Privilege Level (VMPL) 0. This firmware is signed by Azure. The signature is reflected in the remote-attestation statements of CVMs. Thus, the Azure closed-source firmware becomes part of Constellation's trusted computing base (TCB). -\* Recently, [Azure announced the open source paravisor OpenHCL](https://techcommunity.microsoft.com/blog/windowsosplatform/openhcl-the-new-open-source-paravisor/4273172). It's the foundation for fully open source and verifiable CVM firmware. Once Azure provides their CVM firmware with reproducible builds based on OpenHCL, (4) switches from *No* to *Yes*. Constellation will support OpenHCL based firmware on Azure in the future. +\* Recently, Azure [announced](https://techcommunity.microsoft.com/t5/azure-confidential-computing/azure-confidential-vms-using-sev-snp-dcasv5-ecasv5-are-now/ba-p/3573747) the *limited preview* of CVMs with customizable firmware. With this CVM type, (4) switches from *No* to *Yes*. Constellation will support customizable firmware on Azure in the future. ## Google Cloud Platform (GCP) -The [CVMs Generally Available in GCP](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview#amd_sev) are based on AMD SEV but don't have SNP features enabled. +The [CVMs Generally Available in GCP](https://cloud.google.com/compute/confidential-vm/docs/create-confidential-vm-instance) are based on AMD SEV but don't have SNP features enabled. CVMs with SEV-SNP enabled are currently in [private preview](https://cloud.google.com/blog/products/identity-security/rsa-snp-vm-more-confidential). Regarding (3), with their SEV-SNP offering Google provides direct access to remote-attestation statements. However, regarding (4), the CVMs still include closed-source firmware. Intel and Google have [collaborated](https://cloud.google.com/blog/products/identity-security/rsa-google-intel-confidential-computing-more-secure) to enhance the security of TDX, and have recently [revealed](https://venturebeat.com/security/intel-launches-confidential-computing-solution-for-virtual-machines/) their plans to make TDX compatible with Google Cloud. ## Amazon Web Services (AWS) - Amazon EC2 [supports AMD SEV-SNP](https://aws.amazon.com/de/about-aws/whats-new/2023/04/amazon-ec2-amd-sev-snp/). Regarding (3), AWS provides direct access to remote-attestation statements. 
However, attestation is partially based on the [NitroTPM](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html) for [measured boot](../architecture/attestation.md#measured-boot), which is a vTPM managed by the Nitro hypervisor. Hence, the hypervisor is currently part of Constellation's TCB. \* Regarding (4), the CVMs include initial firmware inside the CVM based on [OVMF](https://github.com/tianocore/tianocore.github.io/wiki/OVMF). Once this firmware will be reproducible and therefore verifiable, (4) switches from *No* to *Yes*. + + ## OpenStack OpenStack is an open-source cloud and infrastructure management software. It's used by many smaller CSPs and datacenters. In the latest *Yoga* version, OpenStack has basic support for CVMs. However, much depends on the employed kernel and hypervisor. Features (2)--(4) are likely to be a *Yes* with Linux kernel version 6.2. Thus, going forward, OpenStack on corresponding AMD or Intel hardware will be a viable underpinning for Constellation. diff --git a/docs/versioned_docs/version-2.7/overview/confidential-kubernetes.md b/docs/versioned_docs/version-2.7/overview/confidential-kubernetes.md index 1441c833a..2b6c6ed17 100644 --- a/docs/versioned_docs/version-2.7/overview/confidential-kubernetes.md +++ b/docs/versioned_docs/version-2.7/overview/confidential-kubernetes.md @@ -23,9 +23,9 @@ With the above, Constellation wraps an entire cluster into one coherent and veri ![Confidential Kubernetes](../_media/concept-constellation.svg) -## Comparison: Managed Kubernetes with CVMs +## Contrast: Managed Kubernetes with CVMs -In comparison, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. Consequently, this approach leaves a large attack surface, as is depicted in the following. +In contrast, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. Consequently, this approach leaves a large attack surface, as is depicted in the following. 
![Concept: Managed Kubernetes plus CVMs](../_media/concept-managed.svg) diff --git a/docs/versioned_docs/version-2.7/overview/performance.md b/docs/versioned_docs/version-2.7/overview/performance.md index aef594c46..54f31019a 100644 --- a/docs/versioned_docs/version-2.7/overview/performance.md +++ b/docs/versioned_docs/version-2.7/overview/performance.md @@ -63,6 +63,7 @@ The following infrastructure configurations was used: - CVM: `false` - Zone: `europe-west3-b` + ### Results #### Network @@ -70,7 +71,7 @@ The following infrastructure configurations was used: This section gives a thorough analysis of the network performance of Constellation, specifically focusing on measuring TCP and UDP bandwidth. The benchmark measured the bandwidth of pod-to-pod and pod-to-service connections between two different nodes using [`iperf`](https://iperf.fr/). -GKE and Constellation on GCP had a maximum network bandwidth of [10 Gbps](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines). +GKE and Constellation on GCP had a maximum network bandwidth of [10 Gbps](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machineshttps://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines). AKS with `Standard_D4as_v5` machines a maximum network bandwidth of [12.5 Gbps](https://learn.microsoft.com/en-us/azure/virtual-machines/dasv5-dadsv5-series#dasv5-series). The Confidential VM equivalent `Standard_DC4as_v5` currently has a network bandwidth of [1.25 Gbps](https://learn.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series#dcasv5-series-products). Therefore, to make the test comparable, both AKS and Constellation on Azure were running with `Standard_DC4as_v5` machines and 1.25 Gbps bandwidth. @@ -78,10 +79,11 @@ Therefore, to make the test comparable, both AKS and Constellation on Azure were Constellation on Azure and AKS used an MTU of 1500. Constellation on GCP used an MTU of 8896. GKE used an MTU of 1450. + The difference in network bandwidth can largely be attributed to two factors. -- Constellation's [network encryption](../architecture/networking.md) via Cilium and WireGuard, which protects data in-transit. -- [AMD SEV using SWIOTLB bounce buffers](https://lore.kernel.org/all/20200204193500.GA15564@ashkalra_ubuntu_server/T/) for all DMA including network I/O. +* Constellation's [network encryption](../architecture/networking.md) via Cilium and WireGuard, which protects data in-transit. +* [AMD SEV using SWIOTLB bounce buffers](https://lore.kernel.org/all/20200204193500.GA15564@ashkalra_ubuntu_server/T/) for all DMA including network I/O. ##### Pod-to-Pod @@ -132,7 +134,6 @@ The results for "Pod-to-Pod" on GCP are as follows: In our recent comparison of Constellation on GCP with GKE, Constellation has 58% less TCP bandwidth. However, UDP bandwidth was slightly better with Constellation, thanks to its higher MTU. Similarly, when comparing Constellation on Azure with AKS using CVMs, Constellation achieved approximately 10% less TCP and 40% less UDP bandwidth. - #### Storage I/O Azure and GCP offer persistent storage for their Kubernetes services AKS and GKE via the Container Storage Interface (CSI). CSI storage in Kubernetes is available via `PersistentVolumes` (PV) and consumed via `PersistentVolumeClaims` (PVC). 
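How such a PVC request looks in practice, as a minimal sketch: the claim name and size are made up, and the cluster's default (encrypted) storage class is used implicitly:

```bash
# Create a PVC against the default storage class and check that a PV gets bound.
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: bench-claim
spec:
  accessModes: ["ReadWriteOnce"]
  resources:
    requests:
      storage: 10Gi
EOF
kubectl get pvc bench-claim
```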
@@ -142,25 +143,21 @@ Similarly, upon a PVC request, Constellation will provision a PV via a default s For Constellation on Azure and AKS, the benchmark ran with Azure Disk storage [Standard SSD](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#standard-ssds) of 400 GiB size. The [DC4as machine type](https://learn.microsoft.com/en-us/azure/virtual-machines/dasv5-dadsv5-series#dasv5-series) with four cores provides the following maximum performance: - - 6400 (20000 burst) IOPS - 144 MB/s (600 MB/s burst) throughput However, the performance is bound by the capabilities of the [512 GiB Standard SSD size](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#standard-ssds) (the size class of 400 GiB volumes): - - 500 (600 burst) IOPS - 60 MB/s (150 MB/s burst) throughput For Constellation on GCP and GKE, the benchmark ran with Compute Engine Persistent Disk Storage [pd-balanced](https://cloud.google.com/compute/docs/disks) of 400 GiB size. The N2D machine type with four cores and pd-balanced provides the following [maximum performance](https://cloud.google.com/compute/docs/disks/performance#n2d_vms): - - 3,000 read IOPS - 15,000 write IOPS - 240 MB/s read throughput - 240 MB/s write throughput However, the performance is bound by the capabilities of a [`Zonal balanced PD`](https://cloud.google.com/compute/docs/disks/performance#zonal-persistent-disks) with 400 GiB size: - - 2400 read IOPS - 2400 write IOPS - 112 MB/s read throughput @@ -181,7 +178,8 @@ The following `fio` settings were used: - IOPS: 4 KB blocks and 128 iodepth - Bandwidth: 1024 KB blocks and 128 iodepth -For more details, see the [`fio` test configuration](https://github.com/edgelesssys/constellation/blob/main/.github/actions/e2e_benchmark/fio.ini). +For more details, see the [`fio` test configuration](../../../../.github/actions/e2e_benchmark/fio.ini). + The results for IOPS on Azure are as follows: diff --git a/docs/versioned_docs/version-2.7/overview/product.md b/docs/versioned_docs/version-2.7/overview/product.md index e42596fcc..ba7181aa9 100644 --- a/docs/versioned_docs/version-2.7/overview/product.md +++ b/docs/versioned_docs/version-2.7/overview/product.md @@ -6,6 +6,6 @@ From a security perspective, Constellation implements the [Confidential Kubernet From an operational perspective, Constellation provides the following key features: -* **Native support for different clouds**: Constellation works on Amazon Web Services (AWS), Microsoft Azure, and Google Cloud Platform (GCP). Support for OpenStack-based environments is coming with a future release. Constellation securely interfaces with the cloud infrastructure to provide [cluster autoscaling](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), [dynamic persistent volumes](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/), and [service load balancing](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). +* **Native support for different clouds**: Constellation works on Microsoft Azure, Google Cloud Platform (GCP), and Amazon Web Services (AWS). Support for OpenStack-based environments is coming with a future release. 
Constellation securely interfaces with the cloud infrastructure to provide [cluster autoscaling](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), [dynamic persistent volumes](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/), and [service load balancing](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). * **High availability**: Constellation uses a [multi-master architecture](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/) with a [stacked etcd topology](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/ha-topology/#stacked-etcd-topology) to ensure high availability. * **Integrated Day-2 operations**: Constellation lets you securely [upgrade](../workflows/upgrade.md) your cluster to a new release. It also lets you securely [recover](../workflows/recovery.md) a failed cluster. Both with a single command. diff --git a/docs/versioned_docs/version-2.7/workflows/config.md b/docs/versioned_docs/version-2.7/workflows/config.md index 032b22943..dd86a34a2 100644 --- a/docs/versioned_docs/version-2.7/workflows/config.md +++ b/docs/versioned_docs/version-2.7/workflows/config.md @@ -4,7 +4,7 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + --- @@ -14,29 +14,29 @@ Before you can create your cluster, you need to configure the identity and acces You can generate a configuration file for your CSP by using the following CLI command: - - + + ```bash constellation config generate azure ``` - - + + ```bash constellation config generate gcp ``` - - + + ```bash constellation config generate aws ``` - - + + This creates the file `constellation-conf.yaml` in the current directory. @@ -47,25 +47,25 @@ You can also automatically generate a configuration file by adding the `--genera ## Choosing a VM type Constellation supports the following VM types: - - + + By default, Constellation uses `Standard_DC4as_v5` CVMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. For CVMs, any VM type with a minimum of 4 vCPUs from the [DCasv5 & DCadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series) or [ECasv5 & ECadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/ecasv5-ecadsv5-series) families is supported. You can also run `constellation config instance-types` to get the list of all supported options. - - + + By default, Constellation uses `n2d-standard-4` VMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. Supported are all machines with a minimum of 4 vCPUs from the [C2D](https://cloud.google.com/compute/docs/compute-optimized-machines#c2d_machine_types) or [N2D](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines) family. You can run `constellation config instance-types` to get the list of all supported options. - - + + By default, Constellation uses `m6a.xlarge` VMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. Supported are all nitroTPM-enabled machines with a minimum of 4 vCPUs (`xlarge` or larger). 
Refer to the [list of nitroTPM-enabled instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enable-nitrotpm-prerequisites.html) or run `constellation config instance-types` to get the list of all supported options. - - + + Fill the desired VM type into the **instanceType** field in the `constellation-conf.yml` file. @@ -79,8 +79,8 @@ See also Constellation's [Kubernetes support policy](../architecture/versions.md You can create an IAM configuration for your cluster automatically using the `constellation iam create` command. If you haven't generated a configuration file yet, you can do so by adding the `--generate-config` flag to the command. This creates a configuration file and populates it with the created IAM values. - - + + You must be authenticated with the [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). @@ -103,23 +103,23 @@ Paste the output into the corresponding fields of the `constellation-conf.yaml` Since `clientSecretValue` is a sensitive value, you can leave it empty in the configuration file and pass it via an environment variable instead. To this end, create the environment variable `CONSTELL_AZURE_CLIENT_SECRET_VALUE` and set it to the secret value. ::: - - + + You must be authenticated with the [GCP CLI](https://cloud.google.com/sdk/gcloud) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). ```bash -constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --serviceAccountID=constell-test +constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west2-a --serviceAccountID=constell-test ``` -This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. +This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west2-a` creating a new service account `constell-test`. Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `N2D`. Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - + + You must be authenticated with the [AWS CLI](https://aws.amazon.com/en/cli/) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). @@ -143,16 +143,16 @@ You can find a list of all [regions in AWS's documentation](https://docs.aws.ama Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - + +
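Regardless of which CSP was used, you can double-check that the IAM values actually landed in the generated config. A sketch reusing the `yq` invocation these docs use elsewhere:

```bash
# Show which provider section is populated in the generated config.
yq '.provider | keys | .[0]' constellation-conf.yaml

# Inspect the provider fields that `constellation iam create` filled in.
yq '.provider' constellation-conf.yaml
```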
Alternatively, you can manually create the IAM configuration on your CSP. The following describes the configuration fields and how you obtain the required information or create the required resources. - - + + * **subscription**: The UUID of your Azure subscription, e.g., `8b8bd01f-efd9-4113-9bd1-c82137c32da7`. @@ -196,19 +196,19 @@ The following describes the configuration fields and how you obtain the required Since this is a sensitive value, alternatively you can leave `clientSecretValue` empty in the configuration file and pass it via an environment variable instead. To this end, create the environment variable `CONSTELL_AZURE_CLIENT_SECRET_VALUE` and set it to the secret value. ::: - + - + * **project**: The ID of your GCP project, e.g., `constellation-129857`. You can find it on the [welcome screen of your GCP project](https://console.cloud.google.com/welcome). For more information refer to [Google's documentation](https://support.google.com/googleapi/answer/7014113). -* **region**: The GCP region you want to deploy your cluster in, e.g., `us-central1`. +* **region**: The GCP region you want to deploy your cluster in, e.g., `us-west1`. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). -* **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-central1-a`. +* **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-west1-a`. You can find a [list of all zones in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). @@ -222,9 +222,9 @@ The following describes the configuration fields and how you obtain the required Afterward, create and download a new JSON key for this service account. Place the downloaded file in your Constellation workspace, and set the config parameter to the filename, e.g., `constellation-129857-15343dba46cb.json`. - + - + * **region**: The name of your chosen AWS data center region, e.g., `us-east-2`. @@ -255,9 +255,9 @@ The following describes the configuration fields and how you obtain the required Alternatively, you can create the AWS profile with a tool of your choice. Use the JSON policy in [main.tf](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam/main.tf) in the resource `aws_iam_policy.worker_node_policy`. - + - +
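One way to set up the named AWS profile mentioned above. A sketch; the profile name `constellation` is just an example:

```bash
# Create a dedicated CLI profile and confirm which IAM identity it resolves to.
aws configure --profile constellation
aws sts get-caller-identity --profile constellation
```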
Now that you've configured your CSP, you can [create your cluster](./create.md). diff --git a/docs/versioned_docs/version-2.7/workflows/create.md b/docs/versioned_docs/version-2.7/workflows/create.md index 5c4dd2948..aff59bb6a 100644 --- a/docs/versioned_docs/version-2.7/workflows/create.md +++ b/docs/versioned_docs/version-2.7/workflows/create.md @@ -4,7 +4,7 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + --- @@ -26,8 +26,8 @@ Before you create the cluster, make sure to have a [valid configuration file](./ ### Create - - + + Choose the initial size of your cluster. The following command creates a cluster with one control-plane and two worker nodes: @@ -40,8 +40,8 @@ For details on the flags, consult the command help via `constellation create -h` *create* stores your cluster's state in a [`constellation-terraform`](../architecture/orchestration.md#cluster-creation-process) directory in your workspace. - - + + Terraform allows for an easier GitOps integration as well as meeting regulatory requirements. Since the Constellation CLI also uses Terraform under the hood, you can reuse the same Terraform files. @@ -80,8 +80,8 @@ CONSTELL_CSP=$(cat constellation-conf.yaml | yq ".provider | keys | .[0]") jq --null-input --arg cloudprovider "$CONSTELL_CSP" --arg ip "$CONSTELL_IP" --arg initsecret "$CONSTELL_INIT_SECRET" '{"cloudprovider":$cloudprovider,"ip":$ip,"initsecret":$initsecret}' > constellation-id.json ``` - - + + ## The *init* step diff --git a/docs/versioned_docs/version-2.7/workflows/recovery.md b/docs/versioned_docs/version-2.7/workflows/recovery.md index 35596b8c9..c26fb32eb 100644 --- a/docs/versioned_docs/version-2.7/workflows/recovery.md +++ b/docs/versioned_docs/version-2.7/workflows/recovery.md @@ -16,8 +16,8 @@ You can check the health status of the nodes via the cloud service provider (CSP Constellation provides logging information on the boot process and status via [cloud logging](troubleshooting.md#cloud-logging). In the following, you'll find detailed descriptions for identifying clusters stuck in recovery for each CSP. - - + + In the Azure portal, find the cluster's resource group. Inside the resource group, open the control plane *Virtual machine scale set* `constellation-scale-set-controlplanes-`. @@ -51,8 +51,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. - - + + First, check that the control plane *Instance Group* has enough members in a *Ready* state. In the GCP Console, go to **Instance Groups** and check the group for the cluster's control plane `-control-plane-`. @@ -87,8 +87,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. - - + + First, open the AWS console to view all Auto Scaling Groups (ASGs) in the region of your cluster. Select the ASG of the control plane `--control-plane` and check that enough members are in a *Running* state. @@ -118,8 +118,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. 
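The GCP console steps above also have a CLI equivalent. A sketch with placeholders for the instance group and zone shown in the console:

```bash
# List the members of the control-plane instance group and their current status.
gcloud compute instance-groups managed list-instances "<control-plane-instance-group>" \
  --zone "<zone>"
```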
- - + + ## Recover a cluster diff --git a/docs/versioned_docs/version-2.7/workflows/sbom.md b/docs/versioned_docs/version-2.7/workflows/sbom.md index 92550c182..44b347a55 100644 --- a/docs/versioned_docs/version-2.7/workflows/sbom.md +++ b/docs/versioned_docs/version-2.7/workflows/sbom.md @@ -1,6 +1,6 @@ # Consume software bill of materials (SBOMs) - + --- @@ -19,7 +19,7 @@ JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== -----END PUBLIC KEY----- ``` -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). +The public key is also available for download at and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). Make sure the key is available in a file named `cosign.pub` to execute the following examples. ::: @@ -40,7 +40,7 @@ cosign verify-blob --key cosign.pub --signature constellation.spdx.sbom.sig cons ### Container Images -SBOMs for container images are [attached to the image using Cosign](https://docs.sigstore.dev/cosign/signing/other_types/#sboms-software-bill-of-materials) and uploaded to the same registry. +SBOMs for container images are [attached to the image using Cosign](https://docs.sigstore.dev/signing/other_types#sboms-software-bill-of-materials) and uploaded to the same registry. As a consumer, use cosign to download and verify the SBOM: diff --git a/docs/versioned_docs/version-2.7/workflows/scale.md b/docs/versioned_docs/version-2.7/workflows/scale.md index bce045c66..3b7c0d479 100644 --- a/docs/versioned_docs/version-2.7/workflows/scale.md +++ b/docs/versioned_docs/version-2.7/workflows/scale.md @@ -48,23 +48,23 @@ kubectl -n kube-system get nodes Alternatively, you can manually scale your cluster up or down: - - + + 1. Find your Constellation resource group. 2. Select the `scale-set-workers`. 3. Go to **settings** and **scaling**. 4. Set the new **instance count** and **save**. - - + + 1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). 2. **Edit** the **worker** instance group. 3. Set the new **number of instances** and **save**. - - + + :::caution @@ -72,8 +72,8 @@ Scaling isn't yet implemented for AWS. If you require this feature, [let us know ::: - - + + ## Control-plane node scaling @@ -81,24 +81,24 @@ Control-plane nodes can **only be scaled manually and only scaled up**! To increase the number of control-plane nodes, follow these steps: - + - + 1. Find your Constellation resource group. 2. Select the `scale-set-controlplanes`. 3. Go to **settings** and **scaling**. 4. Set the new (increased) **instance count** and **save**. - - + + 1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). 2. **Edit** the **control-plane** instance group. 3. Set the new (increased) **number of instances** and **save**. - - + + :::caution @@ -106,7 +106,7 @@ Scaling isn't yet implemented for AWS. If you require this feature, [let us know ::: - - + + If you scale down the number of control-planes nodes, the removed nodes won't be able to exit the `etcd` cluster correctly. This will endanger the quorum that's required to run a stable Kubernetes control plane. 
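For the GCP worker scaling steps above, the console clicks map to a single CLI call. A sketch with placeholder names, not part of the documented flow:

```bash
# Resize the worker instance group to the desired node count.
gcloud compute instance-groups managed resize "<worker-instance-group>" \
  --size 4 \
  --zone "<zone>"
```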
diff --git a/docs/versioned_docs/version-2.7/workflows/storage.md b/docs/versioned_docs/version-2.7/workflows/storage.md index be9998676..d0e5b188f 100644 --- a/docs/versioned_docs/version-2.7/workflows/storage.md +++ b/docs/versioned_docs/version-2.7/workflows/storage.md @@ -21,14 +21,14 @@ For more details see [encrypted persistent storage](../architecture/encrypted-st Constellation supports the following drivers, which offer node-level encryption and optional integrity protection. - - + + **Constellation CSI driver for Azure Disk**: Mount Azure [Disk Storage](https://azure.microsoft.com/en-us/services/storage/disks/#overview) into your Constellation cluster. See the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-azuredisk-csi-driver) for more information. Since Azure Disks are mounted as ReadWriteOnce, they're only available to a single pod. - - + + **Constellation CSI driver for GCP Persistent Disk**: Mount [Persistent Disk](https://cloud.google.com/persistent-disk) block storage into your Constellation cluster. @@ -36,8 +36,8 @@ This includes support for [volume snapshots](https://cloud.google.com/kubernetes You can use them to bring a volume back to a prior state or provision new volumes. Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-gcp-compute-persistent-disk-csi-driver) for information about the configuration. - - + + :::caution @@ -47,8 +47,8 @@ You may use other (non-confidential) CSI drivers that are compatible with Kubern ::: - - + + Note that in case the options above aren't a suitable solution for you, Constellation is compatible with all other CSI-based storage options. For example, you can use [Azure Files](https://docs.microsoft.com/en-us/azure/storage/files/storage-files-introduction) or [GCP Filestore](https://cloud.google.com/filestore) with Constellation out of the box. Constellation is just not providing transparent encryption on the node level for these storage types yet. @@ -57,8 +57,8 @@ Note that in case the options above aren't a suitable solution for you, Constell The Constellation CLI automatically installs Constellation's CSI driver for the selected CSP in your cluster. If you don't need a CSI driver or wish to deploy your own, you can disable the automatic installation by setting `deployCSIDriver` to `false` in your Constellation config file. - - + + Azure comes with two storage classes by default. @@ -86,8 +86,8 @@ Note that volume expansion isn't supported for integrity-protected disks. ::: - - + + GCP comes with two storage classes by default. @@ -115,8 +115,8 @@ Note that volume expansion isn't supported for integrity-protected disks. ::: - - + + :::caution @@ -126,8 +126,8 @@ You may use other (non-confidential) CSI drivers that are compatible with Kubern ::: - - + + 1. Create a [persistent volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) @@ -186,8 +186,8 @@ The default storage class is responsible for all persistent volume claims that d Constellation creates a storage class with encryption enabled and sets this as the default class. In case you wish to change it, follow the steps below: - - + + 1. 
List the storage classes in your cluster: @@ -233,8 +233,8 @@ In case you wish to change it, follow the steps below: integrity-encrypted-rwo (default) azuredisk.csi.confidential.cloud Delete Immediate false 1d ``` - - + + 1. List the storage classes in your cluster: @@ -280,8 +280,8 @@ In case you wish to change it, follow the steps below: integrity-encrypted-rwo (default) gcp.csi.confidential.cloud Delete Immediate false 1d ``` - - + + :::caution @@ -291,5 +291,5 @@ You may use other (non-confidential) CSI drivers that are compatible with Kubern ::: - - + + diff --git a/docs/versioned_docs/version-2.7/workflows/terminate.md b/docs/versioned_docs/version-2.7/workflows/terminate.md index f33489ca5..647eadb42 100644 --- a/docs/versioned_docs/version-2.7/workflows/terminate.md +++ b/docs/versioned_docs/version-2.7/workflows/terminate.md @@ -4,7 +4,7 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + --- @@ -16,8 +16,8 @@ All ephemeral storage and state of your cluster will be lost. Make sure any data ::: - - + + Terminate the cluster by running: ```bash @@ -40,8 +40,8 @@ resources manually. Just run the `terminate` command again afterward to continue ::: - - + + Terminate the cluster by running: ```bash @@ -56,5 +56,5 @@ rm constellation-id.json constellation-admin.conf Only the `constellation-mastersecret.json` and the configuration file remain. - - + + diff --git a/docs/versioned_docs/version-2.7/workflows/troubleshooting.md b/docs/versioned_docs/version-2.7/workflows/troubleshooting.md index cd095be28..2ddf3335d 100644 --- a/docs/versioned_docs/version-2.7/workflows/troubleshooting.md +++ b/docs/versioned_docs/version-2.7/workflows/troubleshooting.md @@ -75,8 +75,8 @@ To provide information during early stages of a node's boot process, Constellati You can view this information in the following places: - - + + 1. In your Azure subscription find the Constellation resource group. 2. Inside the resource group find the Application Insights resource called `constellation-insights-*`. @@ -86,8 +86,8 @@ You can view this information in the following places: To **find the disk UUIDs** use the following query: `traces | where message contains "Disk UUID"` - - + + 1. Select the project that hosts Constellation. 2. Go to the `Compute Engine` service. @@ -102,16 +102,16 @@ Constellation uses the default bucket to store logs. Its [default retention peri ::: - - + + 1. Open [AWS CloudWatch](https://console.aws.amazon.com/cloudwatch/home) 2. Select [Log Groups](https://console.aws.amazon.com/cloudwatch/home#logsV2:log-groups) 3. Select the log group that matches the name of your cluster. 4. Select the log stream for control or worker type nodes. - - + + ### Node shell access diff --git a/docs/versioned_docs/version-2.7/workflows/trusted-launch.md b/docs/versioned_docs/version-2.7/workflows/trusted-launch.md index 11d0a096c..13bd63ba6 100644 --- a/docs/versioned_docs/version-2.7/workflows/trusted-launch.md +++ b/docs/versioned_docs/version-2.7/workflows/trusted-launch.md @@ -14,7 +14,7 @@ Constellation supports trusted launch VMs with instance types `Standard_D*_v4` a Azure currently doesn't support [community galleries for trusted launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/share-gallery-community). Thus, you need to manually import the Constellation node image into your cloud subscription. 
-The latest image is available at `https://cdn.confidential.cloud/constellation/images/azure/trusted-launch/v2.2.0/constellation.img`. Simply adjust the version number to download a newer version. +The latest image is available at . Simply adjust the version number to download a newer version. After you've downloaded the image, create a resource group `constellation-images` in your Azure subscription and import the image. You can use a script to do this: @@ -26,7 +26,6 @@ AZURE_IMAGE_VERSION=2.2.0 AZURE_RESOURCE_GROUP_NAME=constellation-images AZURE_I ``` The script creates the following resources: - 1. A new image gallery with the default name `constellation-import` 2. A new image definition with the default name `constellation` 3. The actual image with the provided version. In this case `2.2.0` diff --git a/docs/versioned_docs/version-2.7/workflows/verify-cli.md b/docs/versioned_docs/version-2.7/workflows/verify-cli.md index aa2df4be4..1280c51b0 100644 --- a/docs/versioned_docs/version-2.7/workflows/verify-cli.md +++ b/docs/versioned_docs/version-2.7/workflows/verify-cli.md @@ -4,11 +4,11 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + --- -Edgeless Systems uses [sigstore](https://www.sigstore.dev/) and [SLSA](https://slsa.dev) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/cosign/signing/overview/), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at `https://rekor.sigstore.dev`. +Edgeless Systems uses [sigstore](https://www.sigstore.dev/) and [SLSA](https://slsa.dev) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/signing/quickstart), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at . :::note The public key for Edgeless Systems' long-term code-signing key is: @@ -20,7 +20,7 @@ JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== -----END PUBLIC KEY----- ``` -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). +The public key is also available for download at and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). ::: The Rekor transparency log is a public append-only ledger that verifies and records signatures and associated metadata. The Rekor transparency log enables everyone to observe the sequence of (software) signatures issued by Edgeless Systems and many other parties. The transparency log allows for the public identification of dubious or malicious signatures. @@ -33,7 +33,7 @@ You don't need to verify the Constellation node images. This is done automatical ## Verify the signature -First, [install the Cosign CLI](https://docs.sigstore.dev/cosign/system_config/installation/). 
Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example: +First, [install the Cosign CLI](https://docs.sigstore.dev/system_config/installation). Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example: ```shell-session $ cosign verify-blob --key https://edgeless.systems/es.pub --signature constellation-linux-amd64.sig constellation-linux-amd64 diff --git a/docs/versioned_docs/version-2.8/architecture/attestation.md b/docs/versioned_docs/version-2.8/architecture/attestation.md index 592063193..07ac3aa72 100644 --- a/docs/versioned_docs/version-2.8/architecture/attestation.md +++ b/docs/versioned_docs/version-2.8/architecture/attestation.md @@ -121,8 +121,8 @@ Constellation allows to specify in the config which measurements should be enfor Enforcing non-reproducible measurements controlled by the cloud provider means that changes in these values require manual updates to the cluster's config. By default, Constellation only enforces measurements that are stable values produced by the infrastructure or by Constellation directly. - - + + Constellation uses the [vTPM](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch#vtpm) feature of Azure CVMs for runtime measurements. This vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. @@ -152,8 +152,8 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + Constellation uses the [vTPM](https://cloud.google.com/compute/confidential-vm/docs/about-cvm) feature of CVMs on GCP for runtime measurements. Note that this vTPM doesn't run inside the hardware-protected CVM context, but is emulated by the hypervisor. @@ -185,8 +185,8 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + Constellation uses the [vTPM](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html) (NitroTPM) feature of the [AWS Nitro System](http://aws.amazon.com/ec2/nitro/) on AWS for runtime measurements. @@ -217,16 +217,16 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + ### CVM verification To verify the integrity of the received attestation statement, a chain of trust from the CVM technology to the interface providing the statement has to be established. For verification of the CVM technology, Constellation may expose additional options in its config file. - - + + On Azure, AMD SEV-SNP is used to provide runtime encryption to the VMs. An SEV-SNP attestation report is used to establish trust in the vTPM running inside the VM. @@ -248,18 +248,18 @@ You may customize certain parameters for verification of the attestation stateme More explicitly, it controls the verification of the `IDKeyDigest` value in the SEV-SNP attestation report. You can provide a list of accepted key digests and specify a policy on how this list is compared against the reported `IDKeyDigest`. - - + + There is no additional configuration available for GCP. - - + + There is no additional configuration available for AWS. 
- - + + ## Cluster attestation diff --git a/docs/versioned_docs/version-2.8/architecture/keys.md b/docs/versioned_docs/version-2.8/architecture/keys.md index 553d9d4e2..f2c8c3fba 100644 --- a/docs/versioned_docs/version-2.8/architecture/keys.md +++ b/docs/versioned_docs/version-2.8/architecture/keys.md @@ -105,7 +105,7 @@ Initially, it will support the following KMSs: * [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/#product-overview) * [KMIP-compatible KMS](https://www.oasis-open.org/committees/tc_home.php?wg_abbrev=kmip) -Storing the keys in Cloud KMS of AWS, Azure, or GCP binds the key usage to the particular cloud identity access management (IAM). +Storing the keys in Cloud KMS of AWS, GCP, or Azure binds the key usage to the particular cloud identity access management (IAM). In the future, Constellation will support remote attestation-based access policies for Cloud KMS once available. Note that using a Cloud KMS limits the isolation and protection to the guarantees of the particular offering. diff --git a/docs/versioned_docs/version-2.8/getting-started/first-steps-local.md b/docs/versioned_docs/version-2.8/getting-started/first-steps-local.md index 81d8e141d..707074bb9 100644 --- a/docs/versioned_docs/version-2.8/getting-started/first-steps-local.md +++ b/docs/versioned_docs/version-2.8/getting-started/first-steps-local.md @@ -30,8 +30,8 @@ Both options use virtualization to create a local cluster with control-plane nod ## Create a cluster - - + + With the `constellation mini` command, you can deploy and test Constellation locally. This mode is called MiniConstellation. Conceptually, MiniConstellation is similar to [MicroK8s](https://microk8s.io/), [K3s](https://k3s.io/), and [minikube](https://minikube.sigs.k8s.io/docs/). @@ -59,8 +59,8 @@ constellation mini up This will configure your current directory as the [workspace](../architecture/orchestration.md#workspaces) for this cluster. All `constellation` commands concerning this cluster need to be issued from this directory. - - + + With the QEMU provider, you can create a local Constellation cluster as if it were in the cloud. The provider uses [QEMU](https://www.qemu.org/) to create multiple VMs for the cluster nodes, which interact with each other. @@ -138,8 +138,8 @@ attaching persistent storage, or autoscaling aren't available. export KUBECONFIG="$PWD/constellation-admin.conf" ``` - - + + ## Connect to the cluster @@ -192,8 +192,8 @@ worker-0 Ready 32s v1.24.6 ## Terminate your cluster - - + + Once you are done, you can clean up the created resources using the following command: @@ -204,8 +204,8 @@ constellation mini down This will destroy your cluster and clean up your workspace. The VM image and cluster configuration file (`constellation-conf.yaml`) will be kept and may be reused to create new clusters. - - + + Once you are done, you can clean up the created resources using the following command: @@ -233,8 +233,8 @@ Your Constellation cluster was terminated successfully. This will destroy your cluster and clean up your workspace. The VM image and cluster configuration file (`constellation-conf.yaml`) will be kept and may be reused to create new clusters. 
- - + + ## Troubleshooting diff --git a/docs/versioned_docs/version-2.8/getting-started/first-steps.md b/docs/versioned_docs/version-2.8/getting-started/first-steps.md index 8d5cc5cbb..4449e6d37 100644 --- a/docs/versioned_docs/version-2.8/getting-started/first-steps.md +++ b/docs/versioned_docs/version-2.8/getting-started/first-steps.md @@ -17,9 +17,9 @@ If you encounter any problem with the following steps, make sure to use the [lat First, you need to create a [configuration file](../workflows/config.md) and an [IAM configuration](../workflows/config.md#creating-an-iam-configuration). The easiest way to do this is the following CLI command: - + - + ```bash constellation iam create azure --region=westus --resourceGroup=constellTest --servicePrincipal=spTest --generate-config @@ -34,21 +34,21 @@ If you encounter any problem with the following steps, make sure to use the [lat * `westeurope` * `southeastasia` - + - + ```bash - constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --serviceAccountID=constell-test --generate-config + constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west2-a --serviceAccountID=constell-test --generate-config ``` - This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. It also creates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. + This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west2-a` creating a new service account `constell-test`. It also creates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `C2D` or `N2D`. - + - + ```bash constellation iam create aws --zone=eu-central-1a --prefix=constellTest --generate-config @@ -67,8 +67,8 @@ If you encounter any problem with the following steps, make sure to use the [lat You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). - - + + :::tip To learn about all options you have for managing IAM resources and Constellation configuration, see the [Configuration workflow](../workflows/config.md). diff --git a/docs/versioned_docs/version-2.8/getting-started/install.md b/docs/versioned_docs/version-2.8/getting-started/install.md index 2aa274b98..37940d0a2 100644 --- a/docs/versioned_docs/version-2.8/getting-started/install.md +++ b/docs/versioned_docs/version-2.8/getting-started/install.md @@ -11,15 +11,15 @@ Make sure the following requirements are met: - Your machine is running Linux or macOS - You have admin rights on your machine - [kubectl](https://kubernetes.io/docs/tasks/tools/) is installed -- Your CSP is Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) +- Your CSP is Microsoft Azure, Google Cloud Platform (GCP), or Amazon Web Services (AWS) ## Install the Constellation CLI The CLI executable is available at [GitHub](https://github.com/edgelesssys/constellation/releases). Install it with the following commands: - - + + 1. 
Download the CLI: @@ -35,8 +35,8 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-linux-amd64 /usr/local/bin/constellation ``` - - + + 1. Download the CLI: @@ -52,9 +52,10 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-linux-arm64 /usr/local/bin/constellation ``` - - + + + 1. Download the CLI: @@ -70,9 +71,11 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-darwin-arm64 /usr/local/bin/constellation ``` - - + + + + 1. Download the CLI: @@ -88,8 +91,8 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-darwin-amd64 /usr/local/bin/constellation ``` - - + + :::tip The CLI supports autocompletion for various shells. To set it up, run `constellation completion` and follow the given steps. @@ -105,42 +108,39 @@ If you don't have a cloud subscription, you can also set up a [local Constellati ### Required permissions - - + + The following [resource providers need to be registered](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/resource-providers-and-types#register-resource-provider) in your subscription: - -- `Microsoft.Attestation` -- `Microsoft.Compute` -- `Microsoft.Insights` -- `Microsoft.ManagedIdentity` -- `Microsoft.Network` +* `Microsoft.Attestation` \[2] +* `Microsoft.Compute` +* `Microsoft.Insights` +* `Microsoft.ManagedIdentity` +* `Microsoft.Network` By default, Constellation tries to register these automatically if they haven't been registered before. To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: - -- `*/register/action` \[1] -- `Microsoft.Authorization/roleAssignments/*` -- `Microsoft.Authorization/roleDefinitions/*` -- `Microsoft.ManagedIdentity/userAssignedIdentities/*` -- `Microsoft.Resources/subscriptions/resourcegroups/*` +* `*/register/action` \[1] +* `Microsoft.Authorization/roleAssignments/*` +* `Microsoft.Authorization/roleDefinitions/*` +* `Microsoft.ManagedIdentity/userAssignedIdentities/*` +* `Microsoft.Resources/subscriptions/resourcegroups/*` The built-in `Owner` role is a superset of these permissions. To [create a Constellation cluster](../workflows/create.md#the-create-step), you need the following permissions: - -- `Microsoft.Attestation/attestationProviders/*` -- `Microsoft.Compute/virtualMachineScaleSets/*` -- `Microsoft.Insights/components/*` -- `Microsoft.ManagedIdentity/userAssignedIdentities/*` -- `Microsoft.Network/loadBalancers/*` -- `Microsoft.Network/loadBalancers/backendAddressPools/*` -- `Microsoft.Network/networkSecurityGroups/*` -- `Microsoft.Network/publicIPAddresses/*` -- `Microsoft.Network/virtualNetworks/*` -- `Microsoft.Network/virtualNetworks/subnets/*` -- `Microsoft.Network/natGateways/*` +* `Microsoft.Attestation/attestationProviders/*` \[2] +* `Microsoft.Compute/virtualMachineScaleSets/*` +* `Microsoft.Insights/components/*` +* `Microsoft.ManagedIdentity/userAssignedIdentities/*` +* `Microsoft.Network/loadBalancers/*` +* `Microsoft.Network/loadBalancers/backendAddressPools/*` +* `Microsoft.Network/networkSecurityGroups/*` +* `Microsoft.Network/publicIPAddresses/*` +* `Microsoft.Network/virtualNetworks/*` +* `Microsoft.Network/virtualNetworks/subnets/*` +* `Microsoft.Network/natGateways/*` The built-in `Contributor` role is a superset of these permissions. 
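For illustration only, assigning such a built-in role with the Azure CLI could look like the following sketch; the assignee object ID and subscription ID are placeholders, and whether you grant `Owner`, `Contributor`, or a custom role should follow the permission lists above rather than this example:

```bash
# Hypothetical example: assign the built-in Contributor role at subscription scope.
az role assignment create \
  --assignee "<object-id-of-user-or-service-principal>" \
  --role "Contributor" \
  --scope "/subscriptions/<subscription-id>"
```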
@@ -148,91 +148,91 @@ Follow Microsoft's guide on [understanding](https://learn.microsoft.com/en-us/az 1: You can omit `*/register/Action` if the resource providers mentioned above are already registered and the `ARM_SKIP_PROVIDER_REGISTRATION` environment variable is set to `true` when creating the IAM configuration. - - +2: You can omit `Microsoft.Attestation/attestationProviders/*` and the registration of `Microsoft.Attestation` if `EnforceIDKeyDigest` isn't set to `MAAFallback` in the [config file](../workflows/config.md#configure-your-cluster). + + + Create a new project for Constellation or use an existing one. Enable the [Compute Engine API](https://console.cloud.google.com/apis/library/compute.googleapis.com) on it. To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: - -- `iam.serviceAccountKeys.create` -- `iam.serviceAccountKeys.delete` -- `iam.serviceAccountKeys.get` -- `iam.serviceAccounts.create` -- `iam.serviceAccounts.delete` -- `iam.serviceAccounts.get` -- `resourcemanager.projects.getIamPolicy` -- `resourcemanager.projects.setIamPolicy` +* `iam.serviceAccountKeys.create` +* `iam.serviceAccountKeys.delete` +* `iam.serviceAccountKeys.get` +* `iam.serviceAccounts.create` +* `iam.serviceAccounts.delete` +* `iam.serviceAccounts.get` +* `resourcemanager.projects.getIamPolicy` +* `resourcemanager.projects.setIamPolicy` Together, the built-in roles `roles/editor` and `roles/resourcemanager.projectIamAdmin` form a superset of these permissions. To [create a Constellation cluster](../workflows/create.md#the-create-step), you need the following permissions: - -- `compute.addresses.createInternal` -- `compute.addresses.deleteInternal` -- `compute.addresses.get` -- `compute.addresses.useInternal` -- `compute.backendServices.create` -- `compute.backendServices.delete` -- `compute.backendServices.get` -- `compute.backendServices.use` -- `compute.disks.create` -- `compute.firewalls.create` -- `compute.firewalls.delete` -- `compute.firewalls.get` -- `compute.globalAddresses.create` -- `compute.globalAddresses.delete` -- `compute.globalAddresses.get` -- `compute.globalAddresses.use` -- `compute.globalForwardingRules.create` -- `compute.globalForwardingRules.delete` -- `compute.globalForwardingRules.get` -- `compute.globalForwardingRules.setLabels` -- `compute.globalOperations.get` -- `compute.healthChecks.create` -- `compute.healthChecks.delete` -- `compute.healthChecks.get` -- `compute.healthChecks.useReadOnly` -- `compute.instanceGroupManagers.create` -- `compute.instanceGroupManagers.delete` -- `compute.instanceGroupManagers.get` -- `compute.instanceGroups.create` -- `compute.instanceGroups.delete` -- `compute.instanceGroups.get` -- `compute.instanceGroups.use` -- `compute.instances.create` -- `compute.instances.setLabels` -- `compute.instances.setMetadata` -- `compute.instances.setTags` -- `compute.instanceTemplates.create` -- `compute.instanceTemplates.delete` -- `compute.instanceTemplates.get` -- `compute.instanceTemplates.useReadOnly` -- `compute.networks.create` -- `compute.networks.delete` -- `compute.networks.get` -- `compute.networks.updatePolicy` -- `compute.routers.create` -- `compute.routers.delete` -- `compute.routers.get` -- `compute.routers.update` -- `compute.subnetworks.create` -- `compute.subnetworks.delete` -- `compute.subnetworks.get` -- `compute.subnetworks.use` -- `compute.targetTcpProxies.create` -- `compute.targetTcpProxies.delete` -- `compute.targetTcpProxies.get` -- 
`compute.targetTcpProxies.use` -- `iam.serviceAccounts.actAs` +* `compute.addresses.createInternal` +* `compute.addresses.deleteInternal` +* `compute.addresses.get` +* `compute.addresses.useInternal` +* `compute.backendServices.create` +* `compute.backendServices.delete` +* `compute.backendServices.get` +* `compute.backendServices.use` +* `compute.disks.create` +* `compute.firewalls.create` +* `compute.firewalls.delete` +* `compute.firewalls.get` +* `compute.globalAddresses.create` +* `compute.globalAddresses.delete` +* `compute.globalAddresses.get` +* `compute.globalAddresses.use` +* `compute.globalForwardingRules.create` +* `compute.globalForwardingRules.delete` +* `compute.globalForwardingRules.get` +* `compute.globalForwardingRules.setLabels` +* `compute.globalOperations.get` +* `compute.healthChecks.create` +* `compute.healthChecks.delete` +* `compute.healthChecks.get` +* `compute.healthChecks.useReadOnly` +* `compute.instanceGroupManagers.create` +* `compute.instanceGroupManagers.delete` +* `compute.instanceGroupManagers.get` +* `compute.instanceGroups.create` +* `compute.instanceGroups.delete` +* `compute.instanceGroups.get` +* `compute.instanceGroups.use` +* `compute.instances.create` +* `compute.instances.setLabels` +* `compute.instances.setMetadata` +* `compute.instances.setTags` +* `compute.instanceTemplates.create` +* `compute.instanceTemplates.delete` +* `compute.instanceTemplates.get` +* `compute.instanceTemplates.useReadOnly` +* `compute.networks.create` +* `compute.networks.delete` +* `compute.networks.get` +* `compute.networks.updatePolicy` +* `compute.routers.create` +* `compute.routers.delete` +* `compute.routers.get` +* `compute.routers.update` +* `compute.subnetworks.create` +* `compute.subnetworks.delete` +* `compute.subnetworks.get` +* `compute.subnetworks.use` +* `compute.targetTcpProxies.create` +* `compute.targetTcpProxies.delete` +* `compute.targetTcpProxies.get` +* `compute.targetTcpProxies.use` +* `iam.serviceAccounts.actAs` Together, the built-in roles `roles/editor`, `roles/compute.instanceAdmin` and `roles/resourcemanager.projectIamAdmin` form a superset of these permissions. Follow Google's guide on [understanding](https://cloud.google.com/iam/docs/understanding-roles) and [assigning roles](https://cloud.google.com/iam/docs/granting-changing-revoking-access). - - + + To set up a Constellation cluster, you need to perform two tasks that require permissions: create the infrastructure and create roles for cluster nodes. Both of these actions can be performed by different users, e.g., an administrator to create roles and a DevOps engineer to create the infrastructure. @@ -367,8 +367,8 @@ The built-in `PowerUserAccess` policy is a superset of these permissions. Follow Amazon's guide on [understanding](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) and [managing policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html). - - + + ### Authentication @@ -378,8 +378,8 @@ You need to authenticate with your CSP. The following lists the required steps f The steps for a *testing* environment are simpler. However, they may expose secrets to the CSP. If in doubt, follow the *production* steps. ::: - - + + **Testing** @@ -395,8 +395,8 @@ az login Other options are described in Azure's [authentication guide](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli). 
- - + + **Testing** @@ -419,8 +419,8 @@ Use one of the following options on a trusted machine: Follow [Google's guide](https://cloud.google.com/docs/authentication/production#manually) for setting up your credentials. - - + + **Testing** @@ -436,9 +436,10 @@ aws configure Options and first steps are described in the [AWS CLI documentation](https://docs.aws.amazon.com/cli/index.html). - + - + + ## Next steps diff --git a/docs/versioned_docs/version-2.8/overview/clouds.md b/docs/versioned_docs/version-2.8/overview/clouds.md index dfc3d5307..3ccbb0d6d 100644 --- a/docs/versioned_docs/version-2.8/overview/clouds.md +++ b/docs/versioned_docs/version-2.8/overview/clouds.md @@ -31,11 +31,11 @@ This firmware is signed by Azure. The signature is reflected in the remote-attestation statements of CVMs. Thus, the Azure closed-source firmware becomes part of Constellation's trusted computing base (TCB). -\* Recently, [Azure announced the open source paravisor OpenHCL](https://techcommunity.microsoft.com/blog/windowsosplatform/openhcl-the-new-open-source-paravisor/4273172). It's the foundation for fully open source and verifiable CVM firmware. Once Azure provides their CVM firmware with reproducible builds based on OpenHCL, (4) switches from *No* to *Yes*. Constellation will support OpenHCL based firmware on Azure in the future. +\* Recently, Azure [announced](https://techcommunity.microsoft.com/t5/azure-confidential-computing/azure-confidential-vms-using-sev-snp-dcasv5-ecasv5-are-now/ba-p/3573747) the *limited preview* of CVMs with customizable firmware. With this CVM type, (4) switches from *No* to *Yes*. Constellation will support customizable firmware on Azure in the future. ## Google Cloud Platform (GCP) -The [CVMs Generally Available in GCP](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview#amd_sev) are based on AMD SEV but don't have SNP features enabled. +The [CVMs Generally Available in GCP](https://cloud.google.com/compute/confidential-vm/docs/create-confidential-vm-instance) are based on AMD SEV but don't have SNP features enabled. CVMs with SEV-SNP enabled are currently in [private preview](https://cloud.google.com/blog/products/identity-security/rsa-snp-vm-more-confidential). Regarding (3), with their SEV-SNP offering Google provides direct access to remote-attestation statements. However, regarding (4), the CVMs still include closed-source firmware. diff --git a/docs/versioned_docs/version-2.8/overview/confidential-kubernetes.md b/docs/versioned_docs/version-2.8/overview/confidential-kubernetes.md index 1441c833a..2b6c6ed17 100644 --- a/docs/versioned_docs/version-2.8/overview/confidential-kubernetes.md +++ b/docs/versioned_docs/version-2.8/overview/confidential-kubernetes.md @@ -23,9 +23,9 @@ With the above, Constellation wraps an entire cluster into one coherent and veri ![Confidential Kubernetes](../_media/concept-constellation.svg) -## Comparison: Managed Kubernetes with CVMs +## Contrast: Managed Kubernetes with CVMs -In comparison, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. 
The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. Consequently, this approach leaves a large attack surface, as is depicted in the following. +In contrast, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. Consequently, this approach leaves a large attack surface, as is depicted in the following. ![Concept: Managed Kubernetes plus CVMs](../_media/concept-managed.svg) diff --git a/docs/versioned_docs/version-2.8/overview/performance.md b/docs/versioned_docs/version-2.8/overview/performance.md index aef594c46..54f31019a 100644 --- a/docs/versioned_docs/version-2.8/overview/performance.md +++ b/docs/versioned_docs/version-2.8/overview/performance.md @@ -63,6 +63,7 @@ The following infrastructure configurations was used: - CVM: `false` - Zone: `europe-west3-b` + ### Results #### Network @@ -70,7 +71,7 @@ This section gives a thorough analysis of the network performance of Constellation, specifically focusing on measuring TCP and UDP bandwidth. The benchmark measured the bandwidth of pod-to-pod and pod-to-service connections between two different nodes using [`iperf`](https://iperf.fr/). -GKE and Constellation on GCP had a maximum network bandwidth of [10 Gbps](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines). +GKE and Constellation on GCP had a maximum network bandwidth of [10 Gbps](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines). AKS with `Standard_D4as_v5` machines a maximum network bandwidth of [12.5 Gbps](https://learn.microsoft.com/en-us/azure/virtual-machines/dasv5-dadsv5-series#dasv5-series). The Confidential VM equivalent `Standard_DC4as_v5` currently has a network bandwidth of [1.25 Gbps](https://learn.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series#dcasv5-series-products). Therefore, to make the test comparable, both AKS and Constellation on Azure were running with `Standard_DC4as_v5` machines and 1.25 Gbps bandwidth. @@ -78,10 +79,11 @@ Therefore, to make the test comparable, both AKS and Constellation on Azure were Constellation on Azure and AKS used an MTU of 1500. Constellation on GCP used an MTU of 8896. GKE used an MTU of 1450. + The difference in network bandwidth can largely be attributed to two factors. -- Constellation's [network encryption](../architecture/networking.md) via Cilium and WireGuard, which protects data in-transit.
-- [AMD SEV using SWIOTLB bounce buffers](https://lore.kernel.org/all/20200204193500.GA15564@ashkalra_ubuntu_server/T/) for all DMA including network I/O. +* Constellation's [network encryption](../architecture/networking.md) via Cilium and WireGuard, which protects data in-transit. +* [AMD SEV using SWIOTLB bounce buffers](https://lore.kernel.org/all/20200204193500.GA15564@ashkalra_ubuntu_server/T/) for all DMA including network I/O. ##### Pod-to-Pod @@ -132,7 +134,6 @@ The results for "Pod-to-Pod" on GCP are as follows: In our recent comparison of Constellation on GCP with GKE, Constellation has 58% less TCP bandwidth. However, UDP bandwidth was slightly better with Constellation, thanks to its higher MTU. Similarly, when comparing Constellation on Azure with AKS using CVMs, Constellation achieved approximately 10% less TCP and 40% less UDP bandwidth. - #### Storage I/O Azure and GCP offer persistent storage for their Kubernetes services AKS and GKE via the Container Storage Interface (CSI). CSI storage in Kubernetes is available via `PersistentVolumes` (PV) and consumed via `PersistentVolumeClaims` (PVC). @@ -142,25 +143,21 @@ Similarly, upon a PVC request, Constellation will provision a PV via a default s For Constellation on Azure and AKS, the benchmark ran with Azure Disk storage [Standard SSD](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#standard-ssds) of 400 GiB size. The [DC4as machine type](https://learn.microsoft.com/en-us/azure/virtual-machines/dasv5-dadsv5-series#dasv5-series) with four cores provides the following maximum performance: - - 6400 (20000 burst) IOPS - 144 MB/s (600 MB/s burst) throughput However, the performance is bound by the capabilities of the [512 GiB Standard SSD size](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#standard-ssds) (the size class of 400 GiB volumes): - - 500 (600 burst) IOPS - 60 MB/s (150 MB/s burst) throughput For Constellation on GCP and GKE, the benchmark ran with Compute Engine Persistent Disk Storage [pd-balanced](https://cloud.google.com/compute/docs/disks) of 400 GiB size. The N2D machine type with four cores and pd-balanced provides the following [maximum performance](https://cloud.google.com/compute/docs/disks/performance#n2d_vms): - - 3,000 read IOPS - 15,000 write IOPS - 240 MB/s read throughput - 240 MB/s write throughput However, the performance is bound by the capabilities of a [`Zonal balanced PD`](https://cloud.google.com/compute/docs/disks/performance#zonal-persistent-disks) with 400 GiB size: - - 2400 read IOPS - 2400 write IOPS - 112 MB/s read throughput @@ -181,7 +178,8 @@ The following `fio` settings were used: - IOPS: 4 KB blocks and 128 iodepth - Bandwidth: 1024 KB blocks and 128 iodepth -For more details, see the [`fio` test configuration](https://github.com/edgelesssys/constellation/blob/main/.github/actions/e2e_benchmark/fio.ini). +For more details, see the [`fio` test configuration](../../../../.github/actions/e2e_benchmark/fio.ini). 
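To make the quoted settings concrete, the following sketch shows roughly equivalent ad-hoc `fio` invocations; the file path, job names, size, and runtime are illustrative assumptions and not taken from the repository's `fio.ini`:

```bash
# IOPS benchmark: 4 KB blocks at an I/O depth of 128 (values from the text above).
fio --name=iops --filename=/mnt/bench/fio.dat --size=1G --rw=randwrite \
    --bs=4k --iodepth=128 --ioengine=libaio --direct=1 --runtime=60 --time_based

# Bandwidth benchmark: 1024 KB blocks at an I/O depth of 128.
fio --name=bandwidth --filename=/mnt/bench/fio.dat --size=1G --rw=write \
    --bs=1024k --iodepth=128 --ioengine=libaio --direct=1 --runtime=60 --time_based
```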
+ The results for IOPS on Azure are as follows: diff --git a/docs/versioned_docs/version-2.8/overview/product.md b/docs/versioned_docs/version-2.8/overview/product.md index e42596fcc..ba7181aa9 100644 --- a/docs/versioned_docs/version-2.8/overview/product.md +++ b/docs/versioned_docs/version-2.8/overview/product.md @@ -6,6 +6,6 @@ From a security perspective, Constellation implements the [Confidential Kubernet From an operational perspective, Constellation provides the following key features: -* **Native support for different clouds**: Constellation works on Amazon Web Services (AWS), Microsoft Azure, and Google Cloud Platform (GCP). Support for OpenStack-based environments is coming with a future release. Constellation securely interfaces with the cloud infrastructure to provide [cluster autoscaling](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), [dynamic persistent volumes](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/), and [service load balancing](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). +* **Native support for different clouds**: Constellation works on Microsoft Azure, Google Cloud Platform (GCP), and Amazon Web Services (AWS). Support for OpenStack-based environments is coming with a future release. Constellation securely interfaces with the cloud infrastructure to provide [cluster autoscaling](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), [dynamic persistent volumes](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/), and [service load balancing](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). * **High availability**: Constellation uses a [multi-master architecture](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/) with a [stacked etcd topology](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/ha-topology/#stacked-etcd-topology) to ensure high availability. * **Integrated Day-2 operations**: Constellation lets you securely [upgrade](../workflows/upgrade.md) your cluster to a new release. It also lets you securely [recover](../workflows/recovery.md) a failed cluster. Both with a single command. diff --git a/docs/versioned_docs/version-2.8/workflows/config.md b/docs/versioned_docs/version-2.8/workflows/config.md index f10ba14ec..260c4e6ec 100644 --- a/docs/versioned_docs/version-2.8/workflows/config.md +++ b/docs/versioned_docs/version-2.8/workflows/config.md @@ -4,7 +4,7 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + --- @@ -14,29 +14,29 @@ Before you can create your cluster, you need to configure the identity and acces You can generate a configuration file for your CSP by using the following CLI command: - - + + ```bash constellation config generate azure ``` - - + + ```bash constellation config generate gcp ``` - - + + ```bash constellation config generate aws ``` - - + + This creates the file `constellation-conf.yaml` in the current directory. @@ -47,25 +47,25 @@ You can also automatically generate a configuration file by adding the `--genera ## Choosing a VM type Constellation supports the following VM types: - - + + By default, Constellation uses `Standard_DC4as_v5` CVMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. 
For CVMs, any VM type with a minimum of 4 vCPUs from the [DCasv5 & DCadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series) or [ECasv5 & ECadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/ecasv5-ecadsv5-series) families is supported. You can also run `constellation config instance-types` to get the list of all supported options. - - + + By default, Constellation uses `n2d-standard-4` VMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. Supported are all machines with a minimum of 4 vCPUs from the [C2D](https://cloud.google.com/compute/docs/compute-optimized-machines#c2d_machine_types) or [N2D](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines) family. You can run `constellation config instance-types` to get the list of all supported options. - - + + By default, Constellation uses `m6a.xlarge` VMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. Supported are all nitroTPM-enabled machines with a minimum of 4 vCPUs (`xlarge` or larger). Refer to the [list of nitroTPM-enabled instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enable-nitrotpm-prerequisites.html) or run `constellation config instance-types` to get the list of all supported options. - - + + Fill the desired VM type into the **instanceType** field in the `constellation-conf.yml` file. @@ -79,8 +79,8 @@ See also Constellation's [Kubernetes support policy](../architecture/versions.md You can create an IAM configuration for your cluster automatically using the `constellation iam create` command. If you haven't generated a configuration file yet, you can do so by adding the `--generate-config` flag to the command. This creates a configuration file and populates it with the created IAM values. - - + + You must be authenticated with the [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). @@ -104,23 +104,23 @@ Paste the output into the corresponding fields of the `constellation-conf.yaml` Since `clientSecretValue` is a sensitive value, you can leave it empty in the configuration file and pass it via an environment variable instead. To this end, create the environment variable `CONSTELL_AZURE_CLIENT_SECRET_VALUE` and set it to the secret value. ::: - - + + You must be authenticated with the [GCP CLI](https://cloud.google.com/sdk/gcloud) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). ```bash -constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --serviceAccountID=constell-test +constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west2-a --serviceAccountID=constell-test ``` -This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. +This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west2-a` creating a new service account `constell-test`. Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. 
You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `N2D`. Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - + + You must be authenticated with the [AWS CLI](https://aws.amazon.com/en/cli/) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). @@ -144,16 +144,16 @@ You can find a list of all [regions in AWS's documentation](https://docs.aws.ama Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - + +
Alternatively, you can manually create the IAM configuration on your CSP. The following describes the configuration fields and how you obtain the required information or create the required resources. - - + + * **subscription**: The UUID of your Azure subscription, e.g., `8b8bd01f-efd9-4113-9bd1-c82137c32da7`. @@ -198,19 +198,19 @@ The following describes the configuration fields and how you obtain the required Since this is a sensitive value, alternatively you can leave `clientSecretValue` empty in the configuration file and pass it via an environment variable instead. To this end, create the environment variable `CONSTELL_AZURE_CLIENT_SECRET_VALUE` and set it to the secret value. ::: - + - + * **project**: The ID of your GCP project, e.g., `constellation-129857`. You can find it on the [welcome screen of your GCP project](https://console.cloud.google.com/welcome). For more information refer to [Google's documentation](https://support.google.com/googleapi/answer/7014113). -* **region**: The GCP region you want to deploy your cluster in, e.g., `us-central1`. +* **region**: The GCP region you want to deploy your cluster in, e.g., `us-west1`. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). -* **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-central1-a`. +* **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-west1-a`. You can find a [list of all zones in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). @@ -224,9 +224,9 @@ The following describes the configuration fields and how you obtain the required Afterward, create and download a new JSON key for this service account. Place the downloaded file in your Constellation workspace, and set the config parameter to the filename, e.g., `constellation-129857-15343dba46cb.json`. - + - + * **region**: The name of your chosen AWS data center region, e.g., `us-east-2`. @@ -257,9 +257,9 @@ The following describes the configuration fields and how you obtain the required Alternatively, you can create the AWS profile with a tool of your choice. Use the JSON policy in [main.tf](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam/main.tf) in the resource `aws_iam_policy.worker_node_policy`. - + - +
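Purely as an illustration of the "tool of your choice" route mentioned above, creating such a policy with the AWS CLI could look like this; the policy name and the local JSON file are placeholders you would fill from the referenced main.tf, not values defined by Constellation:

```bash
# Hypothetical sketch: create the worker-node policy from a locally saved JSON document.
aws iam create-policy \
  --policy-name constellation-worker-node-policy \
  --policy-document file://worker_node_policy.json
```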
Now that you've configured your CSP, you can [create your cluster](./create.md). diff --git a/docs/versioned_docs/version-2.8/workflows/create.md b/docs/versioned_docs/version-2.8/workflows/create.md index 5c4dd2948..aff59bb6a 100644 --- a/docs/versioned_docs/version-2.8/workflows/create.md +++ b/docs/versioned_docs/version-2.8/workflows/create.md @@ -4,7 +4,7 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + --- @@ -26,8 +26,8 @@ Before you create the cluster, make sure to have a [valid configuration file](./ ### Create - - + + Choose the initial size of your cluster. The following command creates a cluster with one control-plane and two worker nodes: @@ -40,8 +40,8 @@ For details on the flags, consult the command help via `constellation create -h` *create* stores your cluster's state in a [`constellation-terraform`](../architecture/orchestration.md#cluster-creation-process) directory in your workspace. - - + + Terraform allows for an easier GitOps integration as well as meeting regulatory requirements. Since the Constellation CLI also uses Terraform under the hood, you can reuse the same Terraform files. @@ -80,8 +80,8 @@ CONSTELL_CSP=$(cat constellation-conf.yaml | yq ".provider | keys | .[0]") jq --null-input --arg cloudprovider "$CONSTELL_CSP" --arg ip "$CONSTELL_IP" --arg initsecret "$CONSTELL_INIT_SECRET" '{"cloudprovider":$cloudprovider,"ip":$ip,"initsecret":$initsecret}' > constellation-id.json ``` - - + + ## The *init* step diff --git a/docs/versioned_docs/version-2.8/workflows/recovery.md b/docs/versioned_docs/version-2.8/workflows/recovery.md index 35596b8c9..c26fb32eb 100644 --- a/docs/versioned_docs/version-2.8/workflows/recovery.md +++ b/docs/versioned_docs/version-2.8/workflows/recovery.md @@ -16,8 +16,8 @@ You can check the health status of the nodes via the cloud service provider (CSP Constellation provides logging information on the boot process and status via [cloud logging](troubleshooting.md#cloud-logging). In the following, you'll find detailed descriptions for identifying clusters stuck in recovery for each CSP. - - + + In the Azure portal, find the cluster's resource group. Inside the resource group, open the control plane *Virtual machine scale set* `constellation-scale-set-controlplanes-`. @@ -51,8 +51,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. - - + + First, check that the control plane *Instance Group* has enough members in a *Ready* state. In the GCP Console, go to **Instance Groups** and check the group for the cluster's control plane `-control-plane-`. @@ -87,8 +87,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. - - + + First, open the AWS console to view all Auto Scaling Groups (ASGs) in the region of your cluster. Select the ASG of the control plane `--control-plane` and check that enough members are in a *Running* state. @@ -118,8 +118,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. 
- - + + ## Recover a cluster diff --git a/docs/versioned_docs/version-2.8/workflows/sbom.md b/docs/versioned_docs/version-2.8/workflows/sbom.md index 6c1702dee..c9dc0d5cc 100644 --- a/docs/versioned_docs/version-2.8/workflows/sbom.md +++ b/docs/versioned_docs/version-2.8/workflows/sbom.md @@ -1,6 +1,6 @@ # Consume software bill of materials (SBOMs) - + --- @@ -19,7 +19,7 @@ JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== -----END PUBLIC KEY----- ``` -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). +The public key is also available for download at and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). Make sure the key is available in a file named `cosign.pub` to execute the following examples. ::: @@ -40,7 +40,7 @@ cosign verify-blob --key cosign.pub --signature constellation.spdx.sbom.sig cons ### Container Images -SBOMs for container images are [attached to the image using Cosign](https://docs.sigstore.dev/cosign/signing/other_types/#sboms-software-bill-of-materials) and uploaded to the same registry. +SBOMs for container images are [attached to the image using Cosign](https://docs.sigstore.dev/signing/other_types#sboms-software-bill-of-materials) and uploaded to the same registry. As a consumer, use cosign to download and verify the SBOM: diff --git a/docs/versioned_docs/version-2.8/workflows/scale.md b/docs/versioned_docs/version-2.8/workflows/scale.md index 46b048870..9531e90c9 100644 --- a/docs/versioned_docs/version-2.8/workflows/scale.md +++ b/docs/versioned_docs/version-2.8/workflows/scale.md @@ -48,30 +48,30 @@ kubectl -n kube-system get nodes Alternatively, you can manually scale your cluster up or down: - - + + 1. Find your Constellation resource group. 2. Select the `scale-set-workers`. 3. Go to **settings** and **scaling**. 4. Set the new **instance count** and **save**. - - + + 1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). 2. **Edit** the **worker** instance group. 3. Set the new **number of instances** and **save**. - - + + 1. Go to Auto Scaling Groups and select the worker ASG to scale up. 2. Click **Edit** 3. Set the new (increased) **Desired capacity** and **Update**. - - + + ## Control-plane node scaling @@ -79,30 +79,30 @@ Control-plane nodes can **only be scaled manually and only scaled up**! To increase the number of control-plane nodes, follow these steps: - + - + 1. Find your Constellation resource group. 2. Select the `scale-set-controlplanes`. 3. Go to **settings** and **scaling**. 4. Set the new (increased) **instance count** and **save**. - - + + 1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). 2. **Edit** the **control-plane** instance group. 3. Set the new (increased) **number of instances** and **save**. - - + + 1. Go to Auto Scaling Groups and select the control-plane ASG to scale up. 2. Click **Edit** 3. Set the new (increased) **Desired capacity** and **Update**. - - + + If you scale down the number of control-planes nodes, the removed nodes won't be able to exit the `etcd` cluster correctly. This will endanger the quorum that's required to run a stable Kubernetes control plane. 
diff --git a/docs/versioned_docs/version-2.8/workflows/storage.md b/docs/versioned_docs/version-2.8/workflows/storage.md index be9998676..d0e5b188f 100644 --- a/docs/versioned_docs/version-2.8/workflows/storage.md +++ b/docs/versioned_docs/version-2.8/workflows/storage.md @@ -21,14 +21,14 @@ For more details see [encrypted persistent storage](../architecture/encrypted-st Constellation supports the following drivers, which offer node-level encryption and optional integrity protection. - - + + **Constellation CSI driver for Azure Disk**: Mount Azure [Disk Storage](https://azure.microsoft.com/en-us/services/storage/disks/#overview) into your Constellation cluster. See the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-azuredisk-csi-driver) for more information. Since Azure Disks are mounted as ReadWriteOnce, they're only available to a single pod. - - + + **Constellation CSI driver for GCP Persistent Disk**: Mount [Persistent Disk](https://cloud.google.com/persistent-disk) block storage into your Constellation cluster. @@ -36,8 +36,8 @@ This includes support for [volume snapshots](https://cloud.google.com/kubernetes You can use them to bring a volume back to a prior state or provision new volumes. Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-gcp-compute-persistent-disk-csi-driver) for information about the configuration. - - + + :::caution @@ -47,8 +47,8 @@ You may use other (non-confidential) CSI drivers that are compatible with Kubern ::: - - + + Note that in case the options above aren't a suitable solution for you, Constellation is compatible with all other CSI-based storage options. For example, you can use [Azure Files](https://docs.microsoft.com/en-us/azure/storage/files/storage-files-introduction) or [GCP Filestore](https://cloud.google.com/filestore) with Constellation out of the box. Constellation is just not providing transparent encryption on the node level for these storage types yet. @@ -57,8 +57,8 @@ Note that in case the options above aren't a suitable solution for you, Constell The Constellation CLI automatically installs Constellation's CSI driver for the selected CSP in your cluster. If you don't need a CSI driver or wish to deploy your own, you can disable the automatic installation by setting `deployCSIDriver` to `false` in your Constellation config file. - - + + Azure comes with two storage classes by default. @@ -86,8 +86,8 @@ Note that volume expansion isn't supported for integrity-protected disks. ::: - - + + GCP comes with two storage classes by default. @@ -115,8 +115,8 @@ Note that volume expansion isn't supported for integrity-protected disks. ::: - - + + :::caution @@ -126,8 +126,8 @@ You may use other (non-confidential) CSI drivers that are compatible with Kubern ::: - - + + 1. Create a [persistent volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) @@ -186,8 +186,8 @@ The default storage class is responsible for all persistent volume claims that d Constellation creates a storage class with encryption enabled and sets this as the default class. In case you wish to change it, follow the steps below: - - + + 1. 
List the storage classes in your cluster: @@ -233,8 +233,8 @@ In case you wish to change it, follow the steps below: integrity-encrypted-rwo (default) azuredisk.csi.confidential.cloud Delete Immediate false 1d ``` - - + + 1. List the storage classes in your cluster: @@ -280,8 +280,8 @@ In case you wish to change it, follow the steps below: integrity-encrypted-rwo (default) gcp.csi.confidential.cloud Delete Immediate false 1d ``` - - + + :::caution @@ -291,5 +291,5 @@ You may use other (non-confidential) CSI drivers that are compatible with Kubern ::: - - + + diff --git a/docs/versioned_docs/version-2.8/workflows/terminate.md b/docs/versioned_docs/version-2.8/workflows/terminate.md index f33489ca5..647eadb42 100644 --- a/docs/versioned_docs/version-2.8/workflows/terminate.md +++ b/docs/versioned_docs/version-2.8/workflows/terminate.md @@ -4,7 +4,7 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + --- @@ -16,8 +16,8 @@ All ephemeral storage and state of your cluster will be lost. Make sure any data ::: - - + + Terminate the cluster by running: ```bash @@ -40,8 +40,8 @@ resources manually. Just run the `terminate` command again afterward to continue ::: - - + + Terminate the cluster by running: ```bash @@ -56,5 +56,5 @@ rm constellation-id.json constellation-admin.conf Only the `constellation-mastersecret.json` and the configuration file remain. - - + + diff --git a/docs/versioned_docs/version-2.8/workflows/troubleshooting.md b/docs/versioned_docs/version-2.8/workflows/troubleshooting.md index cd095be28..2ddf3335d 100644 --- a/docs/versioned_docs/version-2.8/workflows/troubleshooting.md +++ b/docs/versioned_docs/version-2.8/workflows/troubleshooting.md @@ -75,8 +75,8 @@ To provide information during early stages of a node's boot process, Constellati You can view this information in the following places: - - + + 1. In your Azure subscription find the Constellation resource group. 2. Inside the resource group find the Application Insights resource called `constellation-insights-*`. @@ -86,8 +86,8 @@ You can view this information in the following places: To **find the disk UUIDs** use the following query: `traces | where message contains "Disk UUID"` - - + + 1. Select the project that hosts Constellation. 2. Go to the `Compute Engine` service. @@ -102,16 +102,16 @@ Constellation uses the default bucket to store logs. Its [default retention peri ::: - - + + 1. Open [AWS CloudWatch](https://console.aws.amazon.com/cloudwatch/home) 2. Select [Log Groups](https://console.aws.amazon.com/cloudwatch/home#logsV2:log-groups) 3. Select the log group that matches the name of your cluster. 4. Select the log stream for control or worker type nodes. - - + + ### Node shell access diff --git a/docs/versioned_docs/version-2.8/workflows/trusted-launch.md b/docs/versioned_docs/version-2.8/workflows/trusted-launch.md index 11d0a096c..13bd63ba6 100644 --- a/docs/versioned_docs/version-2.8/workflows/trusted-launch.md +++ b/docs/versioned_docs/version-2.8/workflows/trusted-launch.md @@ -14,7 +14,7 @@ Constellation supports trusted launch VMs with instance types `Standard_D*_v4` a Azure currently doesn't support [community galleries for trusted launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/share-gallery-community). Thus, you need to manually import the Constellation node image into your cloud subscription. 
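A rough sketch of the manual download and preparation steps with the Azure CLI; the image URL and the `constellation-images` resource group are the ones used below, and the location is only an example:

```bash
# Download the trusted-launch node image
curl -fLO https://cdn.confidential.cloud/constellation/images/azure/trusted-launch/v2.2.0/constellation.img

# Create the resource group that the import expects (pick your own location)
az group create --name constellation-images --location westeurope
```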
-The latest image is available at `https://cdn.confidential.cloud/constellation/images/azure/trusted-launch/v2.2.0/constellation.img`. Simply adjust the version number to download a newer version. +The latest image is available at . Simply adjust the version number to download a newer version. After you've downloaded the image, create a resource group `constellation-images` in your Azure subscription and import the image. You can use a script to do this: @@ -26,7 +26,6 @@ AZURE_IMAGE_VERSION=2.2.0 AZURE_RESOURCE_GROUP_NAME=constellation-images AZURE_I ``` The script creates the following resources: - 1. A new image gallery with the default name `constellation-import` 2. A new image definition with the default name `constellation` 3. The actual image with the provided version. In this case `2.2.0` diff --git a/docs/versioned_docs/version-2.8/workflows/verify-cli.md b/docs/versioned_docs/version-2.8/workflows/verify-cli.md index aa2df4be4..1280c51b0 100644 --- a/docs/versioned_docs/version-2.8/workflows/verify-cli.md +++ b/docs/versioned_docs/version-2.8/workflows/verify-cli.md @@ -4,11 +4,11 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + --- -Edgeless Systems uses [sigstore](https://www.sigstore.dev/) and [SLSA](https://slsa.dev) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/cosign/signing/overview/), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at `https://rekor.sigstore.dev`. +Edgeless Systems uses [sigstore](https://www.sigstore.dev/) and [SLSA](https://slsa.dev) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/signing/quickstart), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at . :::note The public key for Edgeless Systems' long-term code-signing key is: @@ -20,7 +20,7 @@ JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== -----END PUBLIC KEY----- ``` -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). +The public key is also available for download at and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). ::: The Rekor transparency log is a public append-only ledger that verifies and records signatures and associated metadata. The Rekor transparency log enables everyone to observe the sequence of (software) signatures issued by Edgeless Systems and many other parties. The transparency log allows for the public identification of dubious or malicious signatures. @@ -33,7 +33,7 @@ You don't need to verify the Constellation node images. This is done automatical ## Verify the signature -First, [install the Cosign CLI](https://docs.sigstore.dev/cosign/system_config/installation/). 
Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example: +First, [install the Cosign CLI](https://docs.sigstore.dev/system_config/installation). Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example: ```shell-session $ cosign verify-blob --key https://edgeless.systems/es.pub --signature constellation-linux-amd64.sig constellation-linux-amd64 diff --git a/docs/versioned_docs/version-2.9/architecture/attestation.md b/docs/versioned_docs/version-2.9/architecture/attestation.md index 592063193..07ac3aa72 100644 --- a/docs/versioned_docs/version-2.9/architecture/attestation.md +++ b/docs/versioned_docs/version-2.9/architecture/attestation.md @@ -121,8 +121,8 @@ Constellation allows to specify in the config which measurements should be enfor Enforcing non-reproducible measurements controlled by the cloud provider means that changes in these values require manual updates to the cluster's config. By default, Constellation only enforces measurements that are stable values produced by the infrastructure or by Constellation directly. - - + + Constellation uses the [vTPM](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch#vtpm) feature of Azure CVMs for runtime measurements. This vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. @@ -152,8 +152,8 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + Constellation uses the [vTPM](https://cloud.google.com/compute/confidential-vm/docs/about-cvm) feature of CVMs on GCP for runtime measurements. Note that this vTPM doesn't run inside the hardware-protected CVM context, but is emulated by the hypervisor. @@ -185,8 +185,8 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + Constellation uses the [vTPM](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html) (NitroTPM) feature of the [AWS Nitro System](http://aws.amazon.com/ec2/nitro/) on AWS for runtime measurements. @@ -217,16 +217,16 @@ The latter means that the value can be generated offline and compared to the one | 15 | ClusterID | Constellation Bootstrapper | Yes | | 16–23 | Unused | - | - | - - + + ### CVM verification To verify the integrity of the received attestation statement, a chain of trust from the CVM technology to the interface providing the statement has to be established. For verification of the CVM technology, Constellation may expose additional options in its config file. - - + + On Azure, AMD SEV-SNP is used to provide runtime encryption to the VMs. An SEV-SNP attestation report is used to establish trust in the vTPM running inside the VM. @@ -248,18 +248,18 @@ You may customize certain parameters for verification of the attestation stateme More explicitly, it controls the verification of the `IDKeyDigest` value in the SEV-SNP attestation report. You can provide a list of accepted key digests and specify a policy on how this list is compared against the reported `IDKeyDigest`. - - + + There is no additional configuration available for GCP. - - + + There is no additional configuration available for AWS. 
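If you want to inspect the reference values behind the measurements and verification options described above for your chosen image, the CLI can fetch and verify them. A sketch, assuming a generated `constellation-conf.yaml` in the current working directory:

```bash
# Download the signed measurement values for the configured image and write them into the config
constellation config fetch-measurements
```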
- - + + ## Cluster attestation diff --git a/docs/versioned_docs/version-2.9/architecture/keys.md b/docs/versioned_docs/version-2.9/architecture/keys.md index 553d9d4e2..f2c8c3fba 100644 --- a/docs/versioned_docs/version-2.9/architecture/keys.md +++ b/docs/versioned_docs/version-2.9/architecture/keys.md @@ -105,7 +105,7 @@ Initially, it will support the following KMSs: * [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/#product-overview) * [KMIP-compatible KMS](https://www.oasis-open.org/committees/tc_home.php?wg_abbrev=kmip) -Storing the keys in Cloud KMS of AWS, Azure, or GCP binds the key usage to the particular cloud identity access management (IAM). +Storing the keys in Cloud KMS of AWS, GCP, or Azure binds the key usage to the particular cloud identity access management (IAM). In the future, Constellation will support remote attestation-based access policies for Cloud KMS once available. Note that using a Cloud KMS limits the isolation and protection to the guarantees of the particular offering. diff --git a/docs/versioned_docs/version-2.9/getting-started/first-steps-local.md b/docs/versioned_docs/version-2.9/getting-started/first-steps-local.md index f9cfa5cd3..8fcd71811 100644 --- a/docs/versioned_docs/version-2.9/getting-started/first-steps-local.md +++ b/docs/versioned_docs/version-2.9/getting-started/first-steps-local.md @@ -45,8 +45,8 @@ sudo iptables -P FORWARD ACCEPT ## Create a cluster - - + + With the `constellation mini` command, you can deploy and test Constellation locally. This mode is called MiniConstellation. Conceptually, MiniConstellation is similar to [MicroK8s](https://microk8s.io/), [K3s](https://k3s.io/), and [minikube](https://minikube.sigs.k8s.io/docs/). @@ -74,8 +74,8 @@ constellation mini up This will configure your current directory as the [workspace](../architecture/orchestration.md#workspaces) for this cluster. All `constellation` commands concerning this cluster need to be issued from this directory. - - + + With the QEMU provider, you can create a local Constellation cluster as if it were in the cloud. The provider uses [QEMU](https://www.qemu.org/) to create multiple VMs for the cluster nodes, which interact with each other. @@ -153,8 +153,8 @@ attaching persistent storage, or autoscaling aren't available. export KUBECONFIG="$PWD/constellation-admin.conf" ``` - - + + ## Connect to the cluster @@ -207,8 +207,8 @@ worker-0 Ready 32s v1.24.6 ## Terminate your cluster - - + + Once you are done, you can clean up the created resources using the following command: @@ -219,8 +219,8 @@ constellation mini down This will destroy your cluster and clean up your workspace. The VM image and cluster configuration file (`constellation-conf.yaml`) will be kept and may be reused to create new clusters. - - + + Once you are done, you can clean up the created resources using the following command: @@ -248,8 +248,8 @@ Your Constellation cluster was terminated successfully. This will destroy your cluster and clean up your workspace. The VM image and cluster configuration file (`constellation-conf.yaml`) will be kept and may be reused to create new clusters. 
- - + + ## Troubleshooting diff --git a/docs/versioned_docs/version-2.9/getting-started/first-steps.md b/docs/versioned_docs/version-2.9/getting-started/first-steps.md index 6b0a06a06..0329c5776 100644 --- a/docs/versioned_docs/version-2.9/getting-started/first-steps.md +++ b/docs/versioned_docs/version-2.9/getting-started/first-steps.md @@ -15,39 +15,39 @@ If you encounter any problem with the following steps, make sure to use the [lat 1. Create the [configuration file](../workflows/config.md) for your cloud provider. - + - + ```bash constellation config generate azure ``` - + - + ```bash constellation config generate gcp ``` - + - + ```bash constellation config generate aws ``` - + - + 2. Create your [IAM configuration](../workflows/config.md#creating-an-iam-configuration). - + - + ```bash constellation iam create azure --region=westus --resourceGroup=constellTest --servicePrincipal=spTest --update-config @@ -62,21 +62,21 @@ If you encounter any problem with the following steps, make sure to use the [lat * `westeurope` * `southeastasia` - + - + ```bash - constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --serviceAccountID=constell-test --update-config + constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west2-a --serviceAccountID=constell-test --update-config ``` - This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. + This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west2-a` creating a new service account `constell-test`. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `C2D` or `N2D`. - + - + ```bash constellation iam create aws --zone=us-east-2a --prefix=constellTest --update-config @@ -103,8 +103,8 @@ If you encounter any problem with the following steps, make sure to use the [lat You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). - - + + :::tip To learn about all options you have for managing IAM resources and Constellation configuration, see the [Configuration workflow](../workflows/config.md). diff --git a/docs/versioned_docs/version-2.9/getting-started/install.md b/docs/versioned_docs/version-2.9/getting-started/install.md index 2aa274b98..37940d0a2 100644 --- a/docs/versioned_docs/version-2.9/getting-started/install.md +++ b/docs/versioned_docs/version-2.9/getting-started/install.md @@ -11,15 +11,15 @@ Make sure the following requirements are met: - Your machine is running Linux or macOS - You have admin rights on your machine - [kubectl](https://kubernetes.io/docs/tasks/tools/) is installed -- Your CSP is Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) +- Your CSP is Microsoft Azure, Google Cloud Platform (GCP), or Amazon Web Services (AWS) ## Install the Constellation CLI The CLI executable is available at [GitHub](https://github.com/edgelesssys/constellation/releases). 
Install it with the following commands: - - + + 1. Download the CLI: @@ -35,8 +35,8 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-linux-amd64 /usr/local/bin/constellation ``` - - + + 1. Download the CLI: @@ -52,9 +52,10 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-linux-arm64 /usr/local/bin/constellation ``` - - + + + 1. Download the CLI: @@ -70,9 +71,11 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-darwin-arm64 /usr/local/bin/constellation ``` - - + + + + 1. Download the CLI: @@ -88,8 +91,8 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c sudo install constellation-darwin-amd64 /usr/local/bin/constellation ``` - - + + :::tip The CLI supports autocompletion for various shells. To set it up, run `constellation completion` and follow the given steps. @@ -105,42 +108,39 @@ If you don't have a cloud subscription, you can also set up a [local Constellati ### Required permissions - - + + The following [resource providers need to be registered](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/resource-providers-and-types#register-resource-provider) in your subscription: - -- `Microsoft.Attestation` -- `Microsoft.Compute` -- `Microsoft.Insights` -- `Microsoft.ManagedIdentity` -- `Microsoft.Network` +* `Microsoft.Attestation` \[2] +* `Microsoft.Compute` +* `Microsoft.Insights` +* `Microsoft.ManagedIdentity` +* `Microsoft.Network` By default, Constellation tries to register these automatically if they haven't been registered before. To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: - -- `*/register/action` \[1] -- `Microsoft.Authorization/roleAssignments/*` -- `Microsoft.Authorization/roleDefinitions/*` -- `Microsoft.ManagedIdentity/userAssignedIdentities/*` -- `Microsoft.Resources/subscriptions/resourcegroups/*` +* `*/register/action` \[1] +* `Microsoft.Authorization/roleAssignments/*` +* `Microsoft.Authorization/roleDefinitions/*` +* `Microsoft.ManagedIdentity/userAssignedIdentities/*` +* `Microsoft.Resources/subscriptions/resourcegroups/*` The built-in `Owner` role is a superset of these permissions. 
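If you'd rather register the resource providers listed above yourself instead of relying on the automatic registration, a sketch using the Azure CLI (assuming you're logged in to the target subscription):

```bash
# Register each provider required by Constellation
for ns in Microsoft.Attestation Microsoft.Compute Microsoft.Insights Microsoft.ManagedIdentity Microsoft.Network; do
  az provider register --namespace "$ns"
done
```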
To [create a Constellation cluster](../workflows/create.md#the-create-step), you need the following permissions: - -- `Microsoft.Attestation/attestationProviders/*` -- `Microsoft.Compute/virtualMachineScaleSets/*` -- `Microsoft.Insights/components/*` -- `Microsoft.ManagedIdentity/userAssignedIdentities/*` -- `Microsoft.Network/loadBalancers/*` -- `Microsoft.Network/loadBalancers/backendAddressPools/*` -- `Microsoft.Network/networkSecurityGroups/*` -- `Microsoft.Network/publicIPAddresses/*` -- `Microsoft.Network/virtualNetworks/*` -- `Microsoft.Network/virtualNetworks/subnets/*` -- `Microsoft.Network/natGateways/*` +* `Microsoft.Attestation/attestationProviders/*` \[2] +* `Microsoft.Compute/virtualMachineScaleSets/*` +* `Microsoft.Insights/components/*` +* `Microsoft.ManagedIdentity/userAssignedIdentities/*` +* `Microsoft.Network/loadBalancers/*` +* `Microsoft.Network/loadBalancers/backendAddressPools/*` +* `Microsoft.Network/networkSecurityGroups/*` +* `Microsoft.Network/publicIPAddresses/*` +* `Microsoft.Network/virtualNetworks/*` +* `Microsoft.Network/virtualNetworks/subnets/*` +* `Microsoft.Network/natGateways/*` The built-in `Contributor` role is a superset of these permissions. @@ -148,91 +148,91 @@ Follow Microsoft's guide on [understanding](https://learn.microsoft.com/en-us/az 1: You can omit `*/register/Action` if the resource providers mentioned above are already registered and the `ARM_SKIP_PROVIDER_REGISTRATION` environment variable is set to `true` when creating the IAM configuration. - - +2: You can omit `Microsoft.Attestation/attestationProviders/*` and the registration of `Microsoft.Attestation` if `EnforceIDKeyDigest` isn't set to `MAAFallback` in the [config file](../workflows/config.md#configure-your-cluster). + + + Create a new project for Constellation or use an existing one. Enable the [Compute Engine API](https://console.cloud.google.com/apis/library/compute.googleapis.com) on it. To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: - -- `iam.serviceAccountKeys.create` -- `iam.serviceAccountKeys.delete` -- `iam.serviceAccountKeys.get` -- `iam.serviceAccounts.create` -- `iam.serviceAccounts.delete` -- `iam.serviceAccounts.get` -- `resourcemanager.projects.getIamPolicy` -- `resourcemanager.projects.setIamPolicy` +* `iam.serviceAccountKeys.create` +* `iam.serviceAccountKeys.delete` +* `iam.serviceAccountKeys.get` +* `iam.serviceAccounts.create` +* `iam.serviceAccounts.delete` +* `iam.serviceAccounts.get` +* `resourcemanager.projects.getIamPolicy` +* `resourcemanager.projects.setIamPolicy` Together, the built-in roles `roles/editor` and `roles/resourcemanager.projectIamAdmin` form a superset of these permissions. 
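As an illustration of how these roles might be granted, a sketch with the gcloud CLI; the project ID matches the examples in these docs, and the user is a placeholder:

```bash
# Grant the two built-in roles to the user who will run `constellation iam create`
gcloud projects add-iam-policy-binding yourproject-12345 \
  --member="user:admin@example.com" --role="roles/editor"
gcloud projects add-iam-policy-binding yourproject-12345 \
  --member="user:admin@example.com" --role="roles/resourcemanager.projectIamAdmin"
```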
To [create a Constellation cluster](../workflows/create.md#the-create-step), you need the following permissions: - -- `compute.addresses.createInternal` -- `compute.addresses.deleteInternal` -- `compute.addresses.get` -- `compute.addresses.useInternal` -- `compute.backendServices.create` -- `compute.backendServices.delete` -- `compute.backendServices.get` -- `compute.backendServices.use` -- `compute.disks.create` -- `compute.firewalls.create` -- `compute.firewalls.delete` -- `compute.firewalls.get` -- `compute.globalAddresses.create` -- `compute.globalAddresses.delete` -- `compute.globalAddresses.get` -- `compute.globalAddresses.use` -- `compute.globalForwardingRules.create` -- `compute.globalForwardingRules.delete` -- `compute.globalForwardingRules.get` -- `compute.globalForwardingRules.setLabels` -- `compute.globalOperations.get` -- `compute.healthChecks.create` -- `compute.healthChecks.delete` -- `compute.healthChecks.get` -- `compute.healthChecks.useReadOnly` -- `compute.instanceGroupManagers.create` -- `compute.instanceGroupManagers.delete` -- `compute.instanceGroupManagers.get` -- `compute.instanceGroups.create` -- `compute.instanceGroups.delete` -- `compute.instanceGroups.get` -- `compute.instanceGroups.use` -- `compute.instances.create` -- `compute.instances.setLabels` -- `compute.instances.setMetadata` -- `compute.instances.setTags` -- `compute.instanceTemplates.create` -- `compute.instanceTemplates.delete` -- `compute.instanceTemplates.get` -- `compute.instanceTemplates.useReadOnly` -- `compute.networks.create` -- `compute.networks.delete` -- `compute.networks.get` -- `compute.networks.updatePolicy` -- `compute.routers.create` -- `compute.routers.delete` -- `compute.routers.get` -- `compute.routers.update` -- `compute.subnetworks.create` -- `compute.subnetworks.delete` -- `compute.subnetworks.get` -- `compute.subnetworks.use` -- `compute.targetTcpProxies.create` -- `compute.targetTcpProxies.delete` -- `compute.targetTcpProxies.get` -- `compute.targetTcpProxies.use` -- `iam.serviceAccounts.actAs` +* `compute.addresses.createInternal` +* `compute.addresses.deleteInternal` +* `compute.addresses.get` +* `compute.addresses.useInternal` +* `compute.backendServices.create` +* `compute.backendServices.delete` +* `compute.backendServices.get` +* `compute.backendServices.use` +* `compute.disks.create` +* `compute.firewalls.create` +* `compute.firewalls.delete` +* `compute.firewalls.get` +* `compute.globalAddresses.create` +* `compute.globalAddresses.delete` +* `compute.globalAddresses.get` +* `compute.globalAddresses.use` +* `compute.globalForwardingRules.create` +* `compute.globalForwardingRules.delete` +* `compute.globalForwardingRules.get` +* `compute.globalForwardingRules.setLabels` +* `compute.globalOperations.get` +* `compute.healthChecks.create` +* `compute.healthChecks.delete` +* `compute.healthChecks.get` +* `compute.healthChecks.useReadOnly` +* `compute.instanceGroupManagers.create` +* `compute.instanceGroupManagers.delete` +* `compute.instanceGroupManagers.get` +* `compute.instanceGroups.create` +* `compute.instanceGroups.delete` +* `compute.instanceGroups.get` +* `compute.instanceGroups.use` +* `compute.instances.create` +* `compute.instances.setLabels` +* `compute.instances.setMetadata` +* `compute.instances.setTags` +* `compute.instanceTemplates.create` +* `compute.instanceTemplates.delete` +* `compute.instanceTemplates.get` +* `compute.instanceTemplates.useReadOnly` +* `compute.networks.create` +* `compute.networks.delete` +* `compute.networks.get` +* 
`compute.networks.updatePolicy` +* `compute.routers.create` +* `compute.routers.delete` +* `compute.routers.get` +* `compute.routers.update` +* `compute.subnetworks.create` +* `compute.subnetworks.delete` +* `compute.subnetworks.get` +* `compute.subnetworks.use` +* `compute.targetTcpProxies.create` +* `compute.targetTcpProxies.delete` +* `compute.targetTcpProxies.get` +* `compute.targetTcpProxies.use` +* `iam.serviceAccounts.actAs` Together, the built-in roles `roles/editor`, `roles/compute.instanceAdmin` and `roles/resourcemanager.projectIamAdmin` form a superset of these permissions. Follow Google's guide on [understanding](https://cloud.google.com/iam/docs/understanding-roles) and [assigning roles](https://cloud.google.com/iam/docs/granting-changing-revoking-access). - - + + To set up a Constellation cluster, you need to perform two tasks that require permissions: create the infrastructure and create roles for cluster nodes. Both of these actions can be performed by different users, e.g., an administrator to create roles and a DevOps engineer to create the infrastructure. @@ -367,8 +367,8 @@ The built-in `PowerUserAccess` policy is a superset of these permissions. Follow Amazon's guide on [understanding](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) and [managing policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html). - - + + ### Authentication @@ -378,8 +378,8 @@ You need to authenticate with your CSP. The following lists the required steps f The steps for a *testing* environment are simpler. However, they may expose secrets to the CSP. If in doubt, follow the *production* steps. ::: - - + + **Testing** @@ -395,8 +395,8 @@ az login Other options are described in Azure's [authentication guide](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli). - - + + **Testing** @@ -419,8 +419,8 @@ Use one of the following options on a trusted machine: Follow [Google's guide](https://cloud.google.com/docs/authentication/production#manually) for setting up your credentials. - - + + **Testing** @@ -436,9 +436,10 @@ aws configure Options and first steps are described in the [AWS CLI documentation](https://docs.aws.amazon.com/cli/index.html). - + - + + ## Next steps diff --git a/docs/versioned_docs/version-2.9/overview/clouds.md b/docs/versioned_docs/version-2.9/overview/clouds.md index dfc3d5307..3ccbb0d6d 100644 --- a/docs/versioned_docs/version-2.9/overview/clouds.md +++ b/docs/versioned_docs/version-2.9/overview/clouds.md @@ -31,11 +31,11 @@ This firmware is signed by Azure. The signature is reflected in the remote-attestation statements of CVMs. Thus, the Azure closed-source firmware becomes part of Constellation's trusted computing base (TCB). -\* Recently, [Azure announced the open source paravisor OpenHCL](https://techcommunity.microsoft.com/blog/windowsosplatform/openhcl-the-new-open-source-paravisor/4273172). It's the foundation for fully open source and verifiable CVM firmware. Once Azure provides their CVM firmware with reproducible builds based on OpenHCL, (4) switches from *No* to *Yes*. Constellation will support OpenHCL based firmware on Azure in the future. +\* Recently, Azure [announced](https://techcommunity.microsoft.com/t5/azure-confidential-computing/azure-confidential-vms-using-sev-snp-dcasv5-ecasv5-are-now/ba-p/3573747) the *limited preview* of CVMs with customizable firmware. With this CVM type, (4) switches from *No* to *Yes*. 
Constellation will support customizable firmware on Azure in the future. ## Google Cloud Platform (GCP) -The [CVMs Generally Available in GCP](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview#amd_sev) are based on AMD SEV but don't have SNP features enabled. +The [CVMs Generally Available in GCP](https://cloud.google.com/compute/confidential-vm/docs/create-confidential-vm-instance) are based on AMD SEV but don't have SNP features enabled. CVMs with SEV-SNP enabled are currently in [private preview](https://cloud.google.com/blog/products/identity-security/rsa-snp-vm-more-confidential). Regarding (3), with their SEV-SNP offering Google provides direct access to remote-attestation statements. However, regarding (4), the CVMs still include closed-source firmware. diff --git a/docs/versioned_docs/version-2.9/overview/confidential-kubernetes.md b/docs/versioned_docs/version-2.9/overview/confidential-kubernetes.md index 1441c833a..2b6c6ed17 100644 --- a/docs/versioned_docs/version-2.9/overview/confidential-kubernetes.md +++ b/docs/versioned_docs/version-2.9/overview/confidential-kubernetes.md @@ -23,9 +23,9 @@ With the above, Constellation wraps an entire cluster into one coherent and veri ![Confidential Kubernetes](../_media/concept-constellation.svg) -## Comparison: Managed Kubernetes with CVMs +## Contrast: Managed Kubernetes with CVMs -In comparison, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. Consequently, this approach leaves a large attack surface, as is depicted in the following. +In contrast, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. Consequently, this approach leaves a large attack surface, as is depicted in the following. 
![Concept: Managed Kubernetes plus CVMs](../_media/concept-managed.svg) diff --git a/docs/versioned_docs/version-2.9/overview/performance.md b/docs/versioned_docs/version-2.9/overview/performance.md index aef594c46..9518ad538 100644 --- a/docs/versioned_docs/version-2.9/overview/performance.md +++ b/docs/versioned_docs/version-2.9/overview/performance.md @@ -70,7 +70,7 @@ The following infrastructure configurations was used: This section gives a thorough analysis of the network performance of Constellation, specifically focusing on measuring TCP and UDP bandwidth. The benchmark measured the bandwidth of pod-to-pod and pod-to-service connections between two different nodes using [`iperf`](https://iperf.fr/). -GKE and Constellation on GCP had a maximum network bandwidth of [10 Gbps](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines). +GKE and Constellation on GCP had a maximum network bandwidth of [10 Gbps](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines). AKS with `Standard_D4as_v5` machines had a maximum network bandwidth of [12.5 Gbps](https://learn.microsoft.com/en-us/azure/virtual-machines/dasv5-dadsv5-series#dasv5-series). The Confidential VM equivalent `Standard_DC4as_v5` currently has a network bandwidth of [1.25 Gbps](https://learn.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series#dcasv5-series-products). Therefore, to make the test comparable, both AKS and Constellation on Azure were running with `Standard_DC4as_v5` machines and 1.25 Gbps bandwidth. diff --git a/docs/versioned_docs/version-2.9/overview/product.md b/docs/versioned_docs/version-2.9/overview/product.md index e42596fcc..ba7181aa9 100644 --- a/docs/versioned_docs/version-2.9/overview/product.md +++ b/docs/versioned_docs/version-2.9/overview/product.md @@ -6,6 +6,6 @@ From a security perspective, Constellation implements the [Confidential Kubernet From an operational perspective, Constellation provides the following key features: -* **Native support for different clouds**: Constellation works on Amazon Web Services (AWS), Microsoft Azure, and Google Cloud Platform (GCP). Support for OpenStack-based environments is coming with a future release. Constellation securely interfaces with the cloud infrastructure to provide [cluster autoscaling](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), [dynamic persistent volumes](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/), and [service load balancing](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). +* **Native support for different clouds**: Constellation works on Microsoft Azure, Google Cloud Platform (GCP), and Amazon Web Services (AWS). Support for OpenStack-based environments is coming with a future release. Constellation securely interfaces with the cloud infrastructure to provide [cluster autoscaling](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), [dynamic persistent volumes](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/), and [service load balancing](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer).
* **High availability**: Constellation uses a [multi-master architecture](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/) with a [stacked etcd topology](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/ha-topology/#stacked-etcd-topology) to ensure high availability. * **Integrated Day-2 operations**: Constellation lets you securely [upgrade](../workflows/upgrade.md) your cluster to a new release. It also lets you securely [recover](../workflows/recovery.md) a failed cluster. Both with a single command. diff --git a/docs/versioned_docs/version-2.9/workflows/config.md b/docs/versioned_docs/version-2.9/workflows/config.md index 22a2821d8..f276f3f63 100644 --- a/docs/versioned_docs/version-2.9/workflows/config.md +++ b/docs/versioned_docs/version-2.9/workflows/config.md @@ -4,7 +4,7 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + --- @@ -14,49 +14,49 @@ Before you can create your cluster, you need to configure the identity and acces You can generate a configuration file for your CSP by using the following CLI command: - - + + ```bash constellation config generate azure ``` - - + + ```bash constellation config generate gcp ``` - - + + ```bash constellation config generate aws ``` - - + + This creates the file `constellation-conf.yaml` in the current directory. ## Choosing a VM type Constellation supports the following VM types: - - + + By default, Constellation uses `Standard_DC4as_v5` CVMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. For CVMs, any VM type with a minimum of 4 vCPUs from the [DCasv5 & DCadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series) or [ECasv5 & ECadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/ecasv5-ecadsv5-series) families is supported. You can also run `constellation config instance-types` to get the list of all supported options. - - + + By default, Constellation uses `n2d-standard-4` VMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. Supported are all machines with a minimum of 4 vCPUs from the [C2D](https://cloud.google.com/compute/docs/compute-optimized-machines#c2d_machine_types) or [N2D](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines) family. You can run `constellation config instance-types` to get the list of all supported options. - - + + By default, Constellation uses `m6a.xlarge` VMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. @@ -75,8 +75,8 @@ AWS is currently investigating the issue. SNP-based attestation will be enabled as soon as a fix is verified. ::: - - + + Fill the desired VM type into the **instanceType** field in the `constellation-conf.yml` file. @@ -90,8 +90,8 @@ See also Constellation's [Kubernetes support policy](../architecture/versions.md You can create an IAM configuration for your cluster automatically using the `constellation iam create` command. If you already have a Constellation configuration file, you can add the `--update-config` flag to the command. This writes the needed IAM fields into your configuration. Furthermore, the flag updates the zone/region of the configuration if it hasn't been set yet. 
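For example, to create the GCP IAM resources and write them straight into an existing configuration file, you can reuse the command from the first-steps guide with the `--update-config` flag:

```bash
constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west2-a --serviceAccountID=constell-test --update-config
```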
- - + + You must be authenticated with the [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). @@ -111,23 +111,23 @@ Note that CVMs are currently only supported in a few regions, check [Azure's pro Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - + + You must be authenticated with the [GCP CLI](https://cloud.google.com/sdk/gcloud) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). ```bash -constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west3-a --serviceAccountID=constell-test +constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west2-a --serviceAccountID=constell-test ``` -This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west3-a` creating a new service account `constell-test`. +This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west2-a` creating a new service account `constell-test`. Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `N2D`. Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - + + You must be authenticated with the [AWS CLI](https://aws.amazon.com/en/cli/) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). @@ -151,16 +151,16 @@ You can find a list of all [regions in AWS's documentation](https://docs.aws.ama Paste the output into the corresponding fields of the `constellation-conf.yaml` file. - - + +
Alternatively, you can manually create the IAM configuration on your CSP. The following describes the configuration fields and how you obtain the required information or create the required resources. - - + + * **subscription**: The UUID of your Azure subscription, e.g., `8b8bd01f-efd9-4113-9bd1-c82137c32da7`. @@ -189,19 +189,19 @@ The following describes the configuration fields and how you obtain the required The user-assigned identity is used by instances of the cluster to access other cloud resources. For more information about managed identities refer to [Azure's documentation](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/how-manage-user-assigned-managed-identities). - + - + * **project**: The ID of your GCP project, e.g., `constellation-129857`. You can find it on the [welcome screen of your GCP project](https://console.cloud.google.com/welcome). For more information refer to [Google's documentation](https://support.google.com/googleapi/answer/7014113). -* **region**: The GCP region you want to deploy your cluster in, e.g., `us-central1`. +* **region**: The GCP region you want to deploy your cluster in, e.g., `us-west1`. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). -* **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-central1-a`. +* **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-west1-a`. You can find a [list of all zones in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). @@ -215,9 +215,9 @@ The following describes the configuration fields and how you obtain the required Afterward, create and download a new JSON key for this service account. Place the downloaded file in your Constellation workspace, and set the config parameter to the filename, e.g., `constellation-129857-15343dba46cb.json`. - + - + * **region**: The name of your chosen AWS data center region, e.g., `us-east-2`. @@ -248,9 +248,9 @@ The following describes the configuration fields and how you obtain the required Alternatively, you can create the AWS profile with a tool of your choice. Use the JSON policy in [main.tf](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam/main.tf) in the resource `aws_iam_policy.worker_node_policy`. - + - +
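A rough sketch of what the manual AWS setup might look like with the AWS CLI; `policy.json` is assumed to be a local copy of the JSON policy from `main.tf`, and the profile and policy names are placeholders:

```bash
# Create the worker-node policy from the JSON policy in main.tf
aws iam create-policy --policy-name constellation-worker-node --policy-document file://policy.json

# Store the access key pair for the CLI in a named profile (values are prompted interactively)
aws configure --profile constellation
```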
Now that you've configured your CSP, you can [create your cluster](./create.md). diff --git a/docs/versioned_docs/version-2.9/workflows/create.md b/docs/versioned_docs/version-2.9/workflows/create.md index 5c4dd2948..aff59bb6a 100644 --- a/docs/versioned_docs/version-2.9/workflows/create.md +++ b/docs/versioned_docs/version-2.9/workflows/create.md @@ -4,7 +4,7 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + --- @@ -26,8 +26,8 @@ Before you create the cluster, make sure to have a [valid configuration file](./ ### Create - - + + Choose the initial size of your cluster. The following command creates a cluster with one control-plane and two worker nodes: @@ -40,8 +40,8 @@ For details on the flags, consult the command help via `constellation create -h` *create* stores your cluster's state in a [`constellation-terraform`](../architecture/orchestration.md#cluster-creation-process) directory in your workspace. - - + + Terraform allows for an easier GitOps integration as well as meeting regulatory requirements. Since the Constellation CLI also uses Terraform under the hood, you can reuse the same Terraform files. @@ -80,8 +80,8 @@ CONSTELL_CSP=$(cat constellation-conf.yaml | yq ".provider | keys | .[0]") jq --null-input --arg cloudprovider "$CONSTELL_CSP" --arg ip "$CONSTELL_IP" --arg initsecret "$CONSTELL_INIT_SECRET" '{"cloudprovider":$cloudprovider,"ip":$ip,"initsecret":$initsecret}' > constellation-id.json ``` - - + + ## The *init* step diff --git a/docs/versioned_docs/version-2.9/workflows/recovery.md b/docs/versioned_docs/version-2.9/workflows/recovery.md index 35596b8c9..c26fb32eb 100644 --- a/docs/versioned_docs/version-2.9/workflows/recovery.md +++ b/docs/versioned_docs/version-2.9/workflows/recovery.md @@ -16,8 +16,8 @@ You can check the health status of the nodes via the cloud service provider (CSP Constellation provides logging information on the boot process and status via [cloud logging](troubleshooting.md#cloud-logging). In the following, you'll find detailed descriptions for identifying clusters stuck in recovery for each CSP. - - + + In the Azure portal, find the cluster's resource group. Inside the resource group, open the control plane *Virtual machine scale set* `constellation-scale-set-controlplanes-`. @@ -51,8 +51,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. - - + + First, check that the control plane *Instance Group* has enough members in a *Ready* state. In the GCP Console, go to **Instance Groups** and check the group for the cluster's control plane `-control-plane-`. @@ -87,8 +87,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. - - + + First, open the AWS console to view all Auto Scaling Groups (ASGs) in the region of your cluster. Select the ASG of the control plane `--control-plane` and check that enough members are in a *Running* state. @@ -118,8 +118,8 @@ If this fails due to an unhealthy control plane, you will see log messages simil This means that you have to recover the node manually. 
- - + + ## Recover a cluster diff --git a/docs/versioned_docs/version-2.9/workflows/sbom.md b/docs/versioned_docs/version-2.9/workflows/sbom.md index 6c1702dee..c9dc0d5cc 100644 --- a/docs/versioned_docs/version-2.9/workflows/sbom.md +++ b/docs/versioned_docs/version-2.9/workflows/sbom.md @@ -1,6 +1,6 @@ # Consume software bill of materials (SBOMs) - + --- @@ -19,7 +19,7 @@ JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== -----END PUBLIC KEY----- ``` -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). +The public key is also available for download at and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). Make sure the key is available in a file named `cosign.pub` to execute the following examples. ::: @@ -40,7 +40,7 @@ cosign verify-blob --key cosign.pub --signature constellation.spdx.sbom.sig cons ### Container Images -SBOMs for container images are [attached to the image using Cosign](https://docs.sigstore.dev/cosign/signing/other_types/#sboms-software-bill-of-materials) and uploaded to the same registry. +SBOMs for container images are [attached to the image using Cosign](https://docs.sigstore.dev/signing/other_types#sboms-software-bill-of-materials) and uploaded to the same registry. As a consumer, use cosign to download and verify the SBOM: diff --git a/docs/versioned_docs/version-2.9/workflows/scale.md b/docs/versioned_docs/version-2.9/workflows/scale.md index 63b727c7d..06898ad0c 100644 --- a/docs/versioned_docs/version-2.9/workflows/scale.md +++ b/docs/versioned_docs/version-2.9/workflows/scale.md @@ -51,30 +51,30 @@ kubectl -n kube-system get nodes Alternatively, you can manually scale your cluster up or down: - - + + 1. Find your Constellation resource group. 2. Select the `scale-set-workers`. 3. Go to **settings** and **scaling**. 4. Set the new **instance count** and **save**. - - + + 1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). 2. **Edit** the **worker** instance group. 3. Set the new **number of instances** and **save**. - - + + 1. Go to Auto Scaling Groups and select the worker ASG to scale up. 2. Click **Edit** 3. Set the new (increased) **Desired capacity** and **Update**. - - + + ## Control-plane node scaling @@ -82,30 +82,30 @@ Control-plane nodes can **only be scaled manually and only scaled up**! To increase the number of control-plane nodes, follow these steps: - + - + 1. Find your Constellation resource group. 2. Select the `scale-set-controlplanes`. 3. Go to **settings** and **scaling**. 4. Set the new (increased) **instance count** and **save**. - - + + 1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). 2. **Edit** the **control-plane** instance group. 3. Set the new (increased) **number of instances** and **save**. - - + + 1. Go to Auto Scaling Groups and select the control-plane ASG to scale up. 2. Click **Edit** 3. Set the new (increased) **Desired capacity** and **Update**. - - + + If you scale down the number of control-planes nodes, the removed nodes won't be able to exit the `etcd` cluster correctly. This will endanger the quorum that's required to run a stable Kubernetes control plane. 
diff --git a/docs/versioned_docs/version-2.9/workflows/storage.md b/docs/versioned_docs/version-2.9/workflows/storage.md index 06fbc4de6..9e3d96346 100644 --- a/docs/versioned_docs/version-2.9/workflows/storage.md +++ b/docs/versioned_docs/version-2.9/workflows/storage.md @@ -21,30 +21,30 @@ For more details see [encrypted persistent storage](../architecture/encrypted-st Constellation supports the following drivers, which offer node-level encryption and optional integrity protection. - - + + **Constellation CSI driver for Azure Disk**: Mount Azure [Disk Storage](https://azure.microsoft.com/en-us/services/storage/disks/#overview) into your Constellation cluster. See the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-azuredisk-csi-driver) for more information. Since Azure Disks are mounted as `ReadWriteOnce`, they're only available to a single pod. - - + + **Constellation CSI driver for GCP Persistent Disk**: Mount [Persistent Disk](https://cloud.google.com/persistent-disk) block storage into your Constellation cluster. Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-gcp-compute-persistent-disk-csi-driver) for more information. - - + + **Constellation CSI driver for AWS Elastic Block Store** Mount [Elastic Block Store](https://aws.amazon.com/ebs/) storage volumes into your Constellation cluster. Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-aws-ebs-csi-driver) for more information. - - + + Note that in case the options above aren't a suitable solution for you, Constellation is compatible with all other CSI-based storage options. For example, you can use [AWS EFS](https://docs.aws.amazon.com/en_en/eks/latest/userguide/efs-csi.html), [Azure Files](https://docs.microsoft.com/en-us/azure/storage/files/storage-files-introduction), or [GCP Filestore](https://cloud.google.com/filestore) with Constellation out of the box. Constellation is just not providing transparent encryption on the node level for these storage types yet. @@ -53,8 +53,8 @@ Note that in case the options above aren't a suitable solution for you, Constell The Constellation CLI automatically installs Constellation's CSI driver for the selected CSP in your cluster. If you don't need a CSI driver or wish to deploy your own, you can disable the automatic installation by setting `deployCSIDriver` to `false` in your Constellation config file. - - + + Azure comes with two storage classes by default. @@ -82,8 +82,8 @@ Note that volume expansion isn't supported for integrity-protected disks. ::: - - + + GCP comes with two storage classes by default. @@ -111,8 +111,8 @@ Note that volume expansion isn't supported for integrity-protected disks. ::: - - + + AWS comes with two storage classes by default. @@ -140,8 +140,8 @@ Note that volume expansion isn't supported for integrity-protected disks. ::: - - + + 1. 
Create a [persistent volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) diff --git a/docs/versioned_docs/version-2.9/workflows/terminate.md b/docs/versioned_docs/version-2.9/workflows/terminate.md index f33489ca5..647eadb42 100644 --- a/docs/versioned_docs/version-2.9/workflows/terminate.md +++ b/docs/versioned_docs/version-2.9/workflows/terminate.md @@ -4,7 +4,7 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + --- @@ -16,8 +16,8 @@ All ephemeral storage and state of your cluster will be lost. Make sure any data ::: - - + + Terminate the cluster by running: ```bash @@ -40,8 +40,8 @@ resources manually. Just run the `terminate` command again afterward to continue ::: - - + + Terminate the cluster by running: ```bash @@ -56,5 +56,5 @@ rm constellation-id.json constellation-admin.conf Only the `constellation-mastersecret.json` and the configuration file remain. - - + + diff --git a/docs/versioned_docs/version-2.9/workflows/troubleshooting.md b/docs/versioned_docs/version-2.9/workflows/troubleshooting.md index cd095be28..2ddf3335d 100644 --- a/docs/versioned_docs/version-2.9/workflows/troubleshooting.md +++ b/docs/versioned_docs/version-2.9/workflows/troubleshooting.md @@ -75,8 +75,8 @@ To provide information during early stages of a node's boot process, Constellati You can view this information in the following places: - - + + 1. In your Azure subscription find the Constellation resource group. 2. Inside the resource group find the Application Insights resource called `constellation-insights-*`. @@ -86,8 +86,8 @@ You can view this information in the following places: To **find the disk UUIDs** use the following query: `traces | where message contains "Disk UUID"` - - + + 1. Select the project that hosts Constellation. 2. Go to the `Compute Engine` service. @@ -102,16 +102,16 @@ Constellation uses the default bucket to store logs. Its [default retention peri ::: - - + + 1. Open [AWS CloudWatch](https://console.aws.amazon.com/cloudwatch/home) 2. Select [Log Groups](https://console.aws.amazon.com/cloudwatch/home#logsV2:log-groups) 3. Select the log group that matches the name of your cluster. 4. Select the log stream for control or worker type nodes. - - + + ### Node shell access diff --git a/docs/versioned_docs/version-2.9/workflows/trusted-launch.md b/docs/versioned_docs/version-2.9/workflows/trusted-launch.md index 11d0a096c..13bd63ba6 100644 --- a/docs/versioned_docs/version-2.9/workflows/trusted-launch.md +++ b/docs/versioned_docs/version-2.9/workflows/trusted-launch.md @@ -14,7 +14,7 @@ Constellation supports trusted launch VMs with instance types `Standard_D*_v4` a Azure currently doesn't support [community galleries for trusted launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/share-gallery-community). Thus, you need to manually import the Constellation node image into your cloud subscription. -The latest image is available at `https://cdn.confidential.cloud/constellation/images/azure/trusted-launch/v2.2.0/constellation.img`. Simply adjust the version number to download a newer version. +The latest image is available at . Simply adjust the version number to download a newer version. After you've downloaded the image, create a resource group `constellation-images` in your Azure subscription and import the image. 
You can use a script to do this: @@ -26,7 +26,6 @@ AZURE_IMAGE_VERSION=2.2.0 AZURE_RESOURCE_GROUP_NAME=constellation-images AZURE_I ``` The script creates the following resources: - 1. A new image gallery with the default name `constellation-import` 2. A new image definition with the default name `constellation` 3. The actual image with the provided version. In this case `2.2.0` diff --git a/docs/versioned_docs/version-2.9/workflows/verify-cli.md b/docs/versioned_docs/version-2.9/workflows/verify-cli.md index aa2df4be4..1280c51b0 100644 --- a/docs/versioned_docs/version-2.9/workflows/verify-cli.md +++ b/docs/versioned_docs/version-2.9/workflows/verify-cli.md @@ -4,11 +4,11 @@ This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. ::: - + --- -Edgeless Systems uses [sigstore](https://www.sigstore.dev/) and [SLSA](https://slsa.dev) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/cosign/signing/overview/), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at `https://rekor.sigstore.dev`. +Edgeless Systems uses [sigstore](https://www.sigstore.dev/) and [SLSA](https://slsa.dev) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/signing/quickstart), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at <https://rekor.sigstore.dev>. :::note The public key for Edgeless Systems' long-term code-signing key is: @@ -20,7 +20,7 @@ JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== -----END PUBLIC KEY----- ``` -The public key is also available for download at [https://edgeless.systems/es.pub](https://edgeless.systems/es.pub) and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). +The public key is also available for download at <https://edgeless.systems/es.pub> and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). ::: The Rekor transparency log is a public append-only ledger that verifies and records signatures and associated metadata. The Rekor transparency log enables everyone to observe the sequence of (software) signatures issued by Edgeless Systems and many other parties. The transparency log allows for the public identification of dubious or malicious signatures. @@ -33,7 +33,7 @@ You don't need to verify the Constellation node images. This is done automatical ## Verify the signature -First, [install the Cosign CLI](https://docs.sigstore.dev/cosign/system_config/installation/). Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example: +First, [install the Cosign CLI](https://docs.sigstore.dev/system_config/installation).
Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example: ```shell-session $ cosign verify-blob --key https://edgeless.systems/es.pub --signature constellation-linux-amd64.sig constellation-linux-amd64 diff --git a/docs/versioned_sidebars/version-2.10-sidebars.json b/docs/versioned_sidebars/version-2.10-sidebars.json index 9d47f4b26..02898994d 100644 --- a/docs/versioned_sidebars/version-2.10-sidebars.json +++ b/docs/versioned_sidebars/version-2.10-sidebars.json @@ -40,11 +40,6 @@ "id": "overview/performance/performance" }, "items": [ - { - "type": "doc", - "label": "Compute benchmarks", - "id": "overview/performance/compute" - }, { "type": "doc", "label": "I/O benchmarks", diff --git a/docs/versioned_sidebars/version-2.11-sidebars.json b/docs/versioned_sidebars/version-2.11-sidebars.json index 8e0ad0ffb..17740bcca 100644 --- a/docs/versioned_sidebars/version-2.11-sidebars.json +++ b/docs/versioned_sidebars/version-2.11-sidebars.json @@ -40,11 +40,6 @@ "id": "overview/performance/performance" }, "items": [ - { - "type": "doc", - "label": "Compute benchmarks", - "id": "overview/performance/compute" - }, { "type": "doc", "label": "I/O benchmarks", diff --git a/docs/versioned_sidebars/version-2.12-sidebars.json b/docs/versioned_sidebars/version-2.12-sidebars.json index e4c845754..81aaba77d 100644 --- a/docs/versioned_sidebars/version-2.12-sidebars.json +++ b/docs/versioned_sidebars/version-2.12-sidebars.json @@ -40,11 +40,6 @@ "id": "overview/performance/performance" }, "items": [ - { - "type": "doc", - "label": "Compute benchmarks", - "id": "overview/performance/compute" - }, { "type": "doc", "label": "I/O benchmarks", diff --git a/docs/versioned_sidebars/version-2.13-sidebars.json b/docs/versioned_sidebars/version-2.13-sidebars.json index 6317fc3f0..38caa4ac8 100644 --- a/docs/versioned_sidebars/version-2.13-sidebars.json +++ b/docs/versioned_sidebars/version-2.13-sidebars.json @@ -40,11 +40,6 @@ "id": "overview/performance/performance" }, "items": [ - { - "type": "doc", - "label": "Compute benchmarks", - "id": "overview/performance/compute" - }, { "type": "doc", "label": "I/O benchmarks", diff --git a/docs/versioned_sidebars/version-2.14-sidebars.json b/docs/versioned_sidebars/version-2.14-sidebars.json index ed97049b8..80e7edbea 100644 --- a/docs/versioned_sidebars/version-2.14-sidebars.json +++ b/docs/versioned_sidebars/version-2.14-sidebars.json @@ -40,11 +40,6 @@ "id": "overview/performance/performance" }, "items": [ - { - "type": "doc", - "label": "Compute benchmarks", - "id": "overview/performance/compute" - }, { "type": "doc", "label": "I/O benchmarks", diff --git a/docs/versioned_sidebars/version-2.15-sidebars.json b/docs/versioned_sidebars/version-2.15-sidebars.json index 09b5ec04e..b137f339c 100644 --- a/docs/versioned_sidebars/version-2.15-sidebars.json +++ b/docs/versioned_sidebars/version-2.15-sidebars.json @@ -40,11 +40,6 @@ "id": "overview/performance/performance" }, "items": [ - { - "type": "doc", - "label": "Compute benchmarks", - "id": "overview/performance/compute" - }, { "type": "doc", "label": "I/O benchmarks", diff --git a/docs/versioned_sidebars/version-2.16-sidebars.json b/docs/versioned_sidebars/version-2.16-sidebars.json deleted file mode 100644 index 09b5ec04e..000000000 --- a/docs/versioned_sidebars/version-2.16-sidebars.json +++ /dev/null @@ -1,299 +0,0 @@ -{ - "docs": [ - { - "type": "doc", - "label": "Introduction", - "id": "intro" - }, - { - "type": "category", - 
"label": "Basics", - "link": { - "type": "generated-index" - }, - "items": [ - { - "type": "doc", - "label": "Confidential Kubernetes", - "id": "overview/confidential-kubernetes" - }, - { - "type": "doc", - "label": "Security benefits", - "id": "overview/security-benefits" - }, - { - "type": "doc", - "label": "Product features", - "id": "overview/product" - }, - { - "type": "doc", - "label": "Feature status of clouds", - "id": "overview/clouds" - }, - { - "type": "category", - "label": "Performance", - "link": { - "type": "doc", - "id": "overview/performance/performance" - }, - "items": [ - { - "type": "doc", - "label": "Compute benchmarks", - "id": "overview/performance/compute" - }, - { - "type": "doc", - "label": "I/O benchmarks", - "id": "overview/performance/io" - }, - { - "type": "doc", - "label": "Application benchmarks", - "id": "overview/performance/application" - } - ] - }, - { - "type": "doc", - "label": "License", - "id": "overview/license" - } - ] - }, - { - "type": "category", - "label": "Getting started", - "link": { - "type": "generated-index" - }, - "items": [ - { - "type": "doc", - "label": "Installation", - "id": "getting-started/install" - }, - { - "type": "doc", - "label": "First steps (cloud)", - "id": "getting-started/first-steps" - }, - { - "type": "doc", - "label": "First steps (local)", - "id": "getting-started/first-steps-local" - }, - { - "type": "doc", - "label": "Cloud Marketplaces", - "id": "getting-started/marketplaces" - }, - { - "type": "category", - "label": "Examples", - "link": { - "type": "doc", - "id": "getting-started/examples" - }, - "items": [ - { - "type": "doc", - "label": "Emojivoto", - "id": "getting-started/examples/emojivoto" - }, - { - "type": "doc", - "label": "Online Boutique", - "id": "getting-started/examples/online-boutique" - }, - { - "type": "doc", - "label": "Horizontal Pod Autoscaling", - "id": "getting-started/examples/horizontal-scaling" - }, - { - "type": "doc", - "label": "Filestash with s3proxy", - "id": "getting-started/examples/filestash-s3proxy" - } - ] - } - ] - }, - { - "type": "category", - "label": "Workflows", - "link": { - "type": "generated-index" - }, - "items": [ - { - "type": "doc", - "label": "Verify the CLI", - "id": "workflows/verify-cli" - }, - { - "type": "doc", - "label": "Configure your cluster", - "id": "workflows/config" - }, - { - "type": "doc", - "label": "Create your cluster", - "id": "workflows/create" - }, - { - "type": "doc", - "label": "Scale your cluster", - "id": "workflows/scale" - }, - { - "type": "doc", - "label": "Upgrade your cluster", - "id": "workflows/upgrade" - }, - { - "type": "doc", - "label": "Expose a service", - "id": "workflows/lb" - }, - { - "type": "doc", - "label": "Install cert-manager", - "id": "workflows/cert-manager" - }, - { - "type": "doc", - "label": "Install s3proxy", - "id": "workflows/s3proxy" - }, - { - "type": "doc", - "label": "Terminate your cluster", - "id": "workflows/terminate" - }, - { - "type": "doc", - "label": "Recover your cluster", - "id": "workflows/recovery" - }, - { - "type": "doc", - "label": "Verify your cluster", - "id": "workflows/verify-cluster" - }, - { - "type": "doc", - "label": "Use persistent storage", - "id": "workflows/storage" - }, - { - "type": "doc", - "label": "Use the Terraform provider", - "id": "workflows/terraform-provider" - }, - { - "type": "doc", - "label": "Consume SBOMs", - "id": "workflows/sbom" - }, - { - "type": "doc", - "label": "Troubleshooting", - "id": "workflows/troubleshooting" - } - ] - }, - { - "type": "category", - 
"label": "Architecture", - "link": { - "type": "generated-index" - }, - "items": [ - { - "type": "doc", - "label": "Overview", - "id": "architecture/overview" - }, - { - "type": "doc", - "label": "Cluster orchestration", - "id": "architecture/orchestration" - }, - { - "type": "doc", - "label": "Versions and support", - "id": "architecture/versions" - }, - { - "type": "doc", - "label": "Microservices", - "id": "architecture/microservices" - }, - { - "type": "doc", - "label": "Attestation", - "id": "architecture/attestation" - }, - { - "type": "doc", - "label": "Images", - "id": "architecture/images" - }, - { - "type": "doc", - "label": "Keys and cryptographic primitives", - "id": "architecture/keys" - }, - { - "type": "doc", - "label": "Encrypted persistent storage", - "id": "architecture/encrypted-storage" - }, - { - "type": "doc", - "label": "Networking", - "id": "architecture/networking" - }, - { - "type": "doc", - "label": "Observability", - "id": "architecture/observability" - } - ] - }, - { - "type": "category", - "label": "Reference", - "link": { - "type": "generated-index" - }, - "items": [ - { - "type": "doc", - "label": "CLI", - "id": "reference/cli" - }, - { - "type": "doc", - "label": "Configuration migrations", - "id": "reference/migration" - }, - { - "type": "doc", - "label": "Terraform usage", - "id": "reference/terraform" - }, - { - "type": "doc", - "label": "SLSA adoption", - "id": "reference/slsa" - } - ] - } - ] -} diff --git a/docs/versioned_sidebars/version-2.17-sidebars.json b/docs/versioned_sidebars/version-2.17-sidebars.json deleted file mode 100644 index 09b5ec04e..000000000 --- a/docs/versioned_sidebars/version-2.17-sidebars.json +++ /dev/null @@ -1,299 +0,0 @@ -{ - "docs": [ - { - "type": "doc", - "label": "Introduction", - "id": "intro" - }, - { - "type": "category", - "label": "Basics", - "link": { - "type": "generated-index" - }, - "items": [ - { - "type": "doc", - "label": "Confidential Kubernetes", - "id": "overview/confidential-kubernetes" - }, - { - "type": "doc", - "label": "Security benefits", - "id": "overview/security-benefits" - }, - { - "type": "doc", - "label": "Product features", - "id": "overview/product" - }, - { - "type": "doc", - "label": "Feature status of clouds", - "id": "overview/clouds" - }, - { - "type": "category", - "label": "Performance", - "link": { - "type": "doc", - "id": "overview/performance/performance" - }, - "items": [ - { - "type": "doc", - "label": "Compute benchmarks", - "id": "overview/performance/compute" - }, - { - "type": "doc", - "label": "I/O benchmarks", - "id": "overview/performance/io" - }, - { - "type": "doc", - "label": "Application benchmarks", - "id": "overview/performance/application" - } - ] - }, - { - "type": "doc", - "label": "License", - "id": "overview/license" - } - ] - }, - { - "type": "category", - "label": "Getting started", - "link": { - "type": "generated-index" - }, - "items": [ - { - "type": "doc", - "label": "Installation", - "id": "getting-started/install" - }, - { - "type": "doc", - "label": "First steps (cloud)", - "id": "getting-started/first-steps" - }, - { - "type": "doc", - "label": "First steps (local)", - "id": "getting-started/first-steps-local" - }, - { - "type": "doc", - "label": "Cloud Marketplaces", - "id": "getting-started/marketplaces" - }, - { - "type": "category", - "label": "Examples", - "link": { - "type": "doc", - "id": "getting-started/examples" - }, - "items": [ - { - "type": "doc", - "label": "Emojivoto", - "id": "getting-started/examples/emojivoto" - }, - { - "type": "doc", 
- "label": "Online Boutique", - "id": "getting-started/examples/online-boutique" - }, - { - "type": "doc", - "label": "Horizontal Pod Autoscaling", - "id": "getting-started/examples/horizontal-scaling" - }, - { - "type": "doc", - "label": "Filestash with s3proxy", - "id": "getting-started/examples/filestash-s3proxy" - } - ] - } - ] - }, - { - "type": "category", - "label": "Workflows", - "link": { - "type": "generated-index" - }, - "items": [ - { - "type": "doc", - "label": "Verify the CLI", - "id": "workflows/verify-cli" - }, - { - "type": "doc", - "label": "Configure your cluster", - "id": "workflows/config" - }, - { - "type": "doc", - "label": "Create your cluster", - "id": "workflows/create" - }, - { - "type": "doc", - "label": "Scale your cluster", - "id": "workflows/scale" - }, - { - "type": "doc", - "label": "Upgrade your cluster", - "id": "workflows/upgrade" - }, - { - "type": "doc", - "label": "Expose a service", - "id": "workflows/lb" - }, - { - "type": "doc", - "label": "Install cert-manager", - "id": "workflows/cert-manager" - }, - { - "type": "doc", - "label": "Install s3proxy", - "id": "workflows/s3proxy" - }, - { - "type": "doc", - "label": "Terminate your cluster", - "id": "workflows/terminate" - }, - { - "type": "doc", - "label": "Recover your cluster", - "id": "workflows/recovery" - }, - { - "type": "doc", - "label": "Verify your cluster", - "id": "workflows/verify-cluster" - }, - { - "type": "doc", - "label": "Use persistent storage", - "id": "workflows/storage" - }, - { - "type": "doc", - "label": "Use the Terraform provider", - "id": "workflows/terraform-provider" - }, - { - "type": "doc", - "label": "Consume SBOMs", - "id": "workflows/sbom" - }, - { - "type": "doc", - "label": "Troubleshooting", - "id": "workflows/troubleshooting" - } - ] - }, - { - "type": "category", - "label": "Architecture", - "link": { - "type": "generated-index" - }, - "items": [ - { - "type": "doc", - "label": "Overview", - "id": "architecture/overview" - }, - { - "type": "doc", - "label": "Cluster orchestration", - "id": "architecture/orchestration" - }, - { - "type": "doc", - "label": "Versions and support", - "id": "architecture/versions" - }, - { - "type": "doc", - "label": "Microservices", - "id": "architecture/microservices" - }, - { - "type": "doc", - "label": "Attestation", - "id": "architecture/attestation" - }, - { - "type": "doc", - "label": "Images", - "id": "architecture/images" - }, - { - "type": "doc", - "label": "Keys and cryptographic primitives", - "id": "architecture/keys" - }, - { - "type": "doc", - "label": "Encrypted persistent storage", - "id": "architecture/encrypted-storage" - }, - { - "type": "doc", - "label": "Networking", - "id": "architecture/networking" - }, - { - "type": "doc", - "label": "Observability", - "id": "architecture/observability" - } - ] - }, - { - "type": "category", - "label": "Reference", - "link": { - "type": "generated-index" - }, - "items": [ - { - "type": "doc", - "label": "CLI", - "id": "reference/cli" - }, - { - "type": "doc", - "label": "Configuration migrations", - "id": "reference/migration" - }, - { - "type": "doc", - "label": "Terraform usage", - "id": "reference/terraform" - }, - { - "type": "doc", - "label": "SLSA adoption", - "id": "reference/slsa" - } - ] - } - ] -} diff --git a/docs/versioned_sidebars/version-2.18-sidebars.json b/docs/versioned_sidebars/version-2.18-sidebars.json deleted file mode 100644 index 09b5ec04e..000000000 --- a/docs/versioned_sidebars/version-2.18-sidebars.json +++ /dev/null @@ -1,299 +0,0 @@ -{ - "docs": [ 
- { - "type": "doc", - "label": "Introduction", - "id": "intro" - }, - { - "type": "category", - "label": "Basics", - "link": { - "type": "generated-index" - }, - "items": [ - { - "type": "doc", - "label": "Confidential Kubernetes", - "id": "overview/confidential-kubernetes" - }, - { - "type": "doc", - "label": "Security benefits", - "id": "overview/security-benefits" - }, - { - "type": "doc", - "label": "Product features", - "id": "overview/product" - }, - { - "type": "doc", - "label": "Feature status of clouds", - "id": "overview/clouds" - }, - { - "type": "category", - "label": "Performance", - "link": { - "type": "doc", - "id": "overview/performance/performance" - }, - "items": [ - { - "type": "doc", - "label": "Compute benchmarks", - "id": "overview/performance/compute" - }, - { - "type": "doc", - "label": "I/O benchmarks", - "id": "overview/performance/io" - }, - { - "type": "doc", - "label": "Application benchmarks", - "id": "overview/performance/application" - } - ] - }, - { - "type": "doc", - "label": "License", - "id": "overview/license" - } - ] - }, - { - "type": "category", - "label": "Getting started", - "link": { - "type": "generated-index" - }, - "items": [ - { - "type": "doc", - "label": "Installation", - "id": "getting-started/install" - }, - { - "type": "doc", - "label": "First steps (cloud)", - "id": "getting-started/first-steps" - }, - { - "type": "doc", - "label": "First steps (local)", - "id": "getting-started/first-steps-local" - }, - { - "type": "doc", - "label": "Cloud Marketplaces", - "id": "getting-started/marketplaces" - }, - { - "type": "category", - "label": "Examples", - "link": { - "type": "doc", - "id": "getting-started/examples" - }, - "items": [ - { - "type": "doc", - "label": "Emojivoto", - "id": "getting-started/examples/emojivoto" - }, - { - "type": "doc", - "label": "Online Boutique", - "id": "getting-started/examples/online-boutique" - }, - { - "type": "doc", - "label": "Horizontal Pod Autoscaling", - "id": "getting-started/examples/horizontal-scaling" - }, - { - "type": "doc", - "label": "Filestash with s3proxy", - "id": "getting-started/examples/filestash-s3proxy" - } - ] - } - ] - }, - { - "type": "category", - "label": "Workflows", - "link": { - "type": "generated-index" - }, - "items": [ - { - "type": "doc", - "label": "Verify the CLI", - "id": "workflows/verify-cli" - }, - { - "type": "doc", - "label": "Configure your cluster", - "id": "workflows/config" - }, - { - "type": "doc", - "label": "Create your cluster", - "id": "workflows/create" - }, - { - "type": "doc", - "label": "Scale your cluster", - "id": "workflows/scale" - }, - { - "type": "doc", - "label": "Upgrade your cluster", - "id": "workflows/upgrade" - }, - { - "type": "doc", - "label": "Expose a service", - "id": "workflows/lb" - }, - { - "type": "doc", - "label": "Install cert-manager", - "id": "workflows/cert-manager" - }, - { - "type": "doc", - "label": "Install s3proxy", - "id": "workflows/s3proxy" - }, - { - "type": "doc", - "label": "Terminate your cluster", - "id": "workflows/terminate" - }, - { - "type": "doc", - "label": "Recover your cluster", - "id": "workflows/recovery" - }, - { - "type": "doc", - "label": "Verify your cluster", - "id": "workflows/verify-cluster" - }, - { - "type": "doc", - "label": "Use persistent storage", - "id": "workflows/storage" - }, - { - "type": "doc", - "label": "Use the Terraform provider", - "id": "workflows/terraform-provider" - }, - { - "type": "doc", - "label": "Consume SBOMs", - "id": "workflows/sbom" - }, - { - "type": "doc", - "label": 
"Troubleshooting", - "id": "workflows/troubleshooting" - } - ] - }, - { - "type": "category", - "label": "Architecture", - "link": { - "type": "generated-index" - }, - "items": [ - { - "type": "doc", - "label": "Overview", - "id": "architecture/overview" - }, - { - "type": "doc", - "label": "Cluster orchestration", - "id": "architecture/orchestration" - }, - { - "type": "doc", - "label": "Versions and support", - "id": "architecture/versions" - }, - { - "type": "doc", - "label": "Microservices", - "id": "architecture/microservices" - }, - { - "type": "doc", - "label": "Attestation", - "id": "architecture/attestation" - }, - { - "type": "doc", - "label": "Images", - "id": "architecture/images" - }, - { - "type": "doc", - "label": "Keys and cryptographic primitives", - "id": "architecture/keys" - }, - { - "type": "doc", - "label": "Encrypted persistent storage", - "id": "architecture/encrypted-storage" - }, - { - "type": "doc", - "label": "Networking", - "id": "architecture/networking" - }, - { - "type": "doc", - "label": "Observability", - "id": "architecture/observability" - } - ] - }, - { - "type": "category", - "label": "Reference", - "link": { - "type": "generated-index" - }, - "items": [ - { - "type": "doc", - "label": "CLI", - "id": "reference/cli" - }, - { - "type": "doc", - "label": "Configuration migrations", - "id": "reference/migration" - }, - { - "type": "doc", - "label": "Terraform usage", - "id": "reference/terraform" - }, - { - "type": "doc", - "label": "SLSA adoption", - "id": "reference/slsa" - } - ] - } - ] -} diff --git a/docs/versioned_sidebars/version-2.19-sidebars.json b/docs/versioned_sidebars/version-2.19-sidebars.json deleted file mode 100644 index 09b5ec04e..000000000 --- a/docs/versioned_sidebars/version-2.19-sidebars.json +++ /dev/null @@ -1,299 +0,0 @@ -{ - "docs": [ - { - "type": "doc", - "label": "Introduction", - "id": "intro" - }, - { - "type": "category", - "label": "Basics", - "link": { - "type": "generated-index" - }, - "items": [ - { - "type": "doc", - "label": "Confidential Kubernetes", - "id": "overview/confidential-kubernetes" - }, - { - "type": "doc", - "label": "Security benefits", - "id": "overview/security-benefits" - }, - { - "type": "doc", - "label": "Product features", - "id": "overview/product" - }, - { - "type": "doc", - "label": "Feature status of clouds", - "id": "overview/clouds" - }, - { - "type": "category", - "label": "Performance", - "link": { - "type": "doc", - "id": "overview/performance/performance" - }, - "items": [ - { - "type": "doc", - "label": "Compute benchmarks", - "id": "overview/performance/compute" - }, - { - "type": "doc", - "label": "I/O benchmarks", - "id": "overview/performance/io" - }, - { - "type": "doc", - "label": "Application benchmarks", - "id": "overview/performance/application" - } - ] - }, - { - "type": "doc", - "label": "License", - "id": "overview/license" - } - ] - }, - { - "type": "category", - "label": "Getting started", - "link": { - "type": "generated-index" - }, - "items": [ - { - "type": "doc", - "label": "Installation", - "id": "getting-started/install" - }, - { - "type": "doc", - "label": "First steps (cloud)", - "id": "getting-started/first-steps" - }, - { - "type": "doc", - "label": "First steps (local)", - "id": "getting-started/first-steps-local" - }, - { - "type": "doc", - "label": "Cloud Marketplaces", - "id": "getting-started/marketplaces" - }, - { - "type": "category", - "label": "Examples", - "link": { - "type": "doc", - "id": "getting-started/examples" - }, - "items": [ - { - "type": 
"doc", - "label": "Emojivoto", - "id": "getting-started/examples/emojivoto" - }, - { - "type": "doc", - "label": "Online Boutique", - "id": "getting-started/examples/online-boutique" - }, - { - "type": "doc", - "label": "Horizontal Pod Autoscaling", - "id": "getting-started/examples/horizontal-scaling" - }, - { - "type": "doc", - "label": "Filestash with s3proxy", - "id": "getting-started/examples/filestash-s3proxy" - } - ] - } - ] - }, - { - "type": "category", - "label": "Workflows", - "link": { - "type": "generated-index" - }, - "items": [ - { - "type": "doc", - "label": "Verify the CLI", - "id": "workflows/verify-cli" - }, - { - "type": "doc", - "label": "Configure your cluster", - "id": "workflows/config" - }, - { - "type": "doc", - "label": "Create your cluster", - "id": "workflows/create" - }, - { - "type": "doc", - "label": "Scale your cluster", - "id": "workflows/scale" - }, - { - "type": "doc", - "label": "Upgrade your cluster", - "id": "workflows/upgrade" - }, - { - "type": "doc", - "label": "Expose a service", - "id": "workflows/lb" - }, - { - "type": "doc", - "label": "Install cert-manager", - "id": "workflows/cert-manager" - }, - { - "type": "doc", - "label": "Install s3proxy", - "id": "workflows/s3proxy" - }, - { - "type": "doc", - "label": "Terminate your cluster", - "id": "workflows/terminate" - }, - { - "type": "doc", - "label": "Recover your cluster", - "id": "workflows/recovery" - }, - { - "type": "doc", - "label": "Verify your cluster", - "id": "workflows/verify-cluster" - }, - { - "type": "doc", - "label": "Use persistent storage", - "id": "workflows/storage" - }, - { - "type": "doc", - "label": "Use the Terraform provider", - "id": "workflows/terraform-provider" - }, - { - "type": "doc", - "label": "Consume SBOMs", - "id": "workflows/sbom" - }, - { - "type": "doc", - "label": "Troubleshooting", - "id": "workflows/troubleshooting" - } - ] - }, - { - "type": "category", - "label": "Architecture", - "link": { - "type": "generated-index" - }, - "items": [ - { - "type": "doc", - "label": "Overview", - "id": "architecture/overview" - }, - { - "type": "doc", - "label": "Cluster orchestration", - "id": "architecture/orchestration" - }, - { - "type": "doc", - "label": "Versions and support", - "id": "architecture/versions" - }, - { - "type": "doc", - "label": "Microservices", - "id": "architecture/microservices" - }, - { - "type": "doc", - "label": "Attestation", - "id": "architecture/attestation" - }, - { - "type": "doc", - "label": "Images", - "id": "architecture/images" - }, - { - "type": "doc", - "label": "Keys and cryptographic primitives", - "id": "architecture/keys" - }, - { - "type": "doc", - "label": "Encrypted persistent storage", - "id": "architecture/encrypted-storage" - }, - { - "type": "doc", - "label": "Networking", - "id": "architecture/networking" - }, - { - "type": "doc", - "label": "Observability", - "id": "architecture/observability" - } - ] - }, - { - "type": "category", - "label": "Reference", - "link": { - "type": "generated-index" - }, - "items": [ - { - "type": "doc", - "label": "CLI", - "id": "reference/cli" - }, - { - "type": "doc", - "label": "Configuration migrations", - "id": "reference/migration" - }, - { - "type": "doc", - "label": "Terraform usage", - "id": "reference/terraform" - }, - { - "type": "doc", - "label": "SLSA adoption", - "id": "reference/slsa" - } - ] - } - ] -} diff --git a/docs/versioned_sidebars/version-2.20-sidebars.json b/docs/versioned_sidebars/version-2.20-sidebars.json deleted file mode 100644 index c9937ab4f..000000000 
--- a/docs/versioned_sidebars/version-2.20-sidebars.json +++ /dev/null @@ -1,304 +0,0 @@ -{ - "docs": [ - { - "type": "doc", - "label": "Introduction", - "id": "intro" - }, - { - "type": "category", - "label": "Basics", - "link": { - "type": "generated-index" - }, - "items": [ - { - "type": "doc", - "label": "Confidential Kubernetes", - "id": "overview/confidential-kubernetes" - }, - { - "type": "doc", - "label": "Security benefits", - "id": "overview/security-benefits" - }, - { - "type": "doc", - "label": "Product features", - "id": "overview/product" - }, - { - "type": "doc", - "label": "Feature status of clouds", - "id": "overview/clouds" - }, - { - "type": "category", - "label": "Performance", - "link": { - "type": "doc", - "id": "overview/performance/performance" - }, - "items": [ - { - "type": "doc", - "label": "Compute benchmarks", - "id": "overview/performance/compute" - }, - { - "type": "doc", - "label": "I/O benchmarks", - "id": "overview/performance/io" - }, - { - "type": "doc", - "label": "Application benchmarks", - "id": "overview/performance/application" - } - ] - }, - { - "type": "doc", - "label": "License", - "id": "overview/license" - } - ] - }, - { - "type": "category", - "label": "Getting started", - "link": { - "type": "generated-index" - }, - "items": [ - { - "type": "doc", - "label": "Installation", - "id": "getting-started/install" - }, - { - "type": "doc", - "label": "First steps (cloud)", - "id": "getting-started/first-steps" - }, - { - "type": "doc", - "label": "First steps (local)", - "id": "getting-started/first-steps-local" - }, - { - "type": "doc", - "label": "Cloud Marketplaces", - "id": "getting-started/marketplaces" - }, - { - "type": "category", - "label": "Examples", - "link": { - "type": "doc", - "id": "getting-started/examples" - }, - "items": [ - { - "type": "doc", - "label": "Emojivoto", - "id": "getting-started/examples/emojivoto" - }, - { - "type": "doc", - "label": "Online Boutique", - "id": "getting-started/examples/online-boutique" - }, - { - "type": "doc", - "label": "Horizontal Pod Autoscaling", - "id": "getting-started/examples/horizontal-scaling" - }, - { - "type": "doc", - "label": "Filestash with s3proxy", - "id": "getting-started/examples/filestash-s3proxy" - } - ] - } - ] - }, - { - "type": "category", - "label": "Workflows", - "link": { - "type": "generated-index" - }, - "items": [ - { - "type": "doc", - "label": "Verify the CLI", - "id": "workflows/verify-cli" - }, - { - "type": "doc", - "label": "Configure your cluster", - "id": "workflows/config" - }, - { - "type": "doc", - "label": "Create your cluster", - "id": "workflows/create" - }, - { - "type": "doc", - "label": "Scale your cluster", - "id": "workflows/scale" - }, - { - "type": "doc", - "label": "Upgrade your cluster", - "id": "workflows/upgrade" - }, - { - "type": "doc", - "label": "Expose a service", - "id": "workflows/lb" - }, - { - "type": "doc", - "label": "Install cert-manager", - "id": "workflows/cert-manager" - }, - { - "type": "doc", - "label": "Install s3proxy", - "id": "workflows/s3proxy" - }, - { - "type": "doc", - "label": "Terminate your cluster", - "id": "workflows/terminate" - }, - { - "type": "doc", - "label": "Recover your cluster", - "id": "workflows/recovery" - }, - { - "type": "doc", - "label": "Verify your cluster", - "id": "workflows/verify-cluster" - }, - { - "type": "doc", - "label": "Use persistent storage", - "id": "workflows/storage" - }, - { - "type": "doc", - "label": "Use the Terraform provider", - "id": "workflows/terraform-provider" - }, - { - 
"type": "doc", - "label": "Consume SBOMs", - "id": "workflows/sbom" - }, - { - "type": "doc", - "label": "Reproduce release artifacts", - "id": "workflows/reproducible-builds" - }, - { - "type": "doc", - "label": "Troubleshooting", - "id": "workflows/troubleshooting" - } - ] - }, - { - "type": "category", - "label": "Architecture", - "link": { - "type": "generated-index" - }, - "items": [ - { - "type": "doc", - "label": "Overview", - "id": "architecture/overview" - }, - { - "type": "doc", - "label": "Cluster orchestration", - "id": "architecture/orchestration" - }, - { - "type": "doc", - "label": "Versions and support", - "id": "architecture/versions" - }, - { - "type": "doc", - "label": "Microservices", - "id": "architecture/microservices" - }, - { - "type": "doc", - "label": "Attestation", - "id": "architecture/attestation" - }, - { - "type": "doc", - "label": "Images", - "id": "architecture/images" - }, - { - "type": "doc", - "label": "Keys and cryptographic primitives", - "id": "architecture/keys" - }, - { - "type": "doc", - "label": "Encrypted persistent storage", - "id": "architecture/encrypted-storage" - }, - { - "type": "doc", - "label": "Networking", - "id": "architecture/networking" - }, - { - "type": "doc", - "label": "Observability", - "id": "architecture/observability" - } - ] - }, - { - "type": "category", - "label": "Reference", - "link": { - "type": "generated-index" - }, - "items": [ - { - "type": "doc", - "label": "CLI", - "id": "reference/cli" - }, - { - "type": "doc", - "label": "Configuration migrations", - "id": "reference/migration" - }, - { - "type": "doc", - "label": "Terraform usage", - "id": "reference/terraform" - }, - { - "type": "doc", - "label": "SLSA adoption", - "id": "reference/slsa" - } - ] - } - ] -} diff --git a/docs/versioned_sidebars/version-2.21-sidebars.json b/docs/versioned_sidebars/version-2.21-sidebars.json deleted file mode 100644 index c9937ab4f..000000000 --- a/docs/versioned_sidebars/version-2.21-sidebars.json +++ /dev/null @@ -1,304 +0,0 @@ -{ - "docs": [ - { - "type": "doc", - "label": "Introduction", - "id": "intro" - }, - { - "type": "category", - "label": "Basics", - "link": { - "type": "generated-index" - }, - "items": [ - { - "type": "doc", - "label": "Confidential Kubernetes", - "id": "overview/confidential-kubernetes" - }, - { - "type": "doc", - "label": "Security benefits", - "id": "overview/security-benefits" - }, - { - "type": "doc", - "label": "Product features", - "id": "overview/product" - }, - { - "type": "doc", - "label": "Feature status of clouds", - "id": "overview/clouds" - }, - { - "type": "category", - "label": "Performance", - "link": { - "type": "doc", - "id": "overview/performance/performance" - }, - "items": [ - { - "type": "doc", - "label": "Compute benchmarks", - "id": "overview/performance/compute" - }, - { - "type": "doc", - "label": "I/O benchmarks", - "id": "overview/performance/io" - }, - { - "type": "doc", - "label": "Application benchmarks", - "id": "overview/performance/application" - } - ] - }, - { - "type": "doc", - "label": "License", - "id": "overview/license" - } - ] - }, - { - "type": "category", - "label": "Getting started", - "link": { - "type": "generated-index" - }, - "items": [ - { - "type": "doc", - "label": "Installation", - "id": "getting-started/install" - }, - { - "type": "doc", - "label": "First steps (cloud)", - "id": "getting-started/first-steps" - }, - { - "type": "doc", - "label": "First steps (local)", - "id": "getting-started/first-steps-local" - }, - { - "type": "doc", - "label": 
"Cloud Marketplaces", - "id": "getting-started/marketplaces" - }, - { - "type": "category", - "label": "Examples", - "link": { - "type": "doc", - "id": "getting-started/examples" - }, - "items": [ - { - "type": "doc", - "label": "Emojivoto", - "id": "getting-started/examples/emojivoto" - }, - { - "type": "doc", - "label": "Online Boutique", - "id": "getting-started/examples/online-boutique" - }, - { - "type": "doc", - "label": "Horizontal Pod Autoscaling", - "id": "getting-started/examples/horizontal-scaling" - }, - { - "type": "doc", - "label": "Filestash with s3proxy", - "id": "getting-started/examples/filestash-s3proxy" - } - ] - } - ] - }, - { - "type": "category", - "label": "Workflows", - "link": { - "type": "generated-index" - }, - "items": [ - { - "type": "doc", - "label": "Verify the CLI", - "id": "workflows/verify-cli" - }, - { - "type": "doc", - "label": "Configure your cluster", - "id": "workflows/config" - }, - { - "type": "doc", - "label": "Create your cluster", - "id": "workflows/create" - }, - { - "type": "doc", - "label": "Scale your cluster", - "id": "workflows/scale" - }, - { - "type": "doc", - "label": "Upgrade your cluster", - "id": "workflows/upgrade" - }, - { - "type": "doc", - "label": "Expose a service", - "id": "workflows/lb" - }, - { - "type": "doc", - "label": "Install cert-manager", - "id": "workflows/cert-manager" - }, - { - "type": "doc", - "label": "Install s3proxy", - "id": "workflows/s3proxy" - }, - { - "type": "doc", - "label": "Terminate your cluster", - "id": "workflows/terminate" - }, - { - "type": "doc", - "label": "Recover your cluster", - "id": "workflows/recovery" - }, - { - "type": "doc", - "label": "Verify your cluster", - "id": "workflows/verify-cluster" - }, - { - "type": "doc", - "label": "Use persistent storage", - "id": "workflows/storage" - }, - { - "type": "doc", - "label": "Use the Terraform provider", - "id": "workflows/terraform-provider" - }, - { - "type": "doc", - "label": "Consume SBOMs", - "id": "workflows/sbom" - }, - { - "type": "doc", - "label": "Reproduce release artifacts", - "id": "workflows/reproducible-builds" - }, - { - "type": "doc", - "label": "Troubleshooting", - "id": "workflows/troubleshooting" - } - ] - }, - { - "type": "category", - "label": "Architecture", - "link": { - "type": "generated-index" - }, - "items": [ - { - "type": "doc", - "label": "Overview", - "id": "architecture/overview" - }, - { - "type": "doc", - "label": "Cluster orchestration", - "id": "architecture/orchestration" - }, - { - "type": "doc", - "label": "Versions and support", - "id": "architecture/versions" - }, - { - "type": "doc", - "label": "Microservices", - "id": "architecture/microservices" - }, - { - "type": "doc", - "label": "Attestation", - "id": "architecture/attestation" - }, - { - "type": "doc", - "label": "Images", - "id": "architecture/images" - }, - { - "type": "doc", - "label": "Keys and cryptographic primitives", - "id": "architecture/keys" - }, - { - "type": "doc", - "label": "Encrypted persistent storage", - "id": "architecture/encrypted-storage" - }, - { - "type": "doc", - "label": "Networking", - "id": "architecture/networking" - }, - { - "type": "doc", - "label": "Observability", - "id": "architecture/observability" - } - ] - }, - { - "type": "category", - "label": "Reference", - "link": { - "type": "generated-index" - }, - "items": [ - { - "type": "doc", - "label": "CLI", - "id": "reference/cli" - }, - { - "type": "doc", - "label": "Configuration migrations", - "id": "reference/migration" - }, - { - "type": "doc", - "label": 
"Terraform usage", - "id": "reference/terraform" - }, - { - "type": "doc", - "label": "SLSA adoption", - "id": "reference/slsa" - } - ] - } - ] -} diff --git a/docs/versioned_sidebars/version-2.22-sidebars.json b/docs/versioned_sidebars/version-2.22-sidebars.json deleted file mode 100644 index c9937ab4f..000000000 --- a/docs/versioned_sidebars/version-2.22-sidebars.json +++ /dev/null @@ -1,304 +0,0 @@ -{ - "docs": [ - { - "type": "doc", - "label": "Introduction", - "id": "intro" - }, - { - "type": "category", - "label": "Basics", - "link": { - "type": "generated-index" - }, - "items": [ - { - "type": "doc", - "label": "Confidential Kubernetes", - "id": "overview/confidential-kubernetes" - }, - { - "type": "doc", - "label": "Security benefits", - "id": "overview/security-benefits" - }, - { - "type": "doc", - "label": "Product features", - "id": "overview/product" - }, - { - "type": "doc", - "label": "Feature status of clouds", - "id": "overview/clouds" - }, - { - "type": "category", - "label": "Performance", - "link": { - "type": "doc", - "id": "overview/performance/performance" - }, - "items": [ - { - "type": "doc", - "label": "Compute benchmarks", - "id": "overview/performance/compute" - }, - { - "type": "doc", - "label": "I/O benchmarks", - "id": "overview/performance/io" - }, - { - "type": "doc", - "label": "Application benchmarks", - "id": "overview/performance/application" - } - ] - }, - { - "type": "doc", - "label": "License", - "id": "overview/license" - } - ] - }, - { - "type": "category", - "label": "Getting started", - "link": { - "type": "generated-index" - }, - "items": [ - { - "type": "doc", - "label": "Installation", - "id": "getting-started/install" - }, - { - "type": "doc", - "label": "First steps (cloud)", - "id": "getting-started/first-steps" - }, - { - "type": "doc", - "label": "First steps (local)", - "id": "getting-started/first-steps-local" - }, - { - "type": "doc", - "label": "Cloud Marketplaces", - "id": "getting-started/marketplaces" - }, - { - "type": "category", - "label": "Examples", - "link": { - "type": "doc", - "id": "getting-started/examples" - }, - "items": [ - { - "type": "doc", - "label": "Emojivoto", - "id": "getting-started/examples/emojivoto" - }, - { - "type": "doc", - "label": "Online Boutique", - "id": "getting-started/examples/online-boutique" - }, - { - "type": "doc", - "label": "Horizontal Pod Autoscaling", - "id": "getting-started/examples/horizontal-scaling" - }, - { - "type": "doc", - "label": "Filestash with s3proxy", - "id": "getting-started/examples/filestash-s3proxy" - } - ] - } - ] - }, - { - "type": "category", - "label": "Workflows", - "link": { - "type": "generated-index" - }, - "items": [ - { - "type": "doc", - "label": "Verify the CLI", - "id": "workflows/verify-cli" - }, - { - "type": "doc", - "label": "Configure your cluster", - "id": "workflows/config" - }, - { - "type": "doc", - "label": "Create your cluster", - "id": "workflows/create" - }, - { - "type": "doc", - "label": "Scale your cluster", - "id": "workflows/scale" - }, - { - "type": "doc", - "label": "Upgrade your cluster", - "id": "workflows/upgrade" - }, - { - "type": "doc", - "label": "Expose a service", - "id": "workflows/lb" - }, - { - "type": "doc", - "label": "Install cert-manager", - "id": "workflows/cert-manager" - }, - { - "type": "doc", - "label": "Install s3proxy", - "id": "workflows/s3proxy" - }, - { - "type": "doc", - "label": "Terminate your cluster", - "id": "workflows/terminate" - }, - { - "type": "doc", - "label": "Recover your cluster", - "id": 
"workflows/recovery" - }, - { - "type": "doc", - "label": "Verify your cluster", - "id": "workflows/verify-cluster" - }, - { - "type": "doc", - "label": "Use persistent storage", - "id": "workflows/storage" - }, - { - "type": "doc", - "label": "Use the Terraform provider", - "id": "workflows/terraform-provider" - }, - { - "type": "doc", - "label": "Consume SBOMs", - "id": "workflows/sbom" - }, - { - "type": "doc", - "label": "Reproduce release artifacts", - "id": "workflows/reproducible-builds" - }, - { - "type": "doc", - "label": "Troubleshooting", - "id": "workflows/troubleshooting" - } - ] - }, - { - "type": "category", - "label": "Architecture", - "link": { - "type": "generated-index" - }, - "items": [ - { - "type": "doc", - "label": "Overview", - "id": "architecture/overview" - }, - { - "type": "doc", - "label": "Cluster orchestration", - "id": "architecture/orchestration" - }, - { - "type": "doc", - "label": "Versions and support", - "id": "architecture/versions" - }, - { - "type": "doc", - "label": "Microservices", - "id": "architecture/microservices" - }, - { - "type": "doc", - "label": "Attestation", - "id": "architecture/attestation" - }, - { - "type": "doc", - "label": "Images", - "id": "architecture/images" - }, - { - "type": "doc", - "label": "Keys and cryptographic primitives", - "id": "architecture/keys" - }, - { - "type": "doc", - "label": "Encrypted persistent storage", - "id": "architecture/encrypted-storage" - }, - { - "type": "doc", - "label": "Networking", - "id": "architecture/networking" - }, - { - "type": "doc", - "label": "Observability", - "id": "architecture/observability" - } - ] - }, - { - "type": "category", - "label": "Reference", - "link": { - "type": "generated-index" - }, - "items": [ - { - "type": "doc", - "label": "CLI", - "id": "reference/cli" - }, - { - "type": "doc", - "label": "Configuration migrations", - "id": "reference/migration" - }, - { - "type": "doc", - "label": "Terraform usage", - "id": "reference/terraform" - }, - { - "type": "doc", - "label": "SLSA adoption", - "id": "reference/slsa" - } - ] - } - ] -} diff --git a/docs/versioned_sidebars/version-2.23-sidebars.json b/docs/versioned_sidebars/version-2.23-sidebars.json deleted file mode 100644 index c9937ab4f..000000000 --- a/docs/versioned_sidebars/version-2.23-sidebars.json +++ /dev/null @@ -1,304 +0,0 @@ -{ - "docs": [ - { - "type": "doc", - "label": "Introduction", - "id": "intro" - }, - { - "type": "category", - "label": "Basics", - "link": { - "type": "generated-index" - }, - "items": [ - { - "type": "doc", - "label": "Confidential Kubernetes", - "id": "overview/confidential-kubernetes" - }, - { - "type": "doc", - "label": "Security benefits", - "id": "overview/security-benefits" - }, - { - "type": "doc", - "label": "Product features", - "id": "overview/product" - }, - { - "type": "doc", - "label": "Feature status of clouds", - "id": "overview/clouds" - }, - { - "type": "category", - "label": "Performance", - "link": { - "type": "doc", - "id": "overview/performance/performance" - }, - "items": [ - { - "type": "doc", - "label": "Compute benchmarks", - "id": "overview/performance/compute" - }, - { - "type": "doc", - "label": "I/O benchmarks", - "id": "overview/performance/io" - }, - { - "type": "doc", - "label": "Application benchmarks", - "id": "overview/performance/application" - } - ] - }, - { - "type": "doc", - "label": "License", - "id": "overview/license" - } - ] - }, - { - "type": "category", - "label": "Getting started", - "link": { - "type": "generated-index" - }, - "items": [ 
- { - "type": "doc", - "label": "Installation", - "id": "getting-started/install" - }, - { - "type": "doc", - "label": "First steps (cloud)", - "id": "getting-started/first-steps" - }, - { - "type": "doc", - "label": "First steps (local)", - "id": "getting-started/first-steps-local" - }, - { - "type": "doc", - "label": "Cloud Marketplaces", - "id": "getting-started/marketplaces" - }, - { - "type": "category", - "label": "Examples", - "link": { - "type": "doc", - "id": "getting-started/examples" - }, - "items": [ - { - "type": "doc", - "label": "Emojivoto", - "id": "getting-started/examples/emojivoto" - }, - { - "type": "doc", - "label": "Online Boutique", - "id": "getting-started/examples/online-boutique" - }, - { - "type": "doc", - "label": "Horizontal Pod Autoscaling", - "id": "getting-started/examples/horizontal-scaling" - }, - { - "type": "doc", - "label": "Filestash with s3proxy", - "id": "getting-started/examples/filestash-s3proxy" - } - ] - } - ] - }, - { - "type": "category", - "label": "Workflows", - "link": { - "type": "generated-index" - }, - "items": [ - { - "type": "doc", - "label": "Verify the CLI", - "id": "workflows/verify-cli" - }, - { - "type": "doc", - "label": "Configure your cluster", - "id": "workflows/config" - }, - { - "type": "doc", - "label": "Create your cluster", - "id": "workflows/create" - }, - { - "type": "doc", - "label": "Scale your cluster", - "id": "workflows/scale" - }, - { - "type": "doc", - "label": "Upgrade your cluster", - "id": "workflows/upgrade" - }, - { - "type": "doc", - "label": "Expose a service", - "id": "workflows/lb" - }, - { - "type": "doc", - "label": "Install cert-manager", - "id": "workflows/cert-manager" - }, - { - "type": "doc", - "label": "Install s3proxy", - "id": "workflows/s3proxy" - }, - { - "type": "doc", - "label": "Terminate your cluster", - "id": "workflows/terminate" - }, - { - "type": "doc", - "label": "Recover your cluster", - "id": "workflows/recovery" - }, - { - "type": "doc", - "label": "Verify your cluster", - "id": "workflows/verify-cluster" - }, - { - "type": "doc", - "label": "Use persistent storage", - "id": "workflows/storage" - }, - { - "type": "doc", - "label": "Use the Terraform provider", - "id": "workflows/terraform-provider" - }, - { - "type": "doc", - "label": "Consume SBOMs", - "id": "workflows/sbom" - }, - { - "type": "doc", - "label": "Reproduce release artifacts", - "id": "workflows/reproducible-builds" - }, - { - "type": "doc", - "label": "Troubleshooting", - "id": "workflows/troubleshooting" - } - ] - }, - { - "type": "category", - "label": "Architecture", - "link": { - "type": "generated-index" - }, - "items": [ - { - "type": "doc", - "label": "Overview", - "id": "architecture/overview" - }, - { - "type": "doc", - "label": "Cluster orchestration", - "id": "architecture/orchestration" - }, - { - "type": "doc", - "label": "Versions and support", - "id": "architecture/versions" - }, - { - "type": "doc", - "label": "Microservices", - "id": "architecture/microservices" - }, - { - "type": "doc", - "label": "Attestation", - "id": "architecture/attestation" - }, - { - "type": "doc", - "label": "Images", - "id": "architecture/images" - }, - { - "type": "doc", - "label": "Keys and cryptographic primitives", - "id": "architecture/keys" - }, - { - "type": "doc", - "label": "Encrypted persistent storage", - "id": "architecture/encrypted-storage" - }, - { - "type": "doc", - "label": "Networking", - "id": "architecture/networking" - }, - { - "type": "doc", - "label": "Observability", - "id": 
"architecture/observability" - } - ] - }, - { - "type": "category", - "label": "Reference", - "link": { - "type": "generated-index" - }, - "items": [ - { - "type": "doc", - "label": "CLI", - "id": "reference/cli" - }, - { - "type": "doc", - "label": "Configuration migrations", - "id": "reference/migration" - }, - { - "type": "doc", - "label": "Terraform usage", - "id": "reference/terraform" - }, - { - "type": "doc", - "label": "SLSA adoption", - "id": "reference/slsa" - } - ] - } - ] -} diff --git a/docs/versions.json b/docs/versions.json index 8cc6150df..4747d18fe 100644 --- a/docs/versions.json +++ b/docs/versions.json @@ -1,12 +1,4 @@ [ - "2.23", - "2.22", - "2.21", - "2.20", - "2.19", - "2.18", - "2.17", - "2.16", "2.15", "2.14", "2.13", diff --git a/e2e/e2e.go b/e2e/e2e.go index 4c23c394c..43a0044e7 100644 --- a/e2e/e2e.go +++ b/e2e/e2e.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // End-to-end tests which are executed from our GitHub action pipelines. diff --git a/e2e/internal/kubectl/kubectl.go b/e2e/internal/kubectl/kubectl.go index e44abd446..2fb191b30 100644 --- a/e2e/internal/kubectl/kubectl.go +++ b/e2e/internal/kubectl/kubectl.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Provides functionality to easily interact with the K8s API, which can be used diff --git a/e2e/internal/lb/lb.go b/e2e/internal/lb/lb.go index b38202079..2cd01237b 100644 --- a/e2e/internal/lb/lb.go +++ b/e2e/internal/lb/lb.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package lb tests that the cloud load balancer works as expected. diff --git a/e2e/internal/lb/lb_test.go b/e2e/internal/lb/lb_test.go index c8a3d2d16..94c8d2ff3 100644 --- a/e2e/internal/lb/lb_test.go +++ b/e2e/internal/lb/lb_test.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // End-to-end tests for our cloud load balancer functionality. 
@@ -12,6 +12,7 @@ package lb import ( "bufio" "bytes" + "context" "fmt" "io" "net/http" @@ -69,7 +70,7 @@ func TestLoadBalancer(t *testing.T) { t.Log("Change port of service to 8044") svc.Spec.Ports[0].Port = newPort - svc, err = k.CoreV1().Services(namespaceName).Update(t.Context(), svc, metaV1.UpdateOptions{}) + svc, err = k.CoreV1().Services(namespaceName).Update(context.Background(), svc, metaV1.UpdateOptions{}) require.NoError(err) assert.Equal(newPort, svc.Spec.Ports[0].Port) @@ -92,7 +93,7 @@ func gatherDebugInfo(t *testing.T, k *kubernetes.Clientset) { t.Log("Gathering additional debug information.") - pods, err := k.CoreV1().Pods(namespaceName).List(t.Context(), metaV1.ListOptions{ + pods, err := k.CoreV1().Pods(namespaceName).List(context.Background(), metaV1.ListOptions{ LabelSelector: "app=whoami", }) if err != nil { @@ -105,7 +106,7 @@ func gatherDebugInfo(t *testing.T, k *kubernetes.Clientset) { req := k.CoreV1().Pods(namespaceName).GetLogs(pod.Name, &coreV1.PodLogOptions{ LimitBytes: func() *int64 { i := int64(1024 * 1024); return &i }(), }) - logs, err := req.Stream(t.Context()) + logs, err := req.Stream(context.Background()) if err != nil { t.Logf("fetching logs: %v", err) return @@ -154,7 +155,7 @@ func testEventuallyStatusOK(t *testing.T, url string) { require := require.New(t) assert.Eventually(func() bool { - req, err := http.NewRequestWithContext(t.Context(), http.MethodGet, url, http.NoBody) + req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, url, http.NoBody) require.NoError(err) resp, err := http.DefaultClient.Do(req) @@ -182,7 +183,7 @@ func testEventuallyExternalIPAvailable(t *testing.T, k *kubernetes.Clientset) *c require.Eventually(t, func() bool { var err error - svc, err = k.CoreV1().Services(namespaceName).Get(t.Context(), serviceName, metaV1.GetOptions{}) + svc, err = k.CoreV1().Services(namespaceName).Get(context.Background(), serviceName, metaV1.GetOptions{}) if err != nil { t.Log("Getting service failed: ", err.Error()) return false @@ -211,7 +212,7 @@ func testEndpointAvailable(t *testing.T, url string, allHostnames []string, reqI assert := assert.New(t) require := require.New(t) - req, err := http.NewRequestWithContext(t.Context(), http.MethodGet, url, http.NoBody) + req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, url, http.NoBody) require.NoError(err) resp, err := http.DefaultClient.Do(req) diff --git a/e2e/internal/upgrade/BUILD.bazel b/e2e/internal/upgrade/BUILD.bazel index b97119e5f..6e368e94f 100644 --- a/e2e/internal/upgrade/BUILD.bazel +++ b/e2e/internal/upgrade/BUILD.bazel @@ -20,7 +20,7 @@ go_library( "//internal/versions", "@com_github_spf13_afero//:afero", "@com_github_stretchr_testify//require", - "@io_bazel_rules_go//go/runfiles", + "@io_bazel_rules_go//go/runfiles:go_default_library", "@io_k8s_apimachinery//pkg/apis/meta/v1:meta", "@io_k8s_client_go//kubernetes", "@sh_helm_helm_v3//pkg/action", @@ -47,7 +47,6 @@ go_test( "//e2e/internal/kubectl", "//internal/constants", "//internal/versions", - "@com_github_stretchr_testify//assert", "@com_github_stretchr_testify//require", "@io_k8s_api//core/v1:core", "@io_k8s_apimachinery//pkg/apis/meta/v1:meta", diff --git a/e2e/internal/upgrade/helm.go b/e2e/internal/upgrade/helm.go index 23b55d327..18b48a13c 100644 --- a/e2e/internal/upgrade/helm.go +++ b/e2e/internal/upgrade/helm.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package upgrade diff --git 
a/e2e/internal/upgrade/upgrade.go b/e2e/internal/upgrade/upgrade.go index 09452bf13..14eb2227a 100644 --- a/e2e/internal/upgrade/upgrade.go +++ b/e2e/internal/upgrade/upgrade.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package upgrade tests that the CLI's apply command works as expected and @@ -90,7 +90,7 @@ func testStatusEventuallyWorks(t *testing.T, cli string, timeout time.Duration) // Show versions set in cluster. // The string after "Cluster status:" in the output might not be updated yet. // This is only updated after the operator finishes one reconcile loop. - cmd := exec.CommandContext(t.Context(), cli, "status") + cmd := exec.CommandContext(context.Background(), cli, "status") stdout, stderr, err := runCommandWithSeparateOutputs(cmd) if err != nil { log.Printf("Stdout: %s\nStderr: %s", string(stdout), string(stderr)) @@ -121,7 +121,7 @@ func testMicroservicesEventuallyHaveVersion(t *testing.T, wantMicroserviceVersio func testNodesEventuallyHaveVersion(t *testing.T, k *kubernetes.Clientset, targetVersions VersionContainer, totalNodeCount int, timeout time.Duration) { require.Eventually(t, func() bool { - nodes, err := k.CoreV1().Nodes().List(t.Context(), metaV1.ListOptions{}) + nodes, err := k.CoreV1().Nodes().List(context.Background(), metaV1.ListOptions{}) if err != nil { log.Println(err) return false @@ -142,6 +142,7 @@ func testNodesEventuallyHaveVersion(t *testing.T, k *kubernetes.Clientset, targe if key == "constellation.edgeless.systems/node-image" { if !strings.EqualFold(value, targetVersions.ImageRef) { log.Printf("\t%s: Image %s, want %s\n", node.Name, value, targetVersions.ImageRef) + fmt.Printf("\tP: %s: Image %s, want %s\n", node.Name, value, targetVersions.ImageRef) allUpdated = false } } @@ -153,6 +154,11 @@ func testNodesEventuallyHaveVersion(t *testing.T, k *kubernetes.Clientset, targe log.Printf("\t%s: K8s (Kubelet) %s, want %s\n", node.Name, kubeletVersion, targetVersions.Kubernetes) allUpdated = false } + kubeProxyVersion := node.Status.NodeInfo.KubeProxyVersion + if kubeProxyVersion != string(targetVersions.Kubernetes) { + log.Printf("\t%s: K8s (Proxy) %s, want %s\n", node.Name, kubeProxyVersion, targetVersions.Kubernetes) + allUpdated = false + } } } return allUpdated @@ -182,8 +188,7 @@ func runCommandWithSeparateOutputs(cmd *exec.Cmd) (stdout, stderr []byte, err er return } - continuouslyPrintOutput := func(r io.Reader, prefix string, wg *sync.WaitGroup) { - defer wg.Done() + continuouslyPrintOutput := func(r io.Reader, prefix string) { scanner := bufio.NewScanner(r) for scanner.Scan() { output := scanner.Text() @@ -197,15 +202,12 @@ func runCommandWithSeparateOutputs(cmd *exec.Cmd) (stdout, stderr []byte, err er } } - wg := &sync.WaitGroup{} - wg.Add(2) - go continuouslyPrintOutput(stdoutIn, "stdout", wg) - go continuouslyPrintOutput(stderrIn, "stderr", wg) + go continuouslyPrintOutput(stdoutIn, "stdout") + go continuouslyPrintOutput(stderrIn, "stderr") if err = cmd.Wait(); err != nil { err = fmt.Errorf("wait for command to finish: %w", err) } - wg.Wait() return stdout, stderr, err } @@ -301,10 +303,10 @@ func getCLIPath(cliPathFlag string) (string, error) { pathCLI := os.Getenv("PATH_CLI") var relCLIPath string switch { - case cliPathFlag != "": - relCLIPath = cliPathFlag case pathCLI != "": relCLIPath = pathCLI + case cliPathFlag != "": + relCLIPath = cliPathFlag default: return "", errors.New("neither 'PATH_CLI' nor 'cli' flag set") } diff --git 
a/e2e/internal/upgrade/upgrade_test.go b/e2e/internal/upgrade/upgrade_test.go index 6f1a7b517..4206348f2 100644 --- a/e2e/internal/upgrade/upgrade_test.go +++ b/e2e/internal/upgrade/upgrade_test.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package upgrade @@ -23,7 +23,6 @@ import ( "github.com/edgelesssys/constellation/v2/e2e/internal/kubectl" "github.com/edgelesssys/constellation/v2/internal/constants" "github.com/edgelesssys/constellation/v2/internal/versions" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" coreV1 "k8s.io/api/core/v1" metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -72,7 +71,7 @@ func TestUpgrade(t *testing.T) { targetVersions := WriteUpgradeConfig(require, *targetImage, *targetKubernetes, *targetMicroservices, constants.ConfigFilename) log.Println("Fetching measurements for new image.") - cmd := exec.CommandContext(t.Context(), cli, "config", "fetch-measurements", "--insecure", "--debug") + cmd := exec.CommandContext(context.Background(), cli, "config", "fetch-measurements", "--insecure", "--debug") stdout, stderr, err := runCommandWithSeparateOutputs(cmd) require.NoError(err, "Stdout: %s\nStderr: %s", string(stdout), string(stderr)) log.Println(string(stdout)) @@ -82,11 +81,10 @@ func TestUpgrade(t *testing.T) { log.Println(string(data)) log.Println("Checking upgrade.") - assert := assert.New(t) // use assert because this part is more brittle and should not fail the entire test - runUpgradeCheck(t.Context(), assert, cli, *targetKubernetes) + runUpgradeCheck(require, cli, *targetKubernetes) log.Println("Triggering upgrade.") - runUpgradeApply(t.Context(), require, cli) + runUpgradeApply(require, cli) AssertUpgradeSuccessful(t, cli, targetVersions, k, *wantControl, *wantWorker, *timeout) } @@ -96,7 +94,7 @@ func TestUpgrade(t *testing.T) { // 2) all pods have good status conditions. func testPodsEventuallyReady(t *testing.T, k *kubernetes.Clientset, namespace string) { require.Eventually(t, func() bool { - pods, err := k.CoreV1().Pods(namespace).List(t.Context(), metaV1.ListOptions{}) + pods, err := k.CoreV1().Pods(namespace).List(context.Background(), metaV1.ListOptions{}) if err != nil { log.Println(err) return false @@ -127,7 +125,7 @@ func testPodsEventuallyReady(t *testing.T, k *kubernetes.Clientset, namespace st // 2) the expected number of nodes have joined the cluster. func testNodesEventuallyAvailable(t *testing.T, k *kubernetes.Clientset, wantControlNodeCount, wantWorkerNodeCount int) { require.Eventually(t, func() bool { - nodes, err := k.CoreV1().Nodes().List(t.Context(), metaV1.ListOptions{}) + nodes, err := k.CoreV1().Nodes().List(context.Background(), metaV1.ListOptions{}) if err != nil { log.Println(err) return false @@ -172,25 +170,25 @@ func testNodesEventuallyAvailable(t *testing.T, k *kubernetes.Clientset, wantCon // runUpgradeCheck executes 'upgrade check' and does basic checks on the output. // We can not check images upgrades because we might use unpublished images. CLI uses public CDN to check for available images. 
-func runUpgradeCheck(ctx context.Context, assert *assert.Assertions, cli, targetKubernetes string) { - cmd := exec.CommandContext(ctx, cli, "upgrade", "check", "--debug") +func runUpgradeCheck(require *require.Assertions, cli, targetKubernetes string) { + cmd := exec.CommandContext(context.Background(), cli, "upgrade", "check", "--debug") stdout, stderr, err := runCommandWithSeparateOutputs(cmd) - assert.NoError(err, "Stdout: %s\nStderr: %s", string(stdout), string(stderr)) + require.NoError(err, "Stdout: %s\nStderr: %s", string(stdout), string(stderr)) - assert.Contains(string(stdout), "The following updates are available with this CLI:") - assert.Contains(string(stdout), "Kubernetes:") + require.Contains(string(stdout), "The following updates are available with this CLI:") + require.Contains(string(stdout), "Kubernetes:") log.Printf("targetKubernetes: %s\n", targetKubernetes) if targetKubernetes == "" { log.Printf("true\n") - assert.True(containsAny(string(stdout), versions.SupportedK8sVersions())) + require.True(containsAny(string(stdout), versions.SupportedK8sVersions())) } else { log.Printf("false. targetKubernetes: %s\n", targetKubernetes) - assert.Contains(string(stdout), targetKubernetes, fmt.Sprintf("Expected Kubernetes version %s in output.", targetKubernetes)) + require.Contains(string(stdout), targetKubernetes, fmt.Sprintf("Expected Kubernetes version %s in output.", targetKubernetes)) } - assert.Contains(string(stdout), "Services:") - assert.Contains(string(stdout), fmt.Sprintf("--> %s", constants.BinaryVersion().String())) + require.Contains(string(stdout), "Services:") + require.Contains(string(stdout), fmt.Sprintf("--> %s", constants.BinaryVersion().String())) log.Println(string(stdout)) } @@ -204,16 +202,16 @@ func containsAny(text string, substrs []string) bool { return false } -func runUpgradeApply(ctx context.Context, require *require.Assertions, cli string) { +func runUpgradeApply(require *require.Assertions, cli string) { tfLogFlag := "" - cmd := exec.CommandContext(ctx, cli, "--help") + cmd := exec.CommandContext(context.Background(), cli, "--help") stdout, stderr, err := runCommandWithSeparateOutputs(cmd) require.NoError(err, "Stdout: %s\nStderr: %s", string(stdout), string(stderr)) if strings.Contains(string(stdout), "--tf-log") { tfLogFlag = "--tf-log=DEBUG" } - cmd = exec.CommandContext(ctx, cli, "apply", "--debug", "--yes", tfLogFlag) + cmd = exec.CommandContext(context.Background(), cli, "apply", "--debug", "--yes", tfLogFlag) stdout, stderr, err = runCommandWithSeparateOutputs(cmd) require.NoError(err, "Stdout: %s\nStderr: %s", string(stdout), string(stderr)) require.NoError(containsUnexepectedMsg(string(stdout))) diff --git a/e2e/malicious-join/malicious-join.go b/e2e/malicious-join/malicious-join.go index c32885af3..981035880 100644 --- a/e2e/malicious-join/malicious-join.go +++ b/e2e/malicious-join/malicious-join.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // End-to-end test that issues various types of malicious join requests to a cluster. @@ -155,13 +155,13 @@ type maliciousJoiner struct { // join issues a join request to the join service endpoint. 
func (j *maliciousJoiner) join(ctx context.Context) (*joinproto.IssueJoinTicketResponse, error) { - j.logger.Debug(fmt.Sprintf("Dialing join service endpoint %q", j.endpoint)) - conn, err := j.dialer.Dial(j.endpoint) + j.logger.Debug(fmt.Sprintf("Dialing join service endpoint %s", j.endpoint)) + conn, err := j.dialer.Dial(ctx, j.endpoint) if err != nil { return nil, fmt.Errorf("dialing join service endpoint: %w", err) } defer conn.Close() - j.logger.Debug(fmt.Sprintf("Successfully dialed join service endpoint %q", j.endpoint)) + j.logger.Debug(fmt.Sprintf("Successfully dialed join service endpoint %s", j.endpoint)) protoClient := joinproto.NewAPIClient(conn) @@ -172,10 +172,10 @@ func (j *maliciousJoiner) join(ctx context.Context) (*joinproto.IssueJoinTicketR IsControlPlane: false, } res, err := protoClient.IssueJoinTicket(ctx, req) + j.logger.Debug(fmt.Sprintf("Got join ticket response: %+v", res)) if err != nil { return nil, fmt.Errorf("issuing join ticket: %w", err) } - j.logger.Debug("Got join ticket response", "apiServerEndpoint", res.ApiServerEndpoint, "kubernetesVersion", res.KubernetesVersion) return res, nil } diff --git a/e2e/miniconstellation/.terraform.lock.hcl b/e2e/miniconstellation/.terraform.lock.hcl index 87cddd3bb..b29acb322 100644 --- a/e2e/miniconstellation/.terraform.lock.hcl +++ b/e2e/miniconstellation/.terraform.lock.hcl @@ -2,91 +2,97 @@ # Manual edits may be lost in future updates. provider "registry.terraform.io/hashicorp/azurerm" { - version = "4.29.0" - constraints = "4.29.0" + version = "3.92.0" + constraints = "3.92.0" hashes = [ - "h1:Bde/KCh2xGVCBx/JnixC3I2fmoRTwHXgsapfQ5QG8eg=", - "h1:IyINmgNiLfWx3Istkt5Mz+IJrDhSMhj3/qQeJlC4qS4=", - "h1:KEJAt0mJAACyIKUB5mCk/wqtxKMhivdeW8w6byz5Ll4=", - "h1:Y4gTSs+ZE5YSJVXG2qmsbXmv9Daq5aGM8Ip/GE6nev8=", - "h1:YtcHvTVfVBKbMCp9esoj527R1UK/hU0Zmo3pyQb8YhQ=", - "h1:atJdgnuqk+w3v4Zzhw2B1FZeYYA4su9JfanwNsx+c8o=", - "h1:c9tmtEdVTb9siGa3hVxPrMVl9ij5zijnD02JMHcHjrE=", - "h1:eN0KhMGVepEPpSA+YN5Kaz/v9PFKCafbkqqBzpLJf+g=", - "h1:hNVKlXTx2duXnR6SNKtyQMx7zSIlrxBu66Z0gbyfv3c=", - "h1:jC2GJo4VzTKnKociUDLVv8/+u9Mz+4scZrqbEasV+Y0=", - "h1:m3xYvc9X0pec0Zd1dpn82ALQ+6vwz56RnF/3CbkI2Eg=", - "zh:16590eea68c7c8aedb7af19f690eb34ab6636ef417b3fa9e06ca038fdb4c44b8", - "zh:1c907dfe44d00a54aa63d443004add90188f9a53ef3e919aff8aba92f715f42b", - "zh:258a0ff4198d80cae33c89091cd556d84c1b522c4416458484f23719a0cdf4cc", - "zh:587f5e9b2b33e51b18fb0f372025c961c3f57f2958b388459dd8432412650bda", - "zh:6318ca03bd9dbac272a75bb193688c7d4c4b45c7460289820528f31bcd6c3fe9", - "zh:63e4e8128e26e4c3e0c3b6582ef527245eb35eb5c80ad278dc675ebdf71edeaf", - "zh:845c898a27a84a15ba26e95ee66ac9563f81bc672b5ca216af82d87fe09bd5f8", - "zh:8fa6434fa212d5501185f0adc985d3a3c1e0f449c78f040a4ca640cb1e809cac", - "zh:9b49c0d72ab19aab43b2b48d23c5dddbbe29afae1569a987e6f20ed4c80ddf4c", - "zh:b14cc1ee5e3acf52490de7dd9791cce7953c0ee4bcccf0306aafd256568bd69f", - "zh:cd444836b2579fa42bfca2ae6145d394c41b6438b1ae01078c060bfaf803bb4d", + "h1:+bZPRgjpUA6LivvMIS1UdwRWUgzoYBp/nhEpbL4aXHM=", + "h1:D5lngW1uKlPM2EUCdNG1f2FvPGHYRklDFN8b2jPCIpM=", + "h1:nTP2ZYfuEpMP+PkkgRdhQphNmWWJuKdE9Z4TzeC7ydo=", + "h1:sqVZftg9rJGDiiPiY9l1V/a+5CWkxNcj22sBu8HsJBY=", + "h1:swoRk2drVrD8v7GrW/2OJSk06v2I2zGk3XPAgBDbw9A=", + "zh:04292e149676ba956d738e85faeb6d6ebd3759e8310f1c4155e67402eb5ae0f7", + "zh:0963b4528f25d01d5c733e17de31e2c0b94790fd02931b2a47cd051b20dd0d96", + "zh:133563e16e8a4a7139ac11d94e68de8d1d5e3a62a532e64ac936735d7b1e04db", + "zh:2b219f1b40881d3bdd89257c916f255a7e36904ddc65dbbafee80763661b4636", + 
"zh:4b4e11a4e3716b290b3b173dfd15b06814b2f6f148f663e3c67a677c95526339", + "zh:5607c7bff3019c3b31488be1a8a9d77a96d27b199a1d8b789e4c2d4c90805674", + "zh:6469aef7728947dacb47785e6082d2d95ebd336a8798f3be6cece5a13145108c", + "zh:69e563f4e6397e1ebaef6f554d296238ec1d9dadc4b865c36743bd8366a888da", + "zh:887a223b7a9ec4e66634dbb65d9dcc53f0be06d058d9a209927ad49702ae790c", + "zh:b03c273367885c5489a24c31859af81ea58cb169431c0da97a175945ec968f53", + "zh:dd7b704ceaf98ce591e111a9c5085465c946f4f8f357089c0e27e990a669ba39", "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", ] } provider "registry.terraform.io/hashicorp/cloudinit" { - version = "2.3.7" - constraints = "2.3.7" + version = "2.3.3" + constraints = "2.3.3" hashes = [ - "h1:/hny5kXmhcnuJDD1V+5XCrZOYDIqja2U47VM4DPEnBA=", - "h1:A9COAUjeBJ+fgYAI/PKtDs4Wzs50srFSY+KkfpSVGLw=", - "h1:Lt8lqrdNgZRlkOTwSXZTyuJkiVXnpwTsWAqHQPL6sIY=", - "h1:M9TpQxKAE/hyOwytdX9MUNZw30HoD/OXqYIug5fkqH8=", - "h1:coZHiZww6hWZoOoWw0p+6oeYb/tMh1uTvX1Y2ZzzXqE=", - "h1:dgBaiMxxU61piW30emM6251LMFW66TbKR+p5ylPZvqc=", - "h1:h1Pr6uNwq+iDEGrnQJEHzOTz+yVTW0AJgZrGXuoO4Qs=", - "h1:ht83gEvyri0BD3sata7BDhx31N/KbCECIozG7UM/kC8=", - "h1:iZ27qylcH/2bs685LJTKOKcQ+g7cF3VwN3kHMrzm4Ow=", - "h1:ll35IR++uaXwfwqZFFRWrvS0idO1mX43Y/embsaOe4k=", - "h1:rafNPmTutVTO2Horq45DG9Pjqrs+vx42oc7b/3aVGEc=", - "zh:06f1c54e919425c3139f8aeb8fcf9bceca7e560d48c9f0c1e3bb0a8ad9d9da1e", - "zh:0e1e4cf6fd98b019e764c28586a386dc136129fef50af8c7165a067e7e4a31d5", - "zh:1871f4337c7c57287d4d67396f633d224b8938708b772abfc664d1f80bd67edd", - "zh:2b9269d91b742a71b2248439d5e9824f0447e6d261bfb86a8a88528609b136d1", - "zh:3d8ae039af21426072c66d6a59a467d51f2d9189b8198616888c1b7fc42addc7", - "zh:3ef4e2db5bcf3e2d915921adced43929214e0946a6fb11793085d9a48995ae01", - "zh:42ae54381147437c83cbb8790cc68935d71b6357728a154109d3220b1beb4dc9", - "zh:4496b362605ae4cbc9ef7995d102351e2fe311897586ffc7a4a262ccca0c782a", - "zh:652a2401257a12706d32842f66dac05a735693abcb3e6517d6b5e2573729ba13", - "zh:7406c30806f5979eaed5f50c548eced2ea18ea121e01801d2f0d4d87a04f6a14", - "zh:7848429fd5a5bcf35f6fee8487df0fb64b09ec071330f3ff240c0343fe2a5224", + "h1:6utKe3l0lf4+hw+uVK8XJVNdOmp7tOTcycyFvv1pGAA=", + "h1:GmJ8PxLjjPr+lh02Bw3u7RYqA3UtpE2hQ1T43Vt7PTQ=", + "h1:TCZQjXesJ9qbOZaHjJse/WyOxYQwp7wUX3VNxL/qo1c=", + "h1:U6EC4/cJJ6Df3LztUQ/I4YuljGQQeQ+LdLndAwSSiTs=", + "h1:ZmQ97fIcPW7hj/vynRB4zbtObK0Z/LVJPvCwlNd78zA=", + "zh:0bd6ee14ca5cf0f0c83d3bb965346b1225ccd06a6247e80774aaaf54c729daa7", + "zh:3055ad0dcc98de1d4e45b72c5889ae91b62f4ae4e54dbc56c4821be0fdfbed91", + "zh:32764cfcff0d7379ca8b7dde376ac5551854d454c5881945f1952b785a312fa2", + "zh:55c2a4dc3ebdeaa1dec3a36db96dab253c7fa10b9fe1209862e1ee77a01e0aa1", + "zh:5c71f260ba5674d656d12f67cde3bb494498e6b6b6e66945ef85688f185dcf63", "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:9617280a853ec7caedb8beb7864e4b29faf9c850a453283980c28fccef2c493d", + "zh:ac8bda21950f8dddade3e9bc15f7bcfdee743738483be5724169943cafa611f5", + "zh:ba9ab567bbe63dee9197a763b3104ea9217ba27449ed54d3afa6657f412e3496", + "zh:effd1a7e34bae3879c02f03ed3afa979433a518e11de1f8afd35a8710231ac14", + "zh:f021538c86d0ac250d75e59efde6d869bbfff711eb744c8bddce79d2475bf46d", + "zh:f1e3984597948a2103391a26600e177b19f16a5a4c66acee27a4343fb141571f", + ] +} + +provider "registry.terraform.io/hashicorp/random" { + version = "3.6.0" + constraints = "3.6.0" + hashes = [ + "h1:5KeoKSVKVHJW308uiTgslxFbjQAdWzBGUFK68vgMRWY=", + "h1:I8MBeauYA8J8yheLJ8oSMWqB0kovn16dF/wKZ1QTdkk=", + "h1:R5Ucn26riKIEijcsiOMBR3uOAjuOMfI1x7XvH4P6B1w=", + 
"h1:p6WG1IPHnqx1fnJVKNjv733FBaArIugqy58HRZnpPCk=", + "h1:t0mRdJzegohRKhfdoQEJnv3JRISSezJRblN0HIe67vo=", + "zh:03360ed3ecd31e8c5dac9c95fe0858be50f3e9a0d0c654b5e504109c2159287d", + "zh:1c67ac51254ba2a2bb53a25e8ae7e4d076103483f55f39b426ec55e47d1fe211", + "zh:24a17bba7f6d679538ff51b3a2f378cedadede97af8a1db7dad4fd8d6d50f829", + "zh:30ffb297ffd1633175d6545d37c2217e2cef9545a6e03946e514c59c0859b77d", + "zh:454ce4b3dbc73e6775f2f6605d45cee6e16c3872a2e66a2c97993d6e5cbd7055", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:91df0a9fab329aff2ff4cf26797592eb7a3a90b4a0c04d64ce186654e0cc6e17", + "zh:aa57384b85622a9f7bfb5d4512ca88e61f22a9cea9f30febaa4c98c68ff0dc21", + "zh:c4a3e329ba786ffb6f2b694e1fd41d413a7010f3a53c20b432325a94fa71e839", + "zh:e2699bc9116447f96c53d55f2a00570f982e6f9935038c3810603572693712d0", + "zh:e747c0fd5d7684e5bfad8aa0ca441903f15ae7a98a737ff6aca24ba223207e2c", + "zh:f1ca75f417ce490368f047b63ec09fd003711ae48487fba90b4aba2ccf71920e", ] } provider "registry.terraform.io/hashicorp/tls" { - version = "4.1.0" - constraints = "4.1.0" + version = "4.0.5" + constraints = "4.0.5" hashes = [ - "h1:4gd/jiOS0zJxjTd5Q4o/gOp24RxcuwQ/TxwjTYQNPz4=", - "h1:C0J7AsrVHVqnDT9tICDNaKvA9iH6WTLS2EYzCEegpx0=", - "h1:Ka8mEwRFXBabR33iN/WTIEW6RP0z13vFsDlwn11Pf2I=", - "h1:ReNkTkCM64bktu54eGwQc29rhIejMLQsYA6kYNyBWno=", - "h1:UklaKJOCynnEJbpCVN0zJKIJ3SvO7RQJ00/6grBatnw=", - "h1:ZHcr1WIomuU6ZV+dzEwAG1+52JP0e0d/+l7bo3N5p88=", - "h1:eZa3vbx1pbiwnajuKvGWE7jWK+nHQ8lcLc/mO6Rhf4o=", - "h1:iSgnCUoLGMkt31RlflnL09NyjpAH0DX6bb9QBw5IE9Y=", - "h1:uDtqTpFJOseNUlPDx4TT/lXf6ie3CarsimL7sYCiVH4=", - "h1:y9cHrgcuaZt592In6xQzz1lx7k/B9EeWrAb8K7QqOgU=", - "h1:zEv9tY1KR5vaLSyp2lkrucNJ+Vq3c+sTFK9GyQGLtFs=", - "zh:14c35d89307988c835a7f8e26f1b83ce771e5f9b41e407f86a644c0152089ac2", - "zh:2fb9fe7a8b5afdbd3e903acb6776ef1be3f2e587fb236a8c60f11a9fa165faa8", - "zh:35808142ef850c0c60dd93dc06b95c747720ed2c40c89031781165f0c2baa2fc", - "zh:35b5dc95bc75f0b3b9c5ce54d4d7600c1ebc96fbb8dfca174536e8bf103c8cdc", - "zh:38aa27c6a6c98f1712aa5cc30011884dc4b128b4073a4a27883374bfa3ec9fac", - "zh:51fb247e3a2e88f0047cb97bb9df7c228254a3b3021c5534e4563b4007e6f882", - "zh:62b981ce491e38d892ba6364d1d0cdaadcee37cc218590e07b310b1dfa34be2d", - "zh:bc8e47efc611924a79f947ce072a9ad698f311d4a60d0b4dfff6758c912b7298", - "zh:c149508bd131765d1bc085c75a870abb314ff5a6d7f5ac1035a8892d686b6297", - "zh:d38d40783503d278b63858978d40e07ac48123a2925e1a6b47e62179c046f87a", + "h1:e4LBdJoZJNOQXPWgOAG0UuPBVhCStu98PieNlqJTmeU=", + "h1:jb/Rg9inGYp4t8HtBoETESsQJgdmOHoe1bzzg2uNB3w=", + "h1:kcw9sNLNFMY2S0HIGOkjlwKtUc8lpqZsQGsC2SG9xEQ=", + "h1:yLqz+skP3+EbU3yyvw8JqzflQTKDQGsC9QyZAg+S4dg=", + "h1:zeG5RmggBZW/8JWIVrdaeSJa0OG62uFX5HY1eE8SjzY=", + "zh:01cfb11cb74654c003f6d4e32bbef8f5969ee2856394a96d127da4949c65153e", + "zh:0472ea1574026aa1e8ca82bb6df2c40cd0478e9336b7a8a64e652119a2fa4f32", + "zh:1a8ddba2b1550c5d02003ea5d6cdda2eef6870ece86c5619f33edd699c9dc14b", + "zh:1e3bb505c000adb12cdf60af5b08f0ed68bc3955b0d4d4a126db5ca4d429eb4a", + "zh:6636401b2463c25e03e68a6b786acf91a311c78444b1dc4f97c539f9f78de22a", + "zh:76858f9d8b460e7b2a338c477671d07286b0d287fd2d2e3214030ae8f61dd56e", + "zh:a13b69fb43cb8746793b3069c4d897bb18f454290b496f19d03c3387d1c9a2dc", + "zh:a90ca81bb9bb509063b736842250ecff0f886a91baae8de65c8430168001dad9", + "zh:c4de401395936e41234f1956ebadbd2ed9f414e6908f27d578614aaa529870d4", + "zh:c657e121af8fde19964482997f0de2d5173217274f6997e16389e7707ed8ece8", + "zh:d68b07a67fbd604c38ec9733069fbf23441436fecf554de6c75c032f82e1ef19", 
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", - "zh:fb07f708e3316615f6d218cec198504984c0ce7000b9f1eebff7516e384f4b54", ] } diff --git a/e2e/miniconstellation/BUILD.bazel b/e2e/miniconstellation/BUILD.bazel index 47dd3b278..836b2d075 100644 --- a/e2e/miniconstellation/BUILD.bazel +++ b/e2e/miniconstellation/BUILD.bazel @@ -1,5 +1,4 @@ load("@com_github_ash2k_bazel_tools//multirun:def.bzl", "multirun") -load("@rules_shell//shell:sh_library.bzl", "sh_library") load("//bazel/sh:def.bzl", "sh_template") filegroup( @@ -10,7 +9,6 @@ filegroup( "main.tf", "output.tf", "test-remote.sh", - "variables.tf", ], ) diff --git a/e2e/miniconstellation/main.sh.in b/e2e/miniconstellation/main.sh.in index f25975554..694f17bb3 100755 --- a/e2e/miniconstellation/main.sh.in +++ b/e2e/miniconstellation/main.sh.in @@ -23,11 +23,6 @@ cd e2e/miniconstellation echo "::group::Terraform" -random_string=$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 6) -rg_name="e2e-mini-${random_string}" -echo "rgname=${rg_name}" >> "${GITHUB_OUTPUT:-/dev/null}" -echo "resource_name = \"${rg_name}\"" > terraform.tfvars - terraform init terraform apply -auto-approve terraform output -raw ssh_private_key > id_rsa diff --git a/e2e/miniconstellation/main.tf b/e2e/miniconstellation/main.tf index 2054a63a9..094a217e2 100644 --- a/e2e/miniconstellation/main.tf +++ b/e2e/miniconstellation/main.tf @@ -2,15 +2,19 @@ terraform { required_providers { azurerm = { source = "hashicorp/azurerm" - version = "4.29.0" + version = "3.92.0" + } + random = { + source = "hashicorp/random" + version = "3.6.0" } tls = { source = "hashicorp/tls" - version = "4.1.0" + version = "4.0.5" } cloudinit = { source = "hashicorp/cloudinit" - version = "2.3.7" + version = "2.3.3" } } } @@ -18,13 +22,15 @@ terraform { provider "azurerm" { use_oidc = true features {} - # This enables all resource providers. - # In the future, we might want to use `resource_providers_to_register` to registers just the ones we need. 
- resource_provider_registrations = "all" } provider "tls" {} +resource "random_string" "suffix" { + length = 6 + special = false +} + resource "tls_private_key" "ssh_key" { algorithm = "RSA" rsa_bits = 2048 @@ -41,26 +47,26 @@ data "cloudinit_config" "cloud_init" { } resource "azurerm_resource_group" "main" { - name = var.resource_name - location = "West Europe" + name = "e2e-mini-${random_string.suffix.result}" + location = "North Europe" } resource "azurerm_virtual_network" "main" { - name = var.resource_name + name = "e2e-mini-${random_string.suffix.result}" address_space = ["10.0.0.0/16"] location = azurerm_resource_group.main.location resource_group_name = azurerm_resource_group.main.name } resource "azurerm_subnet" "main" { - name = var.resource_name + name = "e2e-mini-${random_string.suffix.result}" resource_group_name = azurerm_resource_group.main.name virtual_network_name = azurerm_virtual_network.main.name address_prefixes = ["10.0.2.0/24"] } resource "azurerm_public_ip" "main" { - name = var.resource_name + name = "e2e-mini-${random_string.suffix.result}" location = azurerm_resource_group.main.location resource_group_name = azurerm_resource_group.main.name allocation_method = "Static" @@ -68,7 +74,7 @@ resource "azurerm_public_ip" "main" { } resource "azurerm_network_interface" "main" { - name = var.resource_name + name = "e2e-mini-${random_string.suffix.result}" resource_group_name = azurerm_resource_group.main.name location = azurerm_resource_group.main.location @@ -81,7 +87,7 @@ resource "azurerm_network_interface" "main" { } resource "azurerm_network_security_group" "ssh" { - name = var.resource_name + name = "e2e-mini-${random_string.suffix.result}" resource_group_name = azurerm_resource_group.main.name location = azurerm_resource_group.main.location @@ -104,7 +110,7 @@ resource "azurerm_subnet_network_security_group_association" "ssh" { } resource "azurerm_linux_virtual_machine" "main" { - name = var.resource_name + name = "e2e-mini-${random_string.suffix.result}" resource_group_name = azurerm_resource_group.main.name location = azurerm_resource_group.main.location diff --git a/e2e/miniconstellation/test-remote.sh b/e2e/miniconstellation/test-remote.sh index c5fbf4ac5..10a95d8c4 100755 --- a/e2e/miniconstellation/test-remote.sh +++ b/e2e/miniconstellation/test-remote.sh @@ -45,8 +45,6 @@ fi echo "Done waiting." -echo '127.0.0.1 license.confidential.cloud' | sudo tee /etc/hosts > /dev/null - ./constellation mini up --debug export KUBECONFIG="${PWD}/constellation-admin.conf" diff --git a/e2e/miniconstellation/variables.tf b/e2e/miniconstellation/variables.tf deleted file mode 100644 index eb2034c7f..000000000 --- a/e2e/miniconstellation/variables.tf +++ /dev/null @@ -1,4 +0,0 @@ -variable "resource_name" { - type = string - description = "name for resources to create" -} diff --git a/e2e/provider-upgrade/upgrade_test.go b/e2e/provider-upgrade/upgrade_test.go index 821158ac6..78bcd65b7 100644 --- a/e2e/provider-upgrade/upgrade_test.go +++ b/e2e/provider-upgrade/upgrade_test.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // End-to-end test that is used by the e2e Terraform provider test. 
diff --git a/flake.lock b/flake.lock index b424b9c4a..59e778edd 100644 --- a/flake.lock +++ b/flake.lock @@ -5,11 +5,11 @@ "systems": "systems" }, "locked": { - "lastModified": 1731533236, - "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=", + "lastModified": 1705309234, + "narHash": "sha256-uNRRNRKmJyCRC/8y1RqBkqWBLM034y4qN7EprSdmgyA=", "owner": "numtide", "repo": "flake-utils", - "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b", + "rev": "1ef2e671c3b0c19053962c07dbda38332dcebf26", "type": "github" }, "original": { @@ -18,13 +18,13 @@ "type": "github" } }, - "nixpkgs": { + "nixpkgsUnstable": { "locked": { - "lastModified": 1743938762, - "narHash": "sha256-UgFYn8sGv9B8PoFpUfCa43CjMZBl1x/ShQhRDHBFQdI=", + "lastModified": 1707939175, + "narHash": "sha256-D1xan0lgxbmXDyzVqXTiSYHLmAMrMRdD+alKzEO/p3w=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "74a40410369a1c35ee09b8a1abee6f4acbedc059", + "rev": "f7e8132daca31b1e3859ac0fb49741754375ac3d", "type": "github" }, "original": { @@ -37,7 +37,8 @@ "root": { "inputs": { "flake-utils": "flake-utils", - "nixpkgs": "nixpkgs" + "nixpkgsUnstable": "nixpkgsUnstable", + "uplosi": "uplosi" } }, "systems": { @@ -54,6 +55,29 @@ "repo": "default", "type": "github" } + }, + "uplosi": { + "inputs": { + "flake-utils": [ + "flake-utils" + ], + "nixpkgs": [ + "nixpkgsUnstable" + ] + }, + "locked": { + "lastModified": 1708338970, + "narHash": "sha256-eX86SesWRQN7S5g6sXgKG5Q6KJLcip2MxJIIIM1Lj+E=", + "owner": "edgelesssys", + "repo": "uplosi", + "rev": "32e47c6d2445cdc76b8458f785b921431c2ae609", + "type": "github" + }, + "original": { + "owner": "edgelesssys", + "repo": "uplosi", + "type": "github" + } } }, "root": "root", diff --git a/flake.nix b/flake.nix index 1e1304460..ff88be0b5 100644 --- a/flake.nix +++ b/flake.nix @@ -2,135 +2,87 @@ description = "Constellation"; inputs = { - nixpkgs = { + nixpkgsUnstable = { url = "github:NixOS/nixpkgs/nixpkgs-unstable"; }; flake-utils = { url = "github:numtide/flake-utils"; }; + uplosi = { + url = "github:edgelesssys/uplosi"; + inputs.nixpkgs.follows = "nixpkgsUnstable"; + inputs.flake-utils.follows = "flake-utils"; + }; }; outputs = - { - self, - nixpkgs, - flake-utils, + { self + , nixpkgsUnstable + , flake-utils + , uplosi }: - flake-utils.lib.eachDefaultSystem ( - system: - let - overlay = final: prev: { - rpm = prev.rpm.overrideAttrs (old: { - nativeBuildInputs = old.nativeBuildInputs ++ [ prev.makeWrapper ]; - postFixup = '' - wrapProgram $out/lib/rpm/sysusers.sh \ - --set PATH ${ - prev.lib.makeBinPath ( - with prev; - [ - coreutils - findutils - su.out - gnugrep - ] - ) - } - ''; - }); + flake-utils.lib.eachDefaultSystem (system: + let + pkgsUnstable = import nixpkgsUnstable { inherit system; }; - # dnf5 assumes a TTY with a very small width by default, truncating its output instead of line-wrapping - # it. Force it to use more VT columns to avoid this, and make debugging errors easier. 
- dnf5-stub = prev.writeScriptBin "dnf5" '' - #!/usr/bin/env bash - FORCE_COLUMNS=200 ${final.dnf5}/bin/dnf5 $@ - ''; - }; + callPackage = pkgsUnstable.callPackage; - pkgs = import nixpkgs { - inherit system; - config.allowUnfree = true; + mkosiDev = (pkgsUnstable.mkosi.overrideAttrs (oldAttrs: rec { + propagatedBuildInputs = oldAttrs.propagatedBuildInputs ++ (with pkgsUnstable; [ + # package management + dnf5 + rpm + createrepo_c - overlays = [ - (_final: prev: (import ./nix/packages { inherit (prev) lib callPackage; })) - (_final: prev: { lib = prev.lib // (import ./nix/lib { inherit (prev) lib callPackage; }); }) - overlay - ]; - }; + # filesystem tools + squashfsTools # mksquashfs + dosfstools # mkfs.vfat + mtools # mcopy + cryptsetup # dm-verity + util-linux # flock + kmod # depmod + cpio # cpio + zstd # zstd + xz # xz - callPackage = pkgs.callPackage; + # utils + gnused # sed + gnugrep # grep + ]); + })); - mkosiDev = ( - pkgs.mkosi.override { - extraDeps = ( - with pkgs; - [ - # package management - dnf5-stub - rpm - createrepo_c + uplosiDev = uplosi.outputs.packages."${system}".uplosi; - # filesystem tools - squashfsTools # mksquashfs - dosfstools # mkfs.vfat - mtools # mcopy - cryptsetup # dm-verity - util-linux # flock - kmod # depmod - cpio # cpio - zstd # zstd - xz # xz + openssl-static = pkgsUnstable.openssl.override { static = true; }; - # utils - gnused # sed - gnugrep # grep - ] - ); - } - ); - in - { - # Use `legacyPackages` instead of `packages` for the reason explained here: - # https://github.com/NixOS/nixpkgs/blob/34def00657d7c45c51b0762eb5f5309689a909a5/flake.nix#L138-L156 - # Note that it's *not* a legacy attribute. - legacyPackages = { - generate = pkgs.callPackage ./nix/generate.nix { }; - } // pkgs; + bazel_7 = callPackage ./nix/packages/bazel.nix { pkgs = pkgsUnstable; nixpkgs = nixpkgsUnstable; }; - packages.mkosi = mkosiDev; + in + { + packages.mkosi = mkosiDev; - packages.uplosi = pkgs.uplosi; + packages.uplosi = uplosiDev; - packages.openssl = callPackage ./nix/cc/openssl.nix { pkgs = pkgs; }; + packages.openssl = callPackage ./nix/cc/openssl.nix { pkgs = pkgsUnstable; }; - packages.cryptsetup = callPackage ./nix/cc/cryptsetup.nix { - pkgs = pkgs; - pkgsLinux = import nixpkgs { system = "x86_64-linux"; }; - }; + packages.cryptsetup = callPackage ./nix/cc/cryptsetup.nix { pkgs = pkgsUnstable; pkgsLinux = import nixpkgsUnstable { system = "x86_64-linux"; }; }; - packages.libvirt = callPackage ./nix/cc/libvirt.nix { - pkgs = pkgs; - pkgsLinux = import nixpkgs { system = "x86_64-linux"; }; - }; + packages.libvirt = callPackage ./nix/cc/libvirt.nix { pkgs = pkgsUnstable; pkgsLinux = import nixpkgsUnstable { system = "x86_64-linux"; }; }; - packages.libvirtd_base = callPackage ./nix/container/libvirtd_base.nix { - pkgs = pkgs; - pkgsLinux = import nixpkgs { system = "x86_64-linux"; }; - }; + packages.libvirtd_base = callPackage ./nix/container/libvirtd_base.nix { pkgs = pkgsUnstable; pkgsLinux = import nixpkgsUnstable { system = "x86_64-linux"; }; }; - packages.vpn = callPackage ./nix/container/vpn/vpn.nix { - pkgs = pkgs; - pkgsLinux = import nixpkgs { system = "x86_64-linux"; }; - }; + packages.vpn = callPackage ./nix/container/vpn/vpn.nix { pkgs = pkgsUnstable; pkgsLinux = import nixpkgsUnstable { system = "x86_64-linux"; }; }; - packages.awscli2 = pkgs.awscli2; + packages.awscli2 = pkgsUnstable.awscli2; - packages.createrepo_c = pkgs.createrepo_c; + packages.bazel_7 = bazel_7; - packages.dnf5 = pkgs.dnf5; + packages.createrepo_c = pkgsUnstable.createrepo_c; - 
devShells.default = callPackage ./nix/shells/default.nix { }; + packages.dnf5 = pkgsUnstable.dnf5; - formatter = nixpkgs.legacyPackages.${system}.nixpkgs-fmt; - } - ); + devShells.default = callPackage ./nix/shells/default.nix { inherit bazel_7; }; + + formatter = nixpkgsUnstable.legacyPackages.${system}.nixpkgs-fmt; + }); } diff --git a/go.mod b/go.mod index 82e6194be..8d0f11fd4 100644 --- a/go.mod +++ b/go.mod @@ -1,163 +1,166 @@ module github.com/edgelesssys/constellation/v2 -go 1.24.4 +go 1.22 -// TODO(daniel-weisse): revert after merging https://github.com/martinjungblut/go-cryptsetup/pull/16. -replace github.com/martinjungblut/go-cryptsetup => github.com/daniel-weisse/go-cryptsetup v0.0.0-20230705150314-d8c07bd1723c - -// TODO(daniel-weisse): revert after merging https://github.com/google/go-sev-guest/pull/173. -replace github.com/google/go-sev-guest => github.com/daniel-weisse/go-sev-guest v0.0.0-20250728114912-0c2ba277c52b - -// Kubernetes replace directives are required because we depend on k8s.io/kubernetes/cmd/kubeadm -// k8s discourages usage of k8s.io/kubernetes as a dependency, but no external staging repositories for kubeadm exist. -// Our code does not actually depend on these packages, but `go mod download` breaks without this replace directive. -// See this issue: https://github.com/kubernetes/kubernetes/issues/79384 -// And this README: https://github.com/kubernetes/kubernetes/blob/master/staging/README.md replace ( - k8s.io/cloud-provider => k8s.io/cloud-provider v0.33.3 - k8s.io/controller-manager => k8s.io/controller-manager v0.33.3 - k8s.io/cri-client => k8s.io/cri-client v0.33.3 - k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.33.3 - k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.33.3 - k8s.io/endpointslice => k8s.io/endpointslice v0.33.3 - k8s.io/externaljwt => k8s.io/externaljwt v0.33.3 - k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.33.3 - k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.33.3 - k8s.io/kube-proxy => k8s.io/kube-proxy v0.33.3 - k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.33.3 - k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.30.14 - k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.33.3 - k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.33.3 + k8s.io/api v0.0.0 => k8s.io/api v0.29.0 + k8s.io/apiextensions-apiserver v0.0.0 => k8s.io/apiextensions-apiserver v0.29.0 + k8s.io/apimachinery v0.0.0 => k8s.io/apimachinery v0.29.0 + k8s.io/apiserver v0.0.0 => k8s.io/apiserver v0.29.0 + k8s.io/cli-runtime v0.0.0 => k8s.io/cli-runtime v0.29.0 + k8s.io/client-go v0.0.0 => k8s.io/client-go v0.29.0 + k8s.io/cloud-provider v0.0.0 => k8s.io/cloud-provider v0.29.0 + k8s.io/cluster-bootstrap v0.0.0 => k8s.io/cluster-bootstrap v0.29.0 + k8s.io/code-generator v0.0.0 => k8s.io/code-generator v0.29.0 + k8s.io/component-base v0.0.0 => k8s.io/component-base v0.29.0 + k8s.io/component-helpers v0.0.0 => k8s.io/component-helpers v0.29.0 + k8s.io/controller-manager v0.0.0 => k8s.io/controller-manager v0.29.0 + k8s.io/cri-api v0.0.0 => k8s.io/cri-api v0.29.0 + k8s.io/csi-translation-lib v0.0.0 => k8s.io/csi-translation-lib v0.29.0 + k8s.io/dynamic-resource-allocation v0.0.0 => k8s.io/dynamic-resource-allocation v0.29.0 + k8s.io/endpointslice v0.0.0 => k8s.io/endpointslice v0.29.0 + k8s.io/kube-aggregator v0.0.0 => k8s.io/kube-aggregator v0.29.0 + k8s.io/kube-controller-manager v0.0.0 => k8s.io/kube-controller-manager v0.29.0 + k8s.io/kube-proxy v0.0.0 => 
k8s.io/kube-proxy v0.29.0 + k8s.io/kube-scheduler v0.0.0 => k8s.io/kube-scheduler v0.29.0 + k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.29.0 + k8s.io/kubelet v0.0.0 => k8s.io/kubelet v0.29.0 + k8s.io/kubernetes v0.0.0 => k8s.io/kubernetes v1.29.0 + k8s.io/legacy-cloud-providers v0.0.0 => k8s.io/legacy-cloud-providers v0.29.0 + k8s.io/metrics v0.0.0 => k8s.io/metrics v0.29.0 + k8s.io/mount-utils v0.0.0 => k8s.io/mount-utils v0.29.0 + k8s.io/pod-security-admission v0.0.0 => k8s.io/pod-security-admission v0.29.0 + k8s.io/sample-apiserver v0.0.0 => k8s.io/sample-apiserver v0.29.0 +) + +replace ( + github.com/google/go-sev-guest => github.com/google/go-sev-guest v0.0.0-20230928233922-2dcbba0a4b9d + github.com/google/go-tpm => github.com/thomasten/go-tpm v0.0.0-20230629092004-f43f8e2a59eb + github.com/martinjungblut/go-cryptsetup => github.com/daniel-weisse/go-cryptsetup v0.0.0-20230705150314-d8c07bd1723c + github.com/tink-crypto/tink-go/v2 v2.0.0 => github.com/derpsteb/tink-go/v2 v2.0.0-20231002051717-a808e454eed6 ) require ( - cloud.google.com/go/compute v1.41.0 - cloud.google.com/go/compute/metadata v0.7.0 - cloud.google.com/go/kms v1.22.0 - cloud.google.com/go/secretmanager v1.15.0 - cloud.google.com/go/storage v1.56.0 - dario.cat/mergo v1.0.2 + cloud.google.com/go/compute v1.24.0 + cloud.google.com/go/compute/metadata v0.2.3 + cloud.google.com/go/kms v1.15.7 + cloud.google.com/go/secretmanager v1.11.5 + cloud.google.com/go/storage v1.38.0 + dario.cat/mergo v1.0.0 github.com/Azure/azure-sdk-for-go v68.0.0+incompatible - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1 - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6 v6.4.0 - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v6 v6.2.0 - github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets v1.4.0 - github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2 - github.com/BurntSushi/toml v1.5.0 - github.com/aws/aws-sdk-go v1.55.7 - github.com/aws/aws-sdk-go-v2 v1.37.1 - github.com/aws/aws-sdk-go-v2/config v1.30.2 - github.com/aws/aws-sdk-go-v2/credentials v1.18.2 - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.1 - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.18.2 - github.com/aws/aws-sdk-go-v2/service/autoscaling v1.55.1 - github.com/aws/aws-sdk-go-v2/service/cloudfront v1.49.0 - github.com/aws/aws-sdk-go-v2/service/ec2 v1.238.0 - github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.47.1 - github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.27.1 - github.com/aws/aws-sdk-go-v2/service/s3 v1.85.1 - github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.36.1 - github.com/aws/smithy-go v1.22.5 - github.com/bazelbuild/buildtools v0.0.0-20250715102656-62b9413b08bb - github.com/bazelbuild/rules_go v0.55.1 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.5.0 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v5 v5.0.0 + github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets v1.1.0 + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.0 + github.com/aws/aws-sdk-go v1.50.22 + github.com/aws/aws-sdk-go-v2 v1.25.0 + github.com/aws/aws-sdk-go-v2/config v1.27.1 + github.com/aws/aws-sdk-go-v2/credentials v1.17.1 + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.0 + github.com/aws/aws-sdk-go-v2/feature/s3/manager 
v1.16.3 + github.com/aws/aws-sdk-go-v2/service/autoscaling v1.39.1 + github.com/aws/aws-sdk-go-v2/service/cloudfront v1.34.1 + github.com/aws/aws-sdk-go-v2/service/ec2 v1.148.1 + github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.29.1 + github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.20.2 + github.com/aws/aws-sdk-go-v2/service/s3 v1.50.2 + github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.27.2 + github.com/aws/smithy-go v1.20.0 + github.com/bazelbuild/buildtools v0.0.0-20230317132445-9c3c1fc0106e + github.com/bazelbuild/rules_go v0.42.0 github.com/coreos/go-systemd/v22 v22.5.0 - github.com/docker/docker v28.3.3+incompatible - github.com/edgelesssys/go-azguestattestation v0.0.0-20250408071817-8c4457b235ff - github.com/edgelesssys/go-tdx-qpl v0.0.0-20250129202750-607ac61e2377 - github.com/foxboron/go-uefi v0.0.0-20250625111927-a3183a1bfc84 - github.com/fsnotify/fsnotify v1.9.0 + github.com/docker/docker v25.0.3+incompatible + github.com/edgelesssys/go-azguestattestation v0.0.0-20230707101700-a683be600fcf + github.com/edgelesssys/go-tdx-qpl v0.0.0-20240123150912-dcad3c41ec5f + github.com/foxboron/go-uefi v0.0.0-20240128152106-48be911532c2 + github.com/fsnotify/fsnotify v1.7.0 github.com/go-playground/locales v0.14.1 github.com/go-playground/universal-translator v0.18.1 - github.com/go-playground/validator/v10 v10.27.0 - github.com/golang-jwt/jwt/v5 v5.3.0 - github.com/google/go-sev-guest v0.13.0 - github.com/google/go-tdx-guest v0.3.2-0.20250505161510-9efd53b4a100 - github.com/google/go-tpm v0.9.5 - github.com/google/go-tpm-tools v0.4.4 + github.com/go-playground/validator/v10 v10.14.1 + github.com/golang-jwt/jwt/v5 v5.2.0 + github.com/google/go-sev-guest v0.9.3 + github.com/google/go-tdx-guest v0.3.1 + github.com/google/go-tpm v0.9.0 + github.com/google/go-tpm-tools v0.4.3-0.20240112165732-912a43636883 github.com/google/uuid v1.6.0 - github.com/googleapis/gax-go/v2 v2.15.0 - github.com/gophercloud/gophercloud/v2 v2.7.0 - github.com/gophercloud/utils/v2 v2.0.0-20250711132455-9770683b100a - github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2 - github.com/hashicorp/go-kms-wrapping/v2 v2.0.18 - github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.11 - github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 v2.0.14 - github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.13 - github.com/hashicorp/go-version v1.7.0 - github.com/hashicorp/hc-install v0.9.2 - github.com/hashicorp/hcl/v2 v2.24.0 - github.com/hashicorp/terraform-exec v0.23.0 - github.com/hashicorp/terraform-json v0.25.0 - github.com/hashicorp/terraform-plugin-framework v1.15.0 - github.com/hashicorp/terraform-plugin-framework-validators v0.18.0 - github.com/hashicorp/terraform-plugin-go v0.28.0 + github.com/googleapis/gax-go/v2 v2.12.1 + github.com/gophercloud/gophercloud v1.9.0 + github.com/gophercloud/utils v0.0.0-20231010081019-80377eca5d56 + github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.1 + github.com/hashicorp/go-kms-wrapping/v2 v2.0.16 + github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.9 + github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 v2.0.11 + github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.11 + github.com/hashicorp/go-version v1.6.0 + github.com/hashicorp/hc-install v0.6.3 + github.com/hashicorp/hcl/v2 v2.19.1 + github.com/hashicorp/terraform-exec v0.20.0 + github.com/hashicorp/terraform-json v0.21.0 + github.com/hashicorp/terraform-plugin-framework v1.5.0 + github.com/hashicorp/terraform-plugin-framework-validators 
v0.12.0 + github.com/hashicorp/terraform-plugin-go v0.21.0 github.com/hashicorp/terraform-plugin-log v0.9.0 - github.com/hashicorp/terraform-plugin-testing v1.13.2 + github.com/hashicorp/terraform-plugin-testing v1.6.0 github.com/hexops/gotextdiff v1.0.3 github.com/martinjungblut/go-cryptsetup v0.0.0-20220520180014-fd0874fd07a6 github.com/mattn/go-isatty v0.0.20 - github.com/mitchellh/go-homedir v1.1.0 - github.com/onsi/ginkgo/v2 v2.23.4 - github.com/onsi/gomega v1.38.0 + github.com/onsi/ginkgo/v2 v2.14.0 + github.com/onsi/gomega v1.30.0 github.com/pkg/errors v0.9.1 - github.com/regclient/regclient v0.9.0 - github.com/rogpeppe/go-internal v1.14.1 - github.com/samber/slog-multi v1.4.1 - github.com/schollz/progressbar/v3 v3.18.0 - github.com/secure-systems-lab/go-securesystemslib v0.9.0 - github.com/siderolabs/talos/pkg/machinery v1.10.5 - github.com/sigstore/rekor v1.3.10 - github.com/sigstore/sigstore v1.9.5 - github.com/spf13/afero v1.14.0 - github.com/spf13/cobra v1.9.1 - github.com/spf13/pflag v1.0.7 - github.com/stretchr/testify v1.10.0 - github.com/tink-crypto/tink-go/v2 v2.4.0 + github.com/regclient/regclient v0.5.7 + github.com/rogpeppe/go-internal v1.12.0 + github.com/samber/slog-multi v1.0.2 + github.com/schollz/progressbar/v3 v3.14.1 + github.com/siderolabs/talos/pkg/machinery v1.6.4 + github.com/sigstore/rekor v1.3.5 + github.com/sigstore/sigstore v1.8.1 + github.com/spf13/afero v1.11.0 + github.com/spf13/cobra v1.8.0 + github.com/spf13/pflag v1.0.5 + github.com/stretchr/testify v1.8.4 + github.com/tink-crypto/tink-go/v2 v2.0.0 github.com/vincent-petithory/dataurl v1.0.0 - go.etcd.io/etcd/api/v3 v3.6.4 - go.etcd.io/etcd/client/pkg/v3 v3.6.4 - go.etcd.io/etcd/client/v3 v3.6.4 + go.etcd.io/etcd/api/v3 v3.5.12 + go.etcd.io/etcd/client/pkg/v3 v3.5.12 + go.etcd.io/etcd/client/v3 v3.5.12 go.uber.org/goleak v1.3.0 - golang.org/x/crypto v0.40.0 - golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 - golang.org/x/mod v0.26.0 - golang.org/x/sys v0.34.0 - golang.org/x/text v0.27.0 - golang.org/x/tools v0.35.0 - google.golang.org/api v0.244.0 - google.golang.org/grpc v1.74.2 - google.golang.org/protobuf v1.36.6 + golang.org/x/crypto v0.19.0 + golang.org/x/exp v0.0.0-20240213143201-ec583247a57a + golang.org/x/mod v0.15.0 + golang.org/x/sys v0.17.0 + golang.org/x/text v0.14.0 + golang.org/x/tools v0.18.0 + google.golang.org/api v0.165.0 + google.golang.org/grpc v1.61.1 + google.golang.org/protobuf v1.33.0 gopkg.in/yaml.v3 v3.0.1 - helm.sh/helm/v3 v3.18.4 - k8s.io/api v0.33.3 - k8s.io/apiextensions-apiserver v0.33.3 - k8s.io/apimachinery v0.33.3 - k8s.io/apiserver v0.33.3 - k8s.io/client-go v0.33.3 - k8s.io/cluster-bootstrap v0.33.3 - k8s.io/kubelet v0.33.3 - k8s.io/kubernetes v1.33.3 - k8s.io/mount-utils v0.33.3 - k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 - libvirt.org/go/libvirt v1.11005.0 - sigs.k8s.io/controller-runtime v0.21.0 - sigs.k8s.io/yaml v1.6.0 + helm.sh/helm v2.17.0+incompatible + helm.sh/helm/v3 v3.14.2 + k8s.io/api v0.29.0 + k8s.io/apiextensions-apiserver v0.29.0 + k8s.io/apimachinery v0.29.0 + k8s.io/apiserver v0.29.0 + k8s.io/client-go v0.29.0 + k8s.io/cluster-bootstrap v0.29.0 + k8s.io/kubelet v0.29.0 + k8s.io/kubernetes v1.29.0 + k8s.io/mount-utils v0.29.0 + k8s.io/utils v0.0.0-20240102154912-e7106e64919e + libvirt.org/go/libvirt v1.10000.0 + sigs.k8s.io/yaml v1.4.0 ) require ( - cel.dev/expr v0.24.0 // indirect - cloud.google.com/go v0.121.4 // indirect - cloud.google.com/go/auth v0.16.3 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect - 
cloud.google.com/go/iam v1.5.2 // indirect - cloud.google.com/go/longrunning v0.6.7 // indirect - cloud.google.com/go/monitoring v1.24.2 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect + cloud.google.com/go v0.112.0 // indirect + cloud.google.com/go/iam v1.1.6 // indirect + github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys v0.10.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 // indirect - github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0 // indirect - github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect + github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest v0.11.29 // indirect github.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect @@ -166,239 +169,220 @@ require ( github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect + github.com/BurntSushi/toml v1.3.2 github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/Masterminds/goutils v1.1.1 // indirect - github.com/Masterminds/semver/v3 v3.3.0 // indirect - github.com/Masterminds/sprig/v3 v3.3.0 // indirect + github.com/Masterminds/semver/v3 v3.2.1 // indirect + github.com/Masterminds/sprig/v3 v3.2.3 // indirect github.com/Masterminds/squirrel v1.5.4 // indirect - github.com/Microsoft/go-winio v0.6.2 // indirect - github.com/ProtonMail/go-crypto v1.1.6 // indirect - github.com/agext/levenshtein v1.2.2 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/Microsoft/hcsshim v0.11.4 // indirect + github.com/ProtonMail/go-crypto v1.1.0-alpha.0-proton // indirect + github.com/agext/levenshtein v1.2.3 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.0 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.1 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.1 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.1 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.1 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.1 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.1 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.26.1 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.31.1 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.35.1 
// indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.0 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.0 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.0 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.19.1 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.22.1 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.27.1 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver v3.5.1+incompatible // indirect github.com/blang/semver/v4 v4.0.0 // indirect - github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/chai2010/gettext-go v1.0.2 // indirect - github.com/cloudflare/circl v1.6.1 // indirect - github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect - github.com/containerd/containerd v1.7.27 // indirect - github.com/containerd/errdefs v1.0.0 // indirect - github.com/containerd/errdefs/pkg v0.3.0 // indirect + github.com/cloudflare/circl v1.3.7 // indirect + github.com/containerd/containerd v1.7.13 // indirect github.com/containerd/log v0.1.0 // indirect - github.com/containerd/platforms v0.2.1 // indirect - github.com/coredns/caddy v1.1.1 // indirect - github.com/coredns/corefile-migration v1.0.25 // indirect github.com/coreos/go-semver v0.3.1 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect - github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7 // indirect - github.com/cyphar/filepath-securejoin v0.4.1 // indirect - github.com/danieljoos/wincred v1.2.1 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect + github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f // indirect + github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/distribution/reference v0.6.0 // indirect + github.com/distribution/reference v0.5.0 // indirect + github.com/docker/cli v25.0.3+incompatible // indirect + github.com/docker/distribution v2.8.3+incompatible // indirect + github.com/docker/docker-credential-helpers v0.8.1 // indirect github.com/docker/go-connections v0.5.0 // indirect + github.com/docker/go-metrics v0.0.1 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect - github.com/emicklei/go-restful/v3 v3.11.0 // indirect - github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect - github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect - github.com/evanphx/json-patch v5.9.11+incompatible // indirect - github.com/evanphx/json-patch/v5 v5.9.11 // indirect + github.com/emicklei/go-restful/v3 v3.11.2 // indirect + github.com/evanphx/json-patch v5.9.0+incompatible // indirect + github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect - github.com/fatih/color v1.18.0 // indirect + github.com/fatih/color v1.16.0 // indirect github.com/felixge/httpsnoop v1.0.4 // 
indirect - github.com/fxamacker/cbor/v2 v2.7.0 // indirect - github.com/gabriel-vasile/mimetype v1.4.8 // indirect + github.com/gabriel-vasile/mimetype v1.4.3 // indirect github.com/go-chi/chi v4.1.2+incompatible // indirect - github.com/go-errors/errors v1.4.2 // indirect + github.com/go-errors/errors v1.5.1 // indirect github.com/go-gorp/gorp/v3 v3.1.0 // indirect - github.com/go-jose/go-jose/v3 v3.0.4 // indirect - github.com/go-jose/go-jose/v4 v4.0.5 // indirect - github.com/go-logr/logr v1.4.3 // indirect + github.com/go-jose/go-jose/v3 v3.0.1 // indirect + github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.3.0 // indirect - github.com/go-openapi/analysis v0.23.0 // indirect - github.com/go-openapi/errors v0.22.1 // indirect - github.com/go-openapi/jsonpointer v0.21.0 // indirect - github.com/go-openapi/jsonreference v0.21.0 // indirect - github.com/go-openapi/loads v0.22.0 // indirect - github.com/go-openapi/runtime v0.28.0 // indirect - github.com/go-openapi/spec v0.21.0 // indirect - github.com/go-openapi/strfmt v0.23.0 // indirect - github.com/go-openapi/swag v0.23.1 // indirect - github.com/go-openapi/validate v0.24.0 // indirect - github.com/go-task/slim-sprig/v3 v3.0.0 // indirect + github.com/go-openapi/analysis v0.22.2 // indirect + github.com/go-openapi/errors v0.21.0 // indirect + github.com/go-openapi/jsonpointer v0.20.2 // indirect + github.com/go-openapi/jsonreference v0.20.4 // indirect + github.com/go-openapi/loads v0.21.5 // indirect + github.com/go-openapi/runtime v0.27.1 // indirect + github.com/go-openapi/spec v0.20.14 // indirect + github.com/go-openapi/strfmt v0.22.0 // indirect + github.com/go-openapi/swag v0.22.9 // indirect + github.com/go-openapi/validate v0.23.0 // indirect + github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect - github.com/gofrs/uuid/v5 v5.3.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-jwt/jwt/v4 v4.5.2 // indirect - github.com/golang/protobuf v1.5.4 // indirect - github.com/google/btree v1.1.3 // indirect - github.com/google/certificate-transparency-go v1.1.8 // indirect - github.com/google/gnostic-models v0.6.9 // indirect + github.com/golang-jwt/jwt/v4 v4.5.0 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/google/btree v1.1.2 // indirect + github.com/google/certificate-transparency-go v1.1.7 // indirect + github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-attestation v0.5.1 // indirect - github.com/google/go-cmp v0.7.0 // indirect - github.com/google/go-configfs-tsm v0.3.3-0.20240919001351-b4b5b84fdcbc // indirect - github.com/google/go-containerregistry v0.20.3 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/go-configfs-tsm v0.2.2 // indirect + github.com/google/go-containerregistry v0.19.0 // indirect github.com/google/go-tspi v0.3.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect github.com/google/logger v1.1.1 // indirect - github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect - github.com/google/s2a-go v0.1.9 // indirect + github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b // indirect + github.com/google/s2a-go v0.1.7 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect - 
github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect - github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect + github.com/gorilla/mux v1.8.1 // indirect + github.com/gorilla/websocket v1.5.1 // indirect github.com/gosuri/uitable v0.0.4 // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-checkpoint v0.5.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-cty v1.5.0 // indirect - github.com/hashicorp/go-hclog v1.6.3 // indirect + github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 // indirect + github.com/hashicorp/go-hclog v1.6.2 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/go-plugin v1.6.3 // indirect - github.com/hashicorp/go-retryablehttp v0.7.7 // indirect - github.com/hashicorp/go-secure-stdlib/awsutil v0.1.6 // indirect - github.com/hashicorp/go-secure-stdlib/parseutil v0.1.9 // indirect - github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect - github.com/hashicorp/go-sockaddr v1.0.6 // indirect + github.com/hashicorp/go-plugin v1.6.0 // indirect + github.com/hashicorp/go-retryablehttp v0.7.5 // indirect + github.com/hashicorp/go-secure-stdlib/awsutil v0.3.0 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/logutils v1.0.0 // indirect - github.com/hashicorp/terraform-plugin-sdk/v2 v2.37.0 // indirect - github.com/hashicorp/terraform-registry-address v0.2.5 // indirect + github.com/hashicorp/terraform-plugin-sdk/v2 v2.30.0 // indirect + github.com/hashicorp/terraform-registry-address v0.2.3 // indirect github.com/hashicorp/terraform-svchost v0.1.1 // indirect github.com/hashicorp/yamux v0.1.1 // indirect - github.com/huandu/xstrings v1.5.0 // indirect + github.com/huandu/xstrings v1.4.0 // indirect + github.com/imdario/mergo v0.3.16 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/jedisct1/go-minisign v0.0.0-20211028175153-1c139d1cc84b // indirect - github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24 // indirect - github.com/jmoiron/sqlx v1.4.0 // indirect + github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/jmoiron/sqlx v1.3.5 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.18.0 // indirect + github.com/klauspost/compress v1.17.6 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect github.com/leodido/go-urn v1.4.0 // indirect - github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec // indirect + github.com/letsencrypt/boulder v0.0.0-20240216200101-4eb5e3caa228 // indirect github.com/lib/pq v1.10.9 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect - github.com/lithammer/dedent v1.1.0 // indirect - github.com/mailru/easyjson v0.9.0 // indirect - github.com/mattn/go-colorable v0.1.14 // indirect - github.com/mattn/go-runewidth v0.0.16 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + 
github.com/mattn/go-runewidth v0.0.15 // indirect github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/go-homedir v1.1.0 github.com/mitchellh/go-testing-interface v1.14.1 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect - github.com/moby/spdystream v0.5.0 // indirect - github.com/moby/sys/atomicwriter v0.1.0 // indirect - github.com/moby/sys/mountinfo v0.7.2 // indirect - github.com/moby/term v0.5.2 // indirect + github.com/moby/locker v1.0.1 // indirect + github.com/moby/spdystream v0.2.0 // indirect + github.com/moby/sys/mountinfo v0.7.1 // indirect + github.com/moby/term v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect - github.com/oklog/run v1.0.0 // indirect + github.com/oklog/run v1.1.0 // indirect github.com/oklog/ulid v1.3.1 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.1 // indirect + github.com/opencontainers/image-spec v1.1.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect + github.com/pborman/uuid v1.2.1 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect - github.com/planetscale/vtprotobuf v0.6.1-0.20241121165744-79df5c4772f2 // indirect - github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_golang v1.22.0 // indirect - github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.62.0 // indirect - github.com/prometheus/procfs v0.15.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_golang v1.18.0 // indirect + github.com/prometheus/client_model v0.6.0 // indirect + github.com/prometheus/common v0.47.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect github.com/rivo/uniseg v0.4.7 // indirect - github.com/rubenv/sql-migrate v1.8.0 // indirect + github.com/rubenv/sql-migrate v1.6.1 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/ryanuber/go-glob v1.0.0 // indirect - github.com/samber/lo v1.51.0 // indirect - github.com/samber/slog-common v0.19.0 // indirect + github.com/samber/lo v1.38.1 // indirect github.com/sassoftware/relic v7.2.1+incompatible // indirect - github.com/shopspring/decimal v1.4.0 // indirect - github.com/sigstore/protobuf-specs v0.4.1 // indirect + github.com/secure-systems-lab/go-securesystemslib v0.8.0 + github.com/shopspring/decimal v1.3.1 // indirect github.com/sirupsen/logrus v1.9.3 // indirect - github.com/spf13/cast v1.7.0 // indirect - github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect - github.com/stretchr/objx v0.5.2 // indirect + github.com/spf13/cast v1.6.0 // indirect + github.com/stretchr/objx v0.5.0 // indirect github.com/theupdateframework/go-tuf v0.7.0 // indirect github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect github.com/transparency-dev/merkle v0.0.2 // indirect - 
github.com/ulikunitz/xz v0.5.12 // indirect + github.com/ulikunitz/xz v0.5.11 // indirect github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect github.com/vtolstov/go-ioctl v0.0.0-20151206205506-6be9cced4810 // indirect - github.com/x448/float16 v0.8.4 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/xlab/treeprint v1.2.0 // indirect - github.com/zclconf/go-cty v1.16.3 // indirect - github.com/zeebo/errs v1.4.0 // indirect + github.com/zclconf/go-cty v1.14.2 // indirect go.mongodb.org/mongo-driver v1.14.0 // indirect - go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/contrib/detectors/gcp v1.36.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect - go.opentelemetry.io/otel v1.36.0 // indirect - go.opentelemetry.io/otel/metric v1.36.0 // indirect - go.opentelemetry.io/otel/sdk v1.36.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.36.0 // indirect - go.opentelemetry.io/otel/trace v1.36.0 // indirect - go.uber.org/automaxprocs v1.6.0 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 // indirect + go.opentelemetry.io/otel v1.23.1 // indirect + go.opentelemetry.io/otel/metric v1.23.1 // indirect + go.opentelemetry.io/otel/trace v1.23.1 // indirect + go.starlark.net v0.0.0-20240123142251-f86470692795 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - go.yaml.in/yaml/v2 v2.4.2 // indirect - go.yaml.in/yaml/v3 v3.0.3 // indirect - golang.org/x/net v0.42.0 // indirect - golang.org/x/oauth2 v0.30.0 // indirect - golang.org/x/sync v0.16.0 // indirect - golang.org/x/term v0.33.0 // indirect - golang.org/x/time v0.12.0 // indirect + golang.org/x/net v0.21.0 // indirect + golang.org/x/oauth2 v0.17.0 // indirect + golang.org/x/sync v0.6.0 // indirect + golang.org/x/term v0.17.0 // indirect + golang.org/x/time v0.5.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20250603155806-513f23925822 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250721164621-a45f3dfb1074 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250728155136-f173205681a0 // indirect - gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect + google.golang.org/genproto v0.0.0-20240221002015-b0ce06bbee7c // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240221002015-b0ce06bbee7c // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240221002015-b0ce06bbee7c // indirect + gopkg.in/evanphx/json-patch.v5 v5.9.0 // indirect + gopkg.in/go-jose/go-jose.v2 v2.6.2 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - gotest.tools/v3 v3.4.0 // indirect - k8s.io/cli-runtime v0.33.2 // indirect - k8s.io/component-base v0.33.3 // indirect - k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect - k8s.io/kubectl v0.33.2 // indirect - oras.land/oras-go/v2 v2.6.0 // indirect - sigs.k8s.io/json 
v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect - sigs.k8s.io/kustomize/api v0.19.0 // indirect - sigs.k8s.io/kustomize/kyaml v0.19.0 // indirect - sigs.k8s.io/randfill v1.0.0 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + k8s.io/cli-runtime v0.29.0 // indirect + k8s.io/component-base v0.29.0 // indirect + k8s.io/klog/v2 v2.120.1 // indirect + k8s.io/kube-openapi v0.0.0-20240220201932-37d671a357a5 // indirect + k8s.io/kubectl v0.29.0 // indirect + oras.land/oras-go v1.2.5 // indirect + sigs.k8s.io/controller-runtime v0.17.2 + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/kustomize/api v0.16.0 // indirect + sigs.k8s.io/kustomize/kyaml v0.16.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/go.sum b/go.sum index c66d89f4d..ca449d14a 100644 --- a/go.sum +++ b/go.sum @@ -1,71 +1,54 @@ -cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= -cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= -cloud.google.com/go v0.121.4 h1:cVvUiY0sX0xwyxPwdSU2KsF9knOVmtRyAMt8xou0iTs= -cloud.google.com/go v0.121.4/go.mod h1:XEBchUiHFJbz4lKBZwYBDHV/rSyfFktk737TLDU089s= -cloud.google.com/go/auth v0.16.3 h1:kabzoQ9/bobUmnseYnBO6qQG7q4a/CffFRlJSxv2wCc= -cloud.google.com/go/auth v0.16.3/go.mod h1:NucRGjaXfzP1ltpcQ7On/VTZ0H4kWB5Jy+Y9Dnm76fA= -cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= -cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= -cloud.google.com/go/compute v1.41.0 h1:S+HvMIzBUAFK/73wxkrA4/GwvM7R9d+egGZvih4kp+M= -cloud.google.com/go/compute v1.41.0/go.mod h1:P1doTJnlwurJDzIQFMp4mgU+vyCe9HU2NWTlqTfq3MY= -cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeOCw78U8ytSU= -cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo= -cloud.google.com/go/iam v1.5.2 h1:qgFRAGEmd8z6dJ/qyEchAuL9jpswyODjA2lS+w234g8= -cloud.google.com/go/iam v1.5.2/go.mod h1:SE1vg0N81zQqLzQEwxL2WI6yhetBdbNQuTvIKCSkUHE= -cloud.google.com/go/kms v1.22.0 h1:dBRIj7+GDeeEvatJeTB19oYZNV0aj6wEqSIT/7gLqtk= -cloud.google.com/go/kms v1.22.0/go.mod h1:U7mf8Sva5jpOb4bxYZdtw/9zsbIjrklYwPcvMk34AL8= -cloud.google.com/go/logging v1.13.0 h1:7j0HgAp0B94o1YRDqiqm26w4q1rDMH7XNRU34lJXHYc= -cloud.google.com/go/logging v1.13.0/go.mod h1:36CoKh6KA/M0PbhPKMq6/qety2DCAErbhXT62TuXALA= -cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE= -cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY= -cloud.google.com/go/monitoring v1.24.2 h1:5OTsoJ1dXYIiMiuL+sYscLc9BumrL3CarVLL7dd7lHM= -cloud.google.com/go/monitoring v1.24.2/go.mod h1:x7yzPWcgDRnPEv3sI+jJGBkwl5qINf+6qY4eq0I9B4U= -cloud.google.com/go/secretmanager v1.15.0 h1:RtkCMgTpaBMbzozcRUGfZe46jb9a3qh5EdEtVRUATF8= -cloud.google.com/go/secretmanager v1.15.0/go.mod h1:1hQSAhKK7FldiYw//wbR/XPfPc08eQ81oBsnRUHEvUc= -cloud.google.com/go/storage v1.56.0 h1:iixmq2Fse2tqxMbWhLWC9HfBj1qdxqAmiK8/eqtsLxI= -cloud.google.com/go/storage v1.56.0/go.mod h1:Tpuj6t4NweCLzlNbw9Z9iwxEkrSem20AetIeH/shgVU= -cloud.google.com/go/trace v1.11.6 h1:2O2zjPzqPYAHrn3OKl029qlqG6W8ZdYaOWRyr8NgMT4= -cloud.google.com/go/trace v1.11.6/go.mod h1:GA855OeDEBiBMzcckLPE2kDunIpC72N+Pq8WFieFjnI= -dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8= -dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA= 
-filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= -filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM= +cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4= +cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg= +cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/iam v1.1.6 h1:bEa06k05IO4f4uJonbB5iAgKTPpABy1ayxaIZV/GHVc= +cloud.google.com/go/iam v1.1.6/go.mod h1:O0zxdPeGBoFdWW3HWmBxJsk0pfvNM/p/qa82rWOGTwI= +cloud.google.com/go/kms v1.15.7 h1:7caV9K3yIxvlQPAcaFffhlT7d1qpxjB1wHBtjWa13SM= +cloud.google.com/go/kms v1.15.7/go.mod h1:ub54lbsa6tDkUwnu4W7Yt1aAIFLnspgh0kPGToDukeI= +cloud.google.com/go/secretmanager v1.11.5 h1:82fpF5vBBvu9XW4qj0FU2C6qVMtj1RM/XHwKXUEAfYY= +cloud.google.com/go/secretmanager v1.11.5/go.mod h1:eAGv+DaCHkeVyQi0BeXgAHOU0RdrMeZIASKc+S7VqH4= +cloud.google.com/go/storage v1.38.0 h1:Az68ZRGlnNTpIBbLjSMIV2BDcwwXYlRlQzis0llkpJg= +cloud.google.com/go/storage v1.38.0/go.mod h1:tlUADB0mAb9BgYls9lq+8MGkfzOXuLrnHXlpHmvFJoY= +dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= +dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d h1:zjqpY4C7H15HjRPEenkS4SAn3Jy2eRRjkjZbGR30TOg= github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d/go.mod h1:XNqJ7hv2kY++g8XEHREpi+JqZo3+0l+CH2egBVN4yqM= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1 h1:Wc1ml6QlJs2BHQ/9Bqu1jiyggbsSjramq2oUmp5WeIo= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1/go.mod h1:Ot/6aikWnKWi4l9QB7qVSwa8iMphQNqkWALMoNT3rzM= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1 h1:B+blDbyVIG3WaikNxPnhPiJ1MThR03b3vKGtER95TP4= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1/go.mod h1:JdM5psgjfBf5fo2uWOZhflPWyDBZ/O/CNAH9CtsuZE4= -github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY= -github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 h1:FPKJS1T+clwv+OLGt13a8UjqeRuh0O4SJ3lUriThc+4= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1/go.mod h1:j2chePtV91HrC22tGoRX3sGY42uF13WzmmV80/OdVAA= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2 h1:c4k2FIYIh4xtwqrQwV0Ct1v5+ehlNXj5NI/MWVsiTkQ= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2/go.mod h1:5FDJtLEO/GxwNgUxbwrY3LP0pEoThTQJtk2oysdXHxM= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ= 
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc= github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys v0.10.0 h1:m/sWOGCREuSBqg2htVQTBY8nOZpyajYztF0vUvSZTuM= github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys v0.10.0/go.mod h1:Pu5Zksi2KrU7LPbZbNINx6fuVrUp/ffvpxdDj+i8LeE= github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 h1:FbH3BbSb4bvGluTesZZ+ttN/MDsnMmQP36OSnDuSXqw= github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1/go.mod h1:9V2j0jn9jDEkCkv8w/bKTNppX/d0FVA1ud77xCIP4KA= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6 v6.4.0 h1:z7Mqz6l0EFH549GvHEqfjKvi+cRScxLWbaoeLm9wxVQ= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6 v6.4.0/go.mod h1:v6gbfH+7DG7xH2kUNs+ZJ9tF6O3iNnR85wMtmr+F54o= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v3 v3.1.0 h1:2qsIIvxVT+uE6yrNldntJKlLRgxGbZ85kgtz5SNBhMw= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v3 v3.1.0/go.mod h1:AW8VEadnhw9xox+VaVd9sP7NjzOAnaZBLRH6Tq3cJ38= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v6 v6.2.0 h1:HYGD75g0bQ3VO/Omedm54v4LrD3B1cGImuRF3AJ5wLo= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v6 v6.2.0/go.mod h1:ulHyBFJOI0ONiRL4vcJTmS7rx18jQQlEPmAgo80cRdM= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 h1:Dd+RhdJn0OTtVGaeDLZpcumkIVCtA/3/Fo42+eoYvVM= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0/go.mod h1:5kakwfW5CjC9KK+Q4wjXAg+ShuIm2mBMua0ZFj2C8PE= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1 h1:/Zt+cDPnpC3OVDm/JKLOs7M2DKmLRIIp3XIx9pHHiig= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1/go.mod h1:Ng3urmn6dYe8gnbCMoHHVl5APYz2txho3koEkV2o2HA= -github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets v1.4.0 h1:/g8S6wk65vfC6m3FIxJ+i5QDyN9JWwXI8Hb0Img10hU= -github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets v1.4.0/go.mod h1:gpl+q95AzZlKVI3xSoseF9QPrypk0hQqBiJYeB/cR/I= -github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0 h1:nCYfgcSyHZXJI8J0IWE5MsCGlb2xp9fJiXyxWgmOFg4= -github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0/go.mod h1:ucUjca2JtSZboY8IoUqyQyuuXvwbMBVwFOm0vdQPNhA= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2 h1:FwladfywkNirM+FZYLBR2kBz5C8Tg0fw5w5Y7meRXWI= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2/go.mod h1:vv5Ad0RrIoT1lJFdWBZwt4mB1+j+V8DUroixmKDTCdk= -github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= -github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.5.0 h1:MxA59PGoCFb+vCwRQi3PhQEwHj4+r2dhuv9HG+vM7iM= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.5.0/go.mod h1:uYt4CfhkJA9o0FN7jfE5minm/i4nUE4MjGUJkzB6Zs8= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0/go.mod 
h1:LRr2FzBTQlONPPa5HREE5+RjSCTXl7BwOvYOaWTqCaI= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v5 v5.0.0 h1:9CrwzqQ+e8EqD+A2bh547GjBU4K0o30FhiTB981LFNI= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v5 v5.0.0/go.mod h1:Wfx7a5UHfOLG6O4NZ7Q0BPZUYwvlNCBR/OlIBpP3dlA= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1 h1:7CBQ+Ei8SP2c6ydQTGCCrS35bDxgTMfoP2miAwK++OU= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1/go.mod h1:c/wcGeGx5FUPbM/JltUYHZcKmigwyVLJlDq+4HdtXaw= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0/go.mod h1:T5RfihdXtBDxt1Ch2wobif3TvzTdumDy29kahv6AV9A= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets v1.1.0 h1:h4Zxgmi9oyZL2l8jeg1iRTqPloHktywWcu0nlJmo1tA= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets v1.1.0/go.mod h1:LgLGXawqSreJz135Elog0ywTJDsm0Hz2k+N+6ZK35u8= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0 h1:D3occbWoio4EBLkbkevetNMAVX197GkzbUMtqjGWn80= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0/go.mod h1:bTSOgj05NGRuHHhQwAdPnYr9TOdNmKlZTgGLL6nyAdI= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.0 h1:IfFdxTUDiV58iZqPKgyWiz4X4fCxZeQ1pTQPImLYXpY= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.0/go.mod h1:SUZc9YRRHfx2+FAQKNDGrssXehqLpxmwRv2mC/5ntj4= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= @@ -86,38 +69,36 @@ github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+Z github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= -github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= -github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs= -github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= -github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= -github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.3.2 
h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= +github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 h1:ErKg/3iS1AKcTkf3yixlZ54f9U1rljCkQyEXWUnIUxc= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0/go.mod h1:yAZHSGnqScoU556rBOVkwLze6WP5N+U11RHuWaGVxwY= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 h1:owcC2UnmsZycprQ5RfRgjydWhuoxg71LUfyiQdijZuM= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0/go.mod h1:ZPpqegjbE99EPKsu3iUWV22A04wzGPcAY/ziSIQEEgs= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.53.0 h1:4LP6hvB4I5ouTbGgWtixJhgED6xdf67twf9PoY96Tbg= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.53.0/go.mod h1:jUZ5LYlw40WMd07qxcQJD5M40aUxrfwqQX1g7zxYnrQ= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 h1:Ron4zCA/yk6U7WOBXhTJcDpsUBG9npumK6xw2auFltQ= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0/go.mod h1:cSgYe11MCNYunTnRXrKiR/tHc0eoKjICUuWpNZoVCOo= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0= -github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= -github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs= -github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0= +github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= +github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= +github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM= github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= -github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= -github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= -github.com/ProtonMail/go-crypto v1.1.6 h1:ZcV+Ropw6Qn0AX9brlQLAUXfqLBc7Bl+f/DmNxpLfdw= -github.com/ProtonMail/go-crypto v1.1.6/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= -github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE= -github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Microsoft/hcsshim v0.11.4 
h1:68vKo2VN8DE9AdN4tnkWnmdhqdbpUFM8OF3Airm7fz8= +github.com/Microsoft/hcsshim v0.11.4/go.mod h1:smjE4dvqPX9Zldna+t5FG3rnoHhaB7QYxPRqGcpAD9w= +github.com/ProtonMail/go-crypto v1.1.0-alpha.0-proton h1:P5Wd8eQ6zAzT4HpJI67FDKnTSf3xiJGQFqY1agDJPy4= +github.com/ProtonMail/go-crypto v1.1.0-alpha.0-proton/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= +github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= +github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0= github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= @@ -127,63 +108,65 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/aws/aws-sdk-go v1.30.27/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= -github.com/aws/aws-sdk-go v1.55.7 h1:UJrkFq7es5CShfBwlWAC8DA077vp8PyVbQd3lqLiztE= -github.com/aws/aws-sdk-go v1.55.7/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= -github.com/aws/aws-sdk-go-v2 v1.37.1 h1:SMUxeNz3Z6nqGsXv0JuJXc8w5YMtrQMuIBmDx//bBDY= -github.com/aws/aws-sdk-go-v2 v1.37.1/go.mod h1:9Q0OoGQoboYIAJyslFyF1f5K1Ryddop8gqMhWx/n4Wg= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.0 h1:6GMWV6CNpA/6fbFHnoAjrv4+LGfyTqZz2LtCHnspgDg= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.0/go.mod h1:/mXlTIVG9jbxkqDnr5UQNQxW1HRYxeGklkM9vAFeabg= -github.com/aws/aws-sdk-go-v2/config v1.30.2 h1:YE1BmSc4fFYqFgN1mN8uzrtc7R9x+7oSWeX8ckoltAw= -github.com/aws/aws-sdk-go-v2/config v1.30.2/go.mod h1:UNrLGZ6jfAVjgVJpkIxjLufRJqTXCVYOpkeVf83kwBo= -github.com/aws/aws-sdk-go-v2/credentials v1.18.2 h1:mfm0GKY/PHLhs7KO0sUaOtFnIQ15Qqxt+wXbO/5fIfs= -github.com/aws/aws-sdk-go-v2/credentials v1.18.2/go.mod h1:v0SdJX6ayPeZFQxgXUKw5RhLpAoZUuynxWDfh8+Eknc= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.1 h1:owmNBboeA0kHKDcdF8KiSXmrIuXZustfMGGytv6OMkM= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.1/go.mod h1:Bg1miN59SGxrZqlP8vJZSmXW+1N8Y1MjQDq1OfuNod8= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.18.2 h1:YFX4DvH1CPQXgQR8935b46Om+L7+6jus4aTdKqyDR84= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.18.2/go.mod h1:DgMPy7GqxcV0RSyaITnI3rw8HC3lIHB87U3KPQKDxHg= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.1 h1:ksZXBYv80EFTcgc8OJO48aQ8XDWXIQL7gGasPeCoTzI= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.1/go.mod h1:HSksQyyJETVZS7uM54cir0IgxttTD+8aEoJMPGepHBI= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.1 
h1:+dn/xF/05utS7tUhjIcndbuaPjfll2LhbH1cCDGLYUQ= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.1/go.mod h1:hyAGz30LHdm5KBZDI58MXx5lDVZ5CUfvfTZvMu4HCZo= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.1 h1:4HbnOGE9491a9zYJ9VpPh1ApgEq6ZlD4Kuv1PJenFpc= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.1/go.mod h1:Z6QnHC6TmpJWUxAy8FI4JzA7rTwl6EIANkyK9OR5z5w= -github.com/aws/aws-sdk-go-v2/service/autoscaling v1.55.1 h1:zX6/huIuV5ldMXSiVVdmRT2oO1M+xNLzdt0du0QuhVE= -github.com/aws/aws-sdk-go-v2/service/autoscaling v1.55.1/go.mod h1:KWk5jIp+F7eu9vjz6g/UdeIk5FX2zw7zllkf8EwmHjM= -github.com/aws/aws-sdk-go-v2/service/cloudfront v1.49.0 h1:ZABkPLtfK+q2GkW1pA+NukaGM/EAKamEUR347B1md2U= -github.com/aws/aws-sdk-go-v2/service/cloudfront v1.49.0/go.mod h1:PHC5ybfgglvCqD7fLaqR5A7LIuJqIoUxhlwF/8faMt0= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.238.0 h1:fXZYx7xDSocFM3ht/mwML7eCP7cPbs1ltXEM8zpwU5o= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.238.0/go.mod h1:lhyI/MJGGbPnOdYmmQRZe07S+2fW2uWI1XrUfAZgXLM= -github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.47.1 h1:H8+KiNkkY3q3u7IUSjc7oCshnHOOGvYOi7fT6ZJ23OI= -github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.47.1/go.mod h1:91PY/MUWThH0rH61v9r3QA4e7dS/PfXl+K63wltBeas= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 h1:6+lZi2JeGKtCraAj1rpoZfKqnQ9SptseRZioejfUOLM= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0/go.mod h1:eb3gfbVIxIoGgJsi9pGne19dhCBpK6opTYpQqAmdy44= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.1 h1:ps3nrmBWdWwakZBydGX1CxeYFK80HsQ79JLMwm7Y4/c= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.1/go.mod h1:bAdfrfxENre68Hh2swNaGEVuFYE74o0SaSCAlaG9E74= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.1 h1:ky79ysLMxhwk5rxJtS+ILd3Mc8kC5fhsLBrP27r6h4I= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.1/go.mod h1:+2MmkvFvPYM1vsozBWduoLJUi5maxFk5B7KJFECujhY= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.1 h1:MdVYlN5pcQu1t1OYx4Ajo3fKl1IEhzgdPQbYFCRjYS8= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.1/go.mod h1:iikmNLrvHm2p4a3/4BPeix2S9P+nW8yM1IZW73x8bFA= -github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.27.1 h1:dGw/U6NbhnWoW2gw+75/AZvnYjFuxYRtzUpxALoRhLc= -github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.27.1/go.mod h1:ZNIISn1QONFDUbTmkIK53IBTrGn1TbsrBH5pG/BCwew= -github.com/aws/aws-sdk-go-v2/service/s3 v1.85.1 h1:Hsqo8+dFxSdDvv9B2PgIx1AJAnDpqgS0znVI+R+MoGY= -github.com/aws/aws-sdk-go-v2/service/s3 v1.85.1/go.mod h1:8Q0TAPXD68Z8YqlcIGHs/UNIDHsxErV9H4dl4vJEpgw= -github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.36.1 h1:fnOIjzwTVrtVnkRef3Qs+uTr3qYKwXuFom5pqdZERNQ= -github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.36.1/go.mod h1:/19D53IxSX9W8uu5bo0t89oCLncvNP68V1KiRthhLd4= -github.com/aws/aws-sdk-go-v2/service/sso v1.26.1 h1:uWaz3DoNK9MNhm7i6UGxqufwu3BEuJZm72WlpGwyVtY= -github.com/aws/aws-sdk-go-v2/service/sso v1.26.1/go.mod h1:ILpVNjL0BO+Z3Mm0SbEeUoYS9e0eJWV1BxNppp0fcb8= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.31.1 h1:XdG6/o1/ZDmn3wJU5SRAejHaWgKS4zHv0jBamuKuS2k= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.31.1/go.mod h1:oiotGTKadCOCl3vg/tYh4k45JlDF81Ka8rdumNhEnIQ= -github.com/aws/aws-sdk-go-v2/service/sts v1.35.1 
h1:iF4Xxkc0H9c/K2dS0zZw3SCkj0Z7n6AMnUiiyoJND+I= -github.com/aws/aws-sdk-go-v2/service/sts v1.35.1/go.mod h1:0bxIatfN0aLq4mjoLDeBpOjOke68OsFlXPDFJ7V0MYw= -github.com/aws/smithy-go v1.22.5 h1:P9ATCXPMb2mPjYBgueqJNCA5S9UfktsW0tTxi+a7eqw= -github.com/aws/smithy-go v1.22.5/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= -github.com/bazelbuild/buildtools v0.0.0-20250715102656-62b9413b08bb h1:KCEfAAZ5hZH+CEMzShOZ4nCro20Ohf6whugQc2PLZiM= -github.com/bazelbuild/buildtools v0.0.0-20250715102656-62b9413b08bb/go.mod h1:PLNUetjLa77TCCziPsz0EI8a6CUxgC+1jgmWv0H25tg= -github.com/bazelbuild/rules_go v0.55.1 h1:cQYGcunY8myOB+0Ym6PGQRhc/milkRcNv0my3XgxaDU= -github.com/bazelbuild/rules_go v0.55.1/go.mod h1:T90Gpyq4HDFlsrvtQa2CBdHNJ2P4rAu/uUTmQbanzf0= +github.com/aws/aws-sdk-go v1.34.0/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/aws/aws-sdk-go v1.50.22 h1:BUhSaO2qLk2jkcyLebcvDmbdOunVe/Wq8RsCyI8szL0= +github.com/aws/aws-sdk-go v1.50.22/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go-v2 v1.25.0 h1:sv7+1JVJxOu/dD/sz/csHX7jFqmP001TIY7aytBWDSQ= +github.com/aws/aws-sdk-go-v2 v1.25.0/go.mod h1:G104G1Aho5WqF+SR3mDIobTABQzpYV0WxMsKxlMggOA= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.0 h1:2UO6/nT1lCZq1LqM67Oa4tdgP1CvL1sLSxvuD+VrOeE= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.0/go.mod h1:5zGj2eA85ClyedTDK+Whsu+w9yimnVIZvhvBKrDquM8= +github.com/aws/aws-sdk-go-v2/config v1.27.1 h1:oxvGd/cielb+oumJkQmXI0i5tQCRqfdCHV58AfE0pGY= +github.com/aws/aws-sdk-go-v2/config v1.27.1/go.mod h1:SpmaZYWeTF91NQcnnp2AScnZawBWwdkYCupHRNIhVSQ= +github.com/aws/aws-sdk-go-v2/credentials v1.17.1 h1:H4WlK2OnVotRmbVgS8Ww2Z4B3/dDHxDS7cW6EiCECN4= +github.com/aws/aws-sdk-go-v2/credentials v1.17.1/go.mod h1:qTfT/OIE9RAVirZDq0PcEYOOM4Pkmf1Hrk1iInKRS4k= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.0 h1:xWCwjjvVz2ojYTP4kBKUuUh9ZrXfcAXpflhOUUeXg1k= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.0/go.mod h1:j3fACuqXg4oMTQOR2yY7m0NmJY0yBK4L4sLsRXq1Ins= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.3 h1:PYWtYuCP+gYQ576MS4QRn7y1+kp+OZzzG7jlwnFj1wQ= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.3/go.mod h1:pM36l33ZLR/ARJqjkWHMS8dRS6tmBSd5SJEV29nCBFU= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.0 h1:NPs/EqVO+ajwOoq56EfcGKa3L3ruWuazkIw1BqxwOPw= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.0/go.mod h1:D+duLy2ylgatV+yTlQ8JTuLfDD0BnFvnQRc+o6tbZ4M= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.0 h1:ks7KGMVUMoDzcxNWUlEdI+/lokMFD136EL6DWmUOV80= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.0/go.mod h1:hL6BWM/d/qz113fVitZjbXR0E+RCTU1+x+1Idyn5NgE= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.0 h1:TkbRExyKSVHELwG9gz2+gql37jjec2R5vus9faTomwE= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.0/go.mod h1:T3/9xMKudHhnj8it5EqIrhvv11tVZqWYkKcot+BFStc= +github.com/aws/aws-sdk-go-v2/service/autoscaling v1.39.1 h1:gdukBEVzo0O/0UiR0ee5zQokJ7RIP0p1jF00ayKHZ4o= +github.com/aws/aws-sdk-go-v2/service/autoscaling v1.39.1/go.mod h1:6ioQn0JPZSvTdXmnUAQa9h7x8m+KU63rkgiAD1ZLnqc= +github.com/aws/aws-sdk-go-v2/service/cloudfront v1.34.1 h1:fGpjGBqtfTz2mymcChLB42StEw0vHwsHqDFnctmoOQ8= +github.com/aws/aws-sdk-go-v2/service/cloudfront v1.34.1/go.mod h1:/AZ24tZ/H2O3sNeLyv15mm5XqhhzeehASgNBI7oerFw= 
+github.com/aws/aws-sdk-go-v2/service/ec2 v1.148.1 h1:OGzK1PwB0sCE2Zwy6ISs/XSul4lrujQf3doXvmGqCwg= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.148.1/go.mod h1:ntWksNNQcXImRQMdxab74tp+H94neF/TwQJ9Ndxb04k= +github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.29.1 h1:V+AqIZnytQg3jgEBIbvLYzxcMagTvC6kzhex0ZbDcTE= +github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.29.1/go.mod h1:wBfYhqVwYqHxYkU3l5WZCdAyorLCFZf8T5ZnY6CPyw4= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.0 h1:a33HuFlO0KsveiP90IUJh8Xr/cx9US2PqkSroaLc+o8= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.0/go.mod h1:SxIkWpByiGbhbHYTo9CMTUnx2G4p4ZQMrDPcRRy//1c= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.0 h1:UiSyK6ent6OKpkMJN3+k5HZ4sk4UfchEaaW5wv7SblQ= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.0/go.mod h1:l7kzl8n8DXoRyFz5cIMG70HnPauWa649TUhgw8Rq6lo= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.0 h1:SHN/umDLTmFTmYfI+gkanz6da3vK8Kvj/5wkqnTHbuA= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.0/go.mod h1:l8gPU5RYGOFHJqWEpPMoRTP0VoaWQSkJdKo+hwWnnDA= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.0 h1:l5puwOHr7IxECuPMIuZG7UKOzAnF24v6t4l+Z5Moay4= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.0/go.mod h1:Oov79flWa/n7Ni+lQC3z+VM7PoRM47omRqbJU9B5Y7E= +github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.20.2 h1:TAKRHjyAtRMUeqsPnjzI4EXz3WtIo3IXRhJiIPa4MFo= +github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.20.2/go.mod h1:BRuiq4shgrokCvNWSXVHz1hhH5sNSLW0ZruTV0jiNMQ= +github.com/aws/aws-sdk-go-v2/service/s3 v1.50.2 h1:UxJGNZ+/VhocG50aui1p7Ub2NjDzijCpg8Y3NuznijM= +github.com/aws/aws-sdk-go-v2/service/s3 v1.50.2/go.mod h1:1o/W6JFUuREj2ExoQ21vHJgO7wakvjhol91M9eknFgs= +github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.27.2 h1:Wq73CAj0ktbUHufBTar4uMVzP7JHraTq6ZMloCAQxRk= +github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.27.2/go.mod h1:JsJDZFHwLGZu6dxhV9EV1gJrMnCeE4GEXubSZA59xdA= +github.com/aws/aws-sdk-go-v2/service/sso v1.19.1 h1:GokXLGW3JkH/XzEVp1jDVRxty1eNGB7emkjDG1qxGK8= +github.com/aws/aws-sdk-go-v2/service/sso v1.19.1/go.mod h1:YqbU3RS/pkDVu+v+Nwxvn0i1WB0HkNWEePWbmODEbbs= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.22.1 h1:2oxSGiYNxTHsuRuPD9McWvcvR6s61G3ssZLyQzcxQL0= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.22.1/go.mod h1:olUAyg+FaoFaL/zFaeQQONjOZ9HXoxgvI/c7mQTYz7M= +github.com/aws/aws-sdk-go-v2/service/sts v1.27.1 h1:QFT2KUWaVwwGi5/2sQNBOViFpLSkZmiyiHUxE2k6sOU= +github.com/aws/aws-sdk-go-v2/service/sts v1.27.1/go.mod h1:nXfOBMWPokIbOY+Gi7a1psWMSvskUCemZzI+SMB7Akc= +github.com/aws/smithy-go v1.20.0 h1:6+kZsCXZwKxZS9RfISnPc4EXlHoyAkm2hPuM8X2BrrQ= +github.com/aws/smithy-go v1.20.0/go.mod h1:uo5RKksAl4PzhqaAbjd4rLgFoq5koTsQKYuGe7dklGc= +github.com/bazelbuild/buildtools v0.0.0-20230317132445-9c3c1fc0106e h1:XmPu4mXICgdGnC5dXGjUGbwUD/kUmS0l5Aop3LaevBM= +github.com/bazelbuild/buildtools v0.0.0-20230317132445-9c3c1fc0106e/go.mod h1:689QdV3hBP7Vo9dJMmzhoYIyo/9iMhEmHkJcnaPRCbo= +github.com/bazelbuild/rules_go v0.42.0 h1:aY2smc3JWyUKOjGYmOKVLX70fPK9ON0rtwQojuIeUHc= +github.com/bazelbuild/rules_go v0.42.0/go.mod h1:TMHmtfpvyfsxaqfL9WnahCsXMWDMICTw7XeK9yVb+YU= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 
h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= @@ -194,65 +177,73 @@ github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuP github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/bufbuild/protocompile v0.6.0 h1:Uu7WiSQ6Yj9DbkdnOe7U4mNKp58y9WDMKDn28/ZlunY= github.com/bufbuild/protocompile v0.6.0/go.mod h1:YNP35qEYoYGme7QMtz5SBCoN4kL4g12jTtjuzRNdjpE= -github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= -github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= -github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd h1:rFt+Y/IK1aEZkEHchZRSq9OQbsSzIT/OrI8YFFmRIng= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b h1:otBG+dV+YK+Soembjv71DPz3uX/V/6MMlSyD9JBQ6kQ= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 h1:nvj0OLI3YqYXer/kZD8Ri1aaunCxIEsOst1BVJswV0o= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= -github.com/chengxilo/virtualterm v1.0.4 h1:Z6IpERbRVlfB8WkOmtbHiDbBANU7cimRIof7mk9/PwM= -github.com/chengxilo/virtualterm v1.0.4/go.mod h1:DyxxBZz/x1iqJjFxTFcr6/x+jSpqN0iwWCOK1q10rlY= -github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0= -github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= -github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 h1:aQ3y1lwWyqYPiWZThqv1aFbZMiM9vblcSArJRf2Irls= -github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= -github.com/containerd/containerd v1.7.27 h1:yFyEyojddO3MIGVER2xJLWoCIn+Up4GaHFquP7hsFII= -github.com/containerd/containerd v1.7.27/go.mod h1:xZmPnl75Vc+BLGt4MIfu6bp+fy03gdHAn9bz+FreFR0= -github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= -github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= -github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= -github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= 
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= +github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101 h1:7To3pQ+pZo0i3dsWEbinPNFs5gPSBOsJtx3wTT94VBY= +github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= +github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= +github.com/containerd/containerd v1.7.13 h1:wPYKIeGMN8vaggSKuV1X0wZulpMz4CrgEsZdaCyB6Is= +github.com/containerd/containerd v1.7.13/go.mod h1:zT3up6yTRfEUa6+GsITYIJNgSVL9NQ4x4h1RPzk0Wu4= +github.com/containerd/continuity v0.4.2 h1:v3y/4Yz5jwnvqPKJJ+7Wf93fyWoCB3F5EclWG023MDM= +github.com/containerd/continuity v0.4.2/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= -github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= -github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= -github.com/coredns/caddy v1.1.1 h1:2eYKZT7i6yxIfGP3qLJoJ7HAsDJqYB+X68g4NYjSrE0= -github.com/coredns/caddy v1.1.1/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4= -github.com/coredns/corefile-migration v1.0.25 h1:/XexFhM8FFlFLTS/zKNEWgIZ8Gl5GaWrHsMarGj/PRQ= -github.com/coredns/corefile-migration v1.0.25/go.mod h1:56DPqONc3njpVPsdilEnfijCwNGC3/kTJLl7i7SPavY= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0= -github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0= github.com/creack/pty v1.1.21/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7 h1:vU+EP9ZuFUCYE0NYLwTSob+3LNEJATzNfP/DC7SWGWI= -github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= -github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s= -github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= 
+github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f h1:eHnXnuK47UlSTOQexbzxAZfekVz6i+LKRdj1CU5DPaM= +github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= +github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= +github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/daniel-weisse/go-cryptsetup v0.0.0-20230705150314-d8c07bd1723c h1:ToajP6trZoiqlZ3Z4uoG1P02/wtqSw1AcowOXOYjATk= github.com/daniel-weisse/go-cryptsetup v0.0.0-20230705150314-d8c07bd1723c/go.mod h1:gZoZ0+POlM1ge/VUxWpMmZVNPzzMJ7l436CgkQ5+qzU= -github.com/daniel-weisse/go-sev-guest v0.0.0-20250728114912-0c2ba277c52b h1:pElX9BS0PnYZS/tznradDYbo82kvG2yisWGvZGsDnVs= -github.com/daniel-weisse/go-sev-guest v0.0.0-20250728114912-0c2ba277c52b/go.mod h1:SK9vW+uyfuzYdVN0m8BShL3OQCtXZe/JPF7ZkpD3760= github.com/danieljoos/wincred v1.2.1 h1:dl9cBrupW8+r5250DYkYxocLeZ1Y4vB1kxgtjxw8GQs= github.com/danieljoos/wincred v1.2.1/go.mod h1:uGaFL9fDn3OLTvzCGulzE+SzjEe5NGlh5FdCcyfPwps= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= -github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/distribution/distribution/v3 v3.0.0 h1:q4R8wemdRQDClzoNNStftB2ZAfqOiN6UX90KJc4HjyM= -github.com/distribution/distribution/v3 v3.0.0/go.mod h1:tRNuFoZsUdyRVegq8xGNeds4KLjwLCRin/tTo6i1DhU= -github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= -github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v28.3.3+incompatible h1:Dypm25kh4rmk49v1eiVbsAtpAsYURjYkaKubwuBdxEI= -github.com/docker/docker v28.3.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= -github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= +github.com/derpsteb/tink-go/v2 v2.0.0-20231002051717-a808e454eed6 h1:FVii9oXvddz9sFir5TRYjQKrzJLbVD/hibT+SnRSDzg= +github.com/derpsteb/tink-go/v2 v2.0.0-20231002051717-a808e454eed6/go.mod h1:QAbyq9LZncomYnScxlfaHImbV4ieNIe6bnu/Xcqqox4= +github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2 h1:aBfCb7iqHmDEIp6fBvC/hQUddQfg+3qdYjwzaiP9Hnc= +github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2/go.mod h1:WHNsWjnIn2V1LYOrME7e8KxSeKunYHsxEm4am0BUtcI= +github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= +github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= +github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= +github.com/docker/cli v25.0.3+incompatible h1:KLeNs7zws74oFuVhgZQ5ONGZiXUUdgsdy6/EsX/6284= +github.com/docker/cli 
v25.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= +github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v25.0.3+incompatible h1:D5fy/lYmY7bvZa0XTZ5/UJPljor41F+vdyJG5luQLfQ= +github.com/docker/docker v25.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.8.1 h1:j/eKUktUltBtMzKqmfLB0PAgqYyMHOp5vfsD1807oKo= +github.com/docker/docker-credential-helpers v0.8.1/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= @@ -263,152 +254,167 @@ github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4= github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= -github.com/edgelesssys/go-azguestattestation v0.0.0-20250408071817-8c4457b235ff h1:V6A5kD0+c1Qg4X72Lg+zxhCZk+par436sQdgLvMCBBc= -github.com/edgelesssys/go-azguestattestation v0.0.0-20250408071817-8c4457b235ff/go.mod h1:Lz4QaomI4wU2YbatD4/W7vatW2Q35tnkoJezB1clscc= -github.com/edgelesssys/go-tdx-qpl v0.0.0-20250129202750-607ac61e2377 h1:5JMJiBhvOUUR7EZ0UyeSy7a1WrqB2eM+DX3odLSHAh4= -github.com/edgelesssys/go-tdx-qpl v0.0.0-20250129202750-607ac61e2377/go.mod h1:IC72qyykUIWl0ZmSk53L4xbLCFDBEGZVaujUmPQOEyw= -github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= -github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/edgelesssys/go-azguestattestation v0.0.0-20230707101700-a683be600fcf h1:1iKB7b+i7svWC0aKXwggi+kHf0K57g8r9hN4VOpJYYg= +github.com/edgelesssys/go-azguestattestation v0.0.0-20230707101700-a683be600fcf/go.mod h1:T8Rv3qrCpUJZbKq49OA9tcC1ZbRkGtDxiafsj++LYIE= +github.com/edgelesssys/go-tdx-qpl v0.0.0-20240123150912-dcad3c41ec5f h1:TCGUmmH50cQBGXPJsn32APf93fmWQXcSMi7pMbDPtV0= +github.com/edgelesssys/go-tdx-qpl v0.0.0-20240123150912-dcad3c41ec5f/go.mod h1:IC72qyykUIWl0ZmSk53L4xbLCFDBEGZVaujUmPQOEyw= +github.com/emicklei/go-restful/v3 v3.11.2 h1:1onLa9DcsMYO9P+CXaL0dStDqQ2EHHXLiz+BtnqkLAU= +github.com/emicklei/go-restful/v3 v3.11.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= -github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M= -github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA= -github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A= -github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw= -github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI= -github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod 
h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4= -github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= -github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= -github.com/evanphx/json-patch v5.9.11+incompatible h1:ixHHqfcGvxhWkniF1tWxBHA0yb4Z+d1UQi45df52xW8= -github.com/evanphx/json-patch v5.9.11+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= -github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= +github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= +github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= +github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4= github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= -github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/foxboron/go-uefi v0.0.0-20250625111927-a3183a1bfc84 h1:8VH593jGTaWbdMdkVr91w0fnzmFmgV69J2OCOjokqp8= -github.com/foxboron/go-uefi v0.0.0-20250625111927-a3183a1bfc84/go.mod h1:q85c4IRlhhwdRJgGIUWrisDjU8dgcMj8dnXZCXo3hus= -github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI= -github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk= +github.com/foxboron/go-uefi v0.0.0-20240128152106-48be911532c2 h1:qGlg/7H49H30Eu7nkCBA7YxNmW30ephqBf7xIxlAGuQ= +github.com/foxboron/go-uefi v0.0.0-20240128152106-48be911532c2/go.mod h1:ffg/fkDeOYicEQLoO2yFFGt00KUTYVXI+rfnc8il6vQ= +github.com/foxcpp/go-mockdns v1.0.0 h1:7jBqxd3WDWwi/6WhDvacvH1XsN3rOLXyHM1uhvIx6FI= +github.com/foxcpp/go-mockdns v1.0.0/go.mod h1:lgRN6+KxQBawyIghpnl5CezHFGS9VLzvtVlwxvzXTQ4= 
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= -github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= -github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= -github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= -github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM= -github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= +github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk= github.com/go-chi/chi v4.1.2+incompatible h1:fGFk2Gmi/YKXk0OmGfBh0WgmN3XB8lVnEyNz34tQRec= github.com/go-chi/chi v4.1.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= -github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= -github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk= +github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= -github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UNbRM= -github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU= -github.com/go-git/go-git/v5 v5.14.0 h1:/MD3lCrGjCen5WfEAzKg00MJJffKhC8gzS80ycmCi60= -github.com/go-git/go-git/v5 v5.14.0/go.mod h1:Z5Xhoia5PcWA3NF8vRLURn9E5FRhSl7dGj9ItW3Wk5k= +github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= +github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= +github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4= +github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY= github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= -github.com/go-jose/go-jose/v3 v3.0.4 h1:Wp5HA7bLQcKnf6YYao/4kpRpVMp/yf6+pJKV8WFSaNY= -github.com/go-jose/go-jose/v3 v3.0.4/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ= -github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE= -github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA= +github.com/go-jose/go-jose/v3 v3.0.1 h1:pWmKFVtt+Jl0vBZTIpz/eAKwsm6LkIxDVVbFHKkchhA= +github.com/go-jose/go-jose/v3 v3.0.1/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= 
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= -github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= -github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU= -github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo= -github.com/go-openapi/errors v0.22.1 h1:kslMRRnK7NCb/CvR1q1VWuEQCEIsBGn5GgKD9e+HYhU= -github.com/go-openapi/errors v0.22.1/go.mod h1:+n/5UdIqdVnLIJ6Q9Se8HNGUXYaY6CN8ImWzfi/Gzp0= -github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= -github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= -github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= -github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= -github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco= -github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs= -github.com/go-openapi/runtime v0.28.0 h1:gpPPmWSNGo214l6n8hzdXYhPuJcGtziTOgUpvsFWGIQ= -github.com/go-openapi/runtime v0.28.0/go.mod h1:QN7OzcS+XuYmkQLw05akXk0jRH/eZ3kb18+1KwW9gyc= -github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY= -github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= -github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c= -github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= -github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU= -github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0= -github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58= -github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ= +github.com/go-openapi/analysis v0.22.2 h1:ZBmNoP2h5omLKr/srIC9bfqrUGzT6g6gNv03HE9Vpj0= +github.com/go-openapi/analysis v0.22.2/go.mod h1:pDF4UbZsQTo/oNuRfAWWd4dAh4yuYf//LYorPTjrpvo= +github.com/go-openapi/errors v0.21.0 h1:FhChC/duCnfoLj1gZ0BgaBmzhJC2SL/sJr8a2vAobSY= +github.com/go-openapi/errors v0.21.0/go.mod h1:jxNTMUxRCKj65yb/okJGEtahVd7uvWnuWfj53bse4ho= +github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q= +github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= +github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= +github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= +github.com/go-openapi/loads v0.21.5 h1:jDzF4dSoHw6ZFADCGltDb2lE4F6De7aWSpe+IcsRzT0= +github.com/go-openapi/loads v0.21.5/go.mod h1:PxTsnFBoBe+z89riT+wYt3prmSBP6GDAQh2l9H1Flz8= +github.com/go-openapi/runtime v0.27.1 
h1:ae53yaOoh+fx/X5Eaq8cRmavHgDma65XPZuvBqvJYto= +github.com/go-openapi/runtime v0.27.1/go.mod h1:fijeJEiEclyS8BRurYE1DE5TLb9/KZl6eAdbzjsrlLU= +github.com/go-openapi/spec v0.20.14 h1:7CBlRnw+mtjFGlPDRZmAMnq35cRzI91xj03HVyUi/Do= +github.com/go-openapi/spec v0.20.14/go.mod h1:8EOhTpBoFiask8rrgwbLC3zmJfz4zsCUueRuPM6GNkw= +github.com/go-openapi/strfmt v0.22.0 h1:Ew9PnEYc246TwrEspvBdDHS4BVKXy/AOVsfqGDgAcaI= +github.com/go-openapi/strfmt v0.22.0/go.mod h1:HzJ9kokGIju3/K6ap8jL+OlGAbjpSv27135Yr9OivU4= +github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE= +github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE= +github.com/go-openapi/validate v0.23.0 h1:2l7PJLzCis4YUGEoW6eoQw3WhyM65WSIcjX6SQnlfDw= +github.com/go-openapi/validate v0.23.0/go.mod h1:EeiAZ5bmpSIOJV1WLfyYF9qp/B1ZgSaEpHTJHtN5cbE= github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= -github.com/go-playground/validator/v10 v10.27.0 h1:w8+XrWVMhGkxOaaowyKH35gFydVHOvC0/uWoy2Fzwn4= -github.com/go-playground/validator/v10 v10.27.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo= +github.com/go-playground/validator/v10 v10.14.1 h1:9c50NUPC30zyuKprjL3vNZ0m5oG+jU0zvx4AqHGnv4k= +github.com/go-playground/validator/v10 v10.14.1/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= -github.com/go-sql-driver/mysql v1.9.1 h1:FrjNGn/BsJQjVRuSa8CBrM5BWA9BWoXXat3KrtSb/iI= -github.com/go-sql-driver/mysql v1.9.1/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= -github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= -github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= -github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U= -github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= +github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= +github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/godbus/dbus/v5 v5.0.4/go.mod 
h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/uuid/v5 v5.3.2 h1:2jfO8j3XgSwlz/wHqemAEugfnTlikAYHhnqQ8Xh4fE0= -github.com/gofrs/uuid/v5 v5.3.2/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= -github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= -github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= -github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= -github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= +github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw= +github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= 
-github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= -github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k= +github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= +github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= +github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= -github.com/google/certificate-transparency-go v1.1.8 h1:LGYKkgZF7satzgTak9R4yzfJXEeYVAjV6/EAEJOf1to= -github.com/google/certificate-transparency-go v1.1.8/go.mod h1:bV/o8r0TBKRf1X//iiiSgWrvII4d7/8OiA+3vG26gI8= -github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= -github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= +github.com/google/certificate-transparency-go v1.1.7 h1:IASD+NtgSTJLPdzkthwvAG1ZVbF2WtFg4IvoA68XGSw= +github.com/google/certificate-transparency-go v1.1.7/go.mod h1:FSSBo8fyMVgqptbfF6j5p/XNdgQftAhSmXcIxV9iphE= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-attestation v0.5.1 h1:jqtOrLk5MNdliTKjPbIPrAaRKJaKW+0LIU2n/brJYms= github.com/google/go-attestation v0.5.1/go.mod h1:KqGatdUhg5kPFkokyzSBDxwSCFyRgIgtRkMp6c3lOBQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= -github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -github.com/google/go-configfs-tsm v0.3.3-0.20240919001351-b4b5b84fdcbc h1:SG12DWUUM5igxm+//YX5Yq4vhdoRnOG9HkCodkOn+YU= -github.com/google/go-configfs-tsm v0.3.3-0.20240919001351-b4b5b84fdcbc/go.mod h1:EL1GTDFMb5PZQWDviGfZV9n87WeGTR/JUg13RfwkgRo= -github.com/google/go-containerregistry v0.20.3 h1:oNx7IdTI936V8CQRveCjaxOiegWwvM7kqkbXTpyiovI= -github.com/google/go-containerregistry v0.20.3/go.mod h1:w00pIgBRDVUDFM6bq+Qx8lwNWK+cxgCuX1vd3PIBDNI= -github.com/google/go-tdx-guest v0.3.2-0.20250505161510-9efd53b4a100 h1:E5ArM6vmtaUbgTZM8W3G+bgseO3i5l8BpJCKuUH7WVc= -github.com/google/go-tdx-guest v0.3.2-0.20250505161510-9efd53b4a100/go.mod h1:uHy3VaNXNXhl0fiPxKqTxieeouqQmW6A0EfLcaeCYBk= -github.com/google/go-tpm v0.9.5 
h1:ocUmnDebX54dnW+MQWGQRbdaAcJELsa6PqZhJ48KwVU= -github.com/google/go-tpm v0.9.5/go.mod h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY= -github.com/google/go-tpm-tools v0.4.4 h1:oiQfAIkc6xTy9Fl5NKTeTJkBTlXdHsxAofmQyxBKY98= -github.com/google/go-tpm-tools v0.4.4/go.mod h1:T8jXkp2s+eltnCDIsXR84/MTcVU9Ja7bh3Mit0pa4AY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-configfs-tsm v0.2.2 h1:YnJ9rXIOj5BYD7/0DNnzs8AOp7UcvjfTvt215EWcs98= +github.com/google/go-configfs-tsm v0.2.2/go.mod h1:EL1GTDFMb5PZQWDviGfZV9n87WeGTR/JUg13RfwkgRo= +github.com/google/go-containerregistry v0.19.0 h1:uIsMRBV7m/HDkDxE/nXMnv1q+lOOSPlQ/ywc5JbB8Ic= +github.com/google/go-containerregistry v0.19.0/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ= +github.com/google/go-sev-guest v0.0.0-20230928233922-2dcbba0a4b9d h1:6o4Z/vQqNUH+cEagfx1Ez5ElK70iZulEXZwmLnRo44I= +github.com/google/go-sev-guest v0.0.0-20230928233922-2dcbba0a4b9d/go.mod h1:hc1R4R6f8+NcJwITs0L90fYWTsBpd1Ix+Gur15sqHDs= +github.com/google/go-tdx-guest v0.3.1 h1:gl0KvjdsD4RrJzyLefDOvFOUH3NAJri/3qvaL5m83Iw= +github.com/google/go-tdx-guest v0.3.1/go.mod h1:/rc3d7rnPykOPuY8U9saMyEps0PZDThLk/RygXm04nE= +github.com/google/go-tpm-tools v0.4.3-0.20240112165732-912a43636883 h1:EQ1rGgyI8IEBApvDH9HPF7ehUd/6H6SxSNKVDF5z/GU= +github.com/google/go-tpm-tools v0.4.3-0.20240112165732-912a43636883/go.mod h1:n1reZGzBZc7VGW4/FanBgZFE5upVr58j0359Sevu32U= github.com/google/go-tspi v0.3.0 h1:ADtq8RKfP+jrTyIWIZDIYcKOMecRqNJFOew2IT0Inus= github.com/google/go-tspi v0.3.0/go.mod h1:xfMGI3G0PhxCdNVcYr1C4C+EizojDg/TXuX5by8CiHI= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -416,39 +422,44 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/logger v1.1.1 h1:+6Z2geNxc9G+4D4oDO9njjjn2d0wN5d7uOo0vOIW1NQ= github.com/google/logger v1.1.1/go.mod h1:BkeJZ+1FhQ+/d087r4dzojEg1u2ZX+ZqG1jTUrLM+zQ= -github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= -github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= -github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8= -github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= -github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= -github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= +github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= +github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b h1:RMpPgZTSApbPf7xaVel+QkoGPRLFLrwFO89uDUHEGf0= +github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/uuid v1.0.0/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4= -github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= -github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo= -github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc= -github.com/gophercloud/gophercloud/v2 v2.7.0 h1:o0m4kgVcPgHlcXiWAjoVxGd8QCmvM5VU+YM71pFbn0E= -github.com/gophercloud/gophercloud/v2 v2.7.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk= -github.com/gophercloud/utils/v2 v2.0.0-20250711132455-9770683b100a h1:erVLycqmezd0+eukgQ4xgLxGsByDKvqJxLXVc35tUYI= -github.com/gophercloud/utils/v2 v2.0.0-20250711132455-9770683b100a/go.mod h1:1mckc18GQSFLRhDy2BjPGkkpbrjxY5iwX/oxpdTE2kw= -github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= -github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/gax-go/v2 v2.12.1 h1:9F8GV9r9ztXyAi00gsMQHNoF51xPZm8uj1dpYt2ZETM= +github.com/googleapis/gax-go/v2 v2.12.1/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc= +github.com/gophercloud/gophercloud v1.3.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= +github.com/gophercloud/gophercloud v1.9.0 h1:zKvmHOmHuaZlnx9d2DJpEgbMxrGt/+CJ/bKOKQh9Xzo= +github.com/gophercloud/gophercloud v1.9.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= +github.com/gophercloud/utils v0.0.0-20231010081019-80377eca5d56 h1:sH7xkTfYzxIEgzq1tDHIMKRh1vThOEOGNsettdEeLbE= +github.com/gophercloud/utils v0.0.0-20231010081019-80377eca5d56/go.mod h1:VSalo4adEk+3sNkmVJLnhHoOyOYYS8sTWLG4mv5BKto= +github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= +github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= -github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= -github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= +github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY= github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod 
h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2 h1:sGm2vDRFUrQJO/Veii4h4zG2vvqG6uWNkBHSTqXOZk0= -github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2/go.mod h1:wd1YpapPLivG6nQgbf7ZkG1hhSOXDhhn4MLTknx2aAc= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.1 h1:HcUWd006luQPljE73d5sk+/VgYPGUReEVz2y1/qylwY= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.1/go.mod h1:w9Y7gY31krpLmrVU5ZPG9H7l9fZuRu5/3R3S3FMtVQ4= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1 h1:6UKoz5ujsI55KNpsJH3UwCq3T8kKbZwNZBNPuTTje8U= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1/go.mod h1:YvJ2f6MplWDhfxiUC3KpyTy76kYUZA4W3pTv/wdKQ9Y= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -457,66 +468,59 @@ github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuD github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-cty v1.5.0 h1:EkQ/v+dDNUqnuVpmS5fPqyY71NXVgT5gf32+57xY8g0= -github.com/hashicorp/go-cty v1.5.0/go.mod h1:lFUCG5kd8exDobgSfyj4ONE/dc822kiYMguVKdHGMLM= -github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= -github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= -github.com/hashicorp/go-kms-wrapping/v2 v2.0.18 h1:DLfC677GfKEpSAFpEWvl1vXsGpEcSHmbhBaPLrdDQHc= -github.com/hashicorp/go-kms-wrapping/v2 v2.0.18/go.mod h1:t/eaR/mi2mw3klfl1WEAuiLKrlZ/Q8cosmsT+RIPLu0= -github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.11 h1:J9zGa9SlcOHT3SQTj0Vv3shHo0anWbs58weURGCgChI= -github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.11/go.mod h1:iAOCu7/lG5eugg8+k7NVvQt0IpWT8s2Q9wnMtC/guM4= -github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 v2.0.14 h1:oK4OQ5EPbx/66dAvitksV+OdrQ86SZEj3B6VSZrbdEY= -github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 v2.0.14/go.mod h1:fWxrv9YkAMqtsISde5mcutoMvuiH4kyg1AlDzzmqRh8= -github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.13 h1:NGBZnF+yPRZ3gjFl69Y2m58/U0iyB2oH9HaznL9tekA= -github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.13/go.mod h1:4Xb+6d8VPeDcUNuh4toPqJlDpkajeJyIQeg36TtWhKw= +github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 h1:1/D3zfFHttUKaCaGKZ/dR2roBXv0vKbSCnssIldfQdI= +github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v1.6.2 h1:NOtoftovWkDheyUM/8JW3QMiXyxJK3uHRK7wV04nD2I= +github.com/hashicorp/go-hclog v1.6.2/go.mod 
h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-kms-wrapping/v2 v2.0.16 h1:WZeXfD26QMWYC35at25KgE021SF9L3u9UMHK8fJAdV0= +github.com/hashicorp/go-kms-wrapping/v2 v2.0.16/go.mod h1:ZiKZctjRTLEppuRwrttWkp71VYMbTTCkazK4xT7U/NQ= +github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.9 h1:qdxeZvDMRGZ3YSE4Oz0Pp7WUSUn5S6cWZguEOkEVL50= +github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.9/go.mod h1:DcXbvVpgNWbxGmxgmu3QN64bEydMu14Cpe34RRR30HY= +github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 v2.0.11 h1:/7SKkYIhA8cr3l8m1EKT6Q90bPoSVqqVBuQ6HgoMIkw= +github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 v2.0.11/go.mod h1:LepS5s6ESGE0qQMpYaui5lX+mQYeiYiy06VzwWRioO8= +github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.11 h1:qXOa2uFzT8eORzgfLZSp1dvig2l/70LJIr6634f5HMM= +github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.11/go.mod h1:HSaOaX/lv3ShCdilUYbOTPnSvmoZ9xtQhgw+8hYcZkg= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.6.3 h1:xgHB+ZUSYeuJi96WtxEjzi23uh7YQpznjGh0U0UUrwg= -github.com/hashicorp/go-plugin v1.6.3/go.mod h1:MRobyh+Wc/nYy1V4KAXUiYfzxoYhs7V1mlH1Z7iY2h0= -github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= -github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= -github.com/hashicorp/go-secure-stdlib/awsutil v0.1.6 h1:W9WN8p6moV1fjKLkeqEgkAMu5rauy9QeYDAmIaPuuiA= -github.com/hashicorp/go-secure-stdlib/awsutil v0.1.6/go.mod h1:MpCPSPGLDILGb4JMm94/mMi3YysIqsXzGCzkEZjcjXg= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.9 h1:FW0YttEnUNDJ2WL9XcrrfteS1xW8u+sh4ggM8pN5isQ= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.9/go.mod h1:Ll013mhdmsVDuoIXVfBtvgGJsXDYkTw1kooNcoCXuE0= -github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= -github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= -github.com/hashicorp/go-sockaddr v1.0.6 h1:RSG8rKU28VTUTvEKghe5gIhIQpv8evvNpnDEyqO4u9I= -github.com/hashicorp/go-sockaddr v1.0.6/go.mod h1:uoUUmtwU7n9Dv3O4SNLeFvg0SxQ3lyjsj6+CCykpaxI= +github.com/hashicorp/go-plugin v1.6.0 h1:wgd4KxHJTVGGqWBq4QPB1i5BZNEx9BR8+OFmHDmTk8A= +github.com/hashicorp/go-plugin v1.6.0/go.mod h1:lBS5MtSSBZk0SHc66KACcjjlU6WzEVP/8pwz68aMkCI= +github.com/hashicorp/go-retryablehttp v0.7.5 h1:bJj+Pj19UZMIweq/iie+1u5YCdGrnxCT9yvm0e+Nd5M= +github.com/hashicorp/go-retryablehttp v0.7.5/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= +github.com/hashicorp/go-secure-stdlib/awsutil v0.3.0 h1:I8bynUKMh9I7JdwtW9voJ0xmHvBpxQtLjrMFDYmhOxY= +github.com/hashicorp/go-secure-stdlib/awsutil v0.3.0/go.mod h1:oKHSQs4ivIfZ3fbXGQOop1XuDfdSb8RIsWTGaAanSfg= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= -github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/golang-lru/arc/v2 v2.0.5 h1:l2zaLDubNhW4XO3LnliVj0GXO3+/CGNJAg1dcN2Fpfw= -github.com/hashicorp/golang-lru/arc/v2 v2.0.5/go.mod 
h1:ny6zBSQZi2JxIeYcv7kt2sH2PXJtirBN7RDhRpxPkxU= -github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvHgwfx2Vm4= -github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= -github.com/hashicorp/hc-install v0.9.2 h1:v80EtNX4fCVHqzL9Lg/2xkp62bbvQMnvPQ0G+OmtO24= -github.com/hashicorp/hc-install v0.9.2/go.mod h1:XUqBQNnuT4RsxoxiM9ZaUk0NX8hi2h+Lb6/c0OZnC/I= -github.com/hashicorp/hcl/v2 v2.24.0 h1:2QJdZ454DSsYGoaE6QheQZjtKZSUs9Nh2izTWiwQxvE= -github.com/hashicorp/hcl/v2 v2.24.0/go.mod h1:oGoO1FIQYfn/AgyOhlg9qLC6/nOJPX3qGbkZpYAcqfM= +github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hc-install v0.6.3 h1:yE/r1yJvWbtrJ0STwScgEnCanb0U9v7zp0Gbkmcoxqs= +github.com/hashicorp/hc-install v0.6.3/go.mod h1:KamGdbodYzlufbWh4r9NRo8y6GLHWZP2GBtdnms1Ln0= +github.com/hashicorp/hcl/v2 v2.19.1 h1://i05Jqznmb2EXqa39Nsvyan2o5XyMowW5fnCKW5RPI= +github.com/hashicorp/hcl/v2 v2.19.1/go.mod h1:ThLC89FV4p9MPW804KVbe/cEXoQ8NZEh+JtMeeGErHE= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/terraform-exec v0.23.0 h1:MUiBM1s0CNlRFsCLJuM5wXZrzA3MnPYEsiXmzATMW/I= -github.com/hashicorp/terraform-exec v0.23.0/go.mod h1:mA+qnx1R8eePycfwKkCRk3Wy65mwInvlpAeOwmA7vlY= -github.com/hashicorp/terraform-json v0.25.0 h1:rmNqc/CIfcWawGiwXmRuiXJKEiJu1ntGoxseG1hLhoQ= -github.com/hashicorp/terraform-json v0.25.0/go.mod h1:sMKS8fiRDX4rVlR6EJUMudg1WcanxCMoWwTLkgZP/vc= -github.com/hashicorp/terraform-plugin-framework v1.15.0 h1:LQ2rsOfmDLxcn5EeIwdXFtr03FVsNktbbBci8cOKdb4= -github.com/hashicorp/terraform-plugin-framework v1.15.0/go.mod h1:hxrNI/GY32KPISpWqlCoTLM9JZsGH3CyYlir09bD/fI= -github.com/hashicorp/terraform-plugin-framework-validators v0.18.0 h1:OQnlOt98ua//rCw+QhBbSqfW3QbwtVrcdWeQN5gI3Hw= -github.com/hashicorp/terraform-plugin-framework-validators v0.18.0/go.mod h1:lZvZvagw5hsJwuY7mAY6KUz45/U6fiDR0CzQAwWD0CA= -github.com/hashicorp/terraform-plugin-go v0.28.0 h1:zJmu2UDwhVN0J+J20RE5huiF3XXlTYVIleaevHZgKPA= -github.com/hashicorp/terraform-plugin-go v0.28.0/go.mod h1:FDa2Bb3uumkTGSkTFpWSOwWJDwA7bf3vdP3ltLDTH6o= +github.com/hashicorp/terraform-exec v0.20.0 h1:DIZnPsqzPGuUnq6cH8jWcPunBfY+C+M8JyYF3vpnuEo= +github.com/hashicorp/terraform-exec v0.20.0/go.mod h1:ckKGkJWbsNqFKV1itgMnE0hY9IYf1HoiekpuN0eWoDw= +github.com/hashicorp/terraform-json v0.21.0 h1:9NQxbLNqPbEMze+S6+YluEdXgJmhQykRyRNd+zTI05U= +github.com/hashicorp/terraform-json v0.21.0/go.mod h1:qdeBs11ovMzo5puhrRibdD6d2Dq6TyE/28JiU4tIQxk= +github.com/hashicorp/terraform-plugin-framework v1.5.0 h1:8kcvqJs/x6QyOFSdeAyEgsenVOUeC/IyKpi2ul4fjTg= +github.com/hashicorp/terraform-plugin-framework v1.5.0/go.mod h1:6waavirukIlFpVpthbGd2PUNYaFedB0RwW3MDzJ/rtc= +github.com/hashicorp/terraform-plugin-framework-validators v0.12.0 h1:HOjBuMbOEzl7snOdOoUfE2Jgeto6JOjLVQ39Ls2nksc= +github.com/hashicorp/terraform-plugin-framework-validators v0.12.0/go.mod h1:jfHGE/gzjxYz6XoUwi/aYiiKrJDeutQNUtGQXkaHklg= +github.com/hashicorp/terraform-plugin-go v0.21.0 h1:VSjdVQYNDKR0l2pi3vsFK1PdMQrw6vGOshJXMNFeVc0= +github.com/hashicorp/terraform-plugin-go v0.21.0/go.mod 
h1:piJp8UmO1uupCvC9/H74l2C6IyKG0rW4FDedIpwW5RQ= github.com/hashicorp/terraform-plugin-log v0.9.0 h1:i7hOA+vdAItN1/7UrfBqBwvYPQ9TFvymaRGZED3FCV0= github.com/hashicorp/terraform-plugin-log v0.9.0/go.mod h1:rKL8egZQ/eXSyDqzLUuwUYLVdlYeamldAHSxjUFADow= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.37.0 h1:NFPMacTrY/IdcIcnUB+7hsore1ZaRWU9cnB6jFoBnIM= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.37.0/go.mod h1:QYmYnLfsosrxjCnGY1p9c7Zj6n9thnEE+7RObeYs3fA= -github.com/hashicorp/terraform-plugin-testing v1.13.2 h1:mSotG4Odl020vRjIenA3rggwo6Kg6XCKIwtRhYgp+/M= -github.com/hashicorp/terraform-plugin-testing v1.13.2/go.mod h1:WHQ9FDdiLoneey2/QHpGM/6SAYf4A7AZazVg7230pLE= -github.com/hashicorp/terraform-registry-address v0.2.5 h1:2GTftHqmUhVOeuu9CW3kwDkRe4pcBDq0uuK5VJngU1M= -github.com/hashicorp/terraform-registry-address v0.2.5/go.mod h1:PpzXWINwB5kuVS5CA7m1+eO2f1jKb5ZDIxrOPfpnGkg= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.30.0 h1:X7vB6vn5tON2b49ILa4W7mFAsndeqJ7bZFOGbVO+0Cc= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.30.0/go.mod h1:ydFcxbdj6klCqYEPkPvdvFKiNGKZLUs+896ODUXCyao= +github.com/hashicorp/terraform-plugin-testing v1.6.0 h1:Wsnfh+7XSVRfwcr2jZYHsnLOnZl7UeaOBvsx6dl/608= +github.com/hashicorp/terraform-plugin-testing v1.6.0/go.mod h1:cJGG0/8j9XhHaJZRC+0sXFI4uzqQZ9Az4vh6C4GJpFE= +github.com/hashicorp/terraform-registry-address v0.2.3 h1:2TAiKJ1A3MAkZlH1YI/aTVcLZRu7JseiXNRHbOAyoTI= +github.com/hashicorp/terraform-registry-address v0.2.3/go.mod h1:lFHA76T8jfQteVfT7caREqguFrW3c4MFSPhZB7HHgUM= github.com/hashicorp/terraform-svchost v0.1.1 h1:EZZimZ1GxdqFRinZ1tpJwVxxt49xc/S52uzrw4x0jKQ= github.com/hashicorp/terraform-svchost v0.1.1/go.mod h1:mNsjQfZyf/Jhz35v6/0LWcv26+X7JPS+buii2c9/ctc= github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= @@ -525,37 +529,45 @@ github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUq github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef h1:A9HsByNhogrvm9cWb28sjiS3i7tcKCkflWFEkHfuAgM= github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= -github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= -github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU= +github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= -github.com/jedisct1/go-minisign v0.0.0-20211028175153-1c139d1cc84b h1:ZGiXF8sz7PDk6RgkP+A/SFfUD0ZR/AgG6SpRNEDKZy8= -github.com/jedisct1/go-minisign v0.0.0-20211028175153-1c139d1cc84b/go.mod 
h1:hQmNrgofl+IY/8L+n20H6E6PWBBTokdsv+q49j0QhsU= +github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 h1:TMtDYDHKYY15rFihtRfck/bfFqNfvcabqvXAFQfAUpY= +github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod h1:h1nSAbGFqGVzn6Jyl1R/iCcBUHN4g+gW1u9CoBTrb9E= github.com/jhump/protoreflect v1.15.3 h1:6SFRuqU45u9hIZPJAoZ8c28T3nK64BNdp9w6jFonzls= github.com/jhump/protoreflect v1.15.3/go.mod h1:4ORHmSBmlCW8fh3xHmJMGyul1zNqZK4Elxc8qKP+p1k= github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= -github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24 h1:liMMTbpW34dhU4az1GN0pTPADwNmvoRSeoZ6PItiqnY= -github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs= github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI= -github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= -github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= +github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g= +github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213/go.mod h1:vNUNkEQ1e29fT/6vq2aBdFsgNPmy8qMdSay1npru+Sw= github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= -github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU= -github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= -github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI= +github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod 
h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -572,37 +584,37 @@ github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhR github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= -github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec h1:2tTW6cDth2TSgRbAhD7yjZzTQmcN25sDRPEeinR51yQ= -github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec/go.mod h1:TmwEoGCwIti7BCeJ9hescZgRtatxRE+A72pCoPfmcfk= +github.com/letsencrypt/boulder v0.0.0-20240216200101-4eb5e3caa228 h1:Y0fwz/hllcpgv9X24KyS/x8O6MdsOx217vAp1XV4Is0= +github.com/letsencrypt/boulder v0.0.0-20240216200101-4eb5e3caa228/go.mod h1:xFWTlJ4v5tijJj9HVIJi6TOij35bNTuAGwgOFmEg1p8= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY= github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= -github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= -github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= -github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-runewidth v0.0.16 
h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= -github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= -github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM= -github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= +github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v1.14.19 h1:fhGleo2h1p8tVChob4I9HpmVFIAkKGpiukdrgQbWfGI= +github.com/mattn/go-sqlite3 v1.14.19/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ= github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= @@ -613,23 +625,22 @@ github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQ github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= -github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= -github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= -github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw= -github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs= -github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg= -github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4= -github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= -github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= -github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= -github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= +github.com/moby/locker v1.0.1 
h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= +github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= +github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g= +github.com/moby/sys/mountinfo v0.7.1/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= @@ -638,117 +649,116 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= +github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olareg/olareg v0.1.2 h1:75G8X6E9FUlzL/CSjgFcYfMgNzlc7CxULpUUNsZBIvI= -github.com/olareg/olareg v0.1.2/go.mod h1:TWs+N6pO1S4bdB6eerzUm/ITRQ6kw91mVf9ZYeGtw+Y= -github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus= -github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8= -github.com/onsi/gomega v1.38.0 h1:c/WX+w8SLAinvuKKQFh77WEucCnPk4j2OTUr7lt7BeY= -github.com/onsi/gomega v1.38.0/go.mod h1:OcXcwId0b9QsE7Y49u+BTrL4IdKOBOKnD6VQNTJEB6o= +github.com/olareg/olareg v0.0.0-20240206155231-8ba4b6726143 h1:iN0dytB++Fnk2axqdCAfUyqrpHsOv2FfVH5MD3nXscA= +github.com/olareg/olareg v0.0.0-20240206155231-8ba4b6726143/go.mod h1:YazBF7Kwb7UGtorlfDOqMv4cxPXERX55H0xbiIYFzow= +github.com/onsi/ginkgo/v2 v2.14.0 
h1:vSmGj2Z5YPb9JwCWT6z6ihcUvDhuXLc3sJiqd3jMKAY= +github.com/onsi/ginkgo/v2 v2.14.0/go.mod h1:JkUdW7JkN0V6rFvsHcJ478egV3XH9NxpD27Hal/PhZw= +github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= +github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= -github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= +github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw= +github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= -github.com/pjbgf/sha1cd v0.3.2 h1:a9wb0bp1oC2TGwStyn0Umc/IGKQnEgF0vVaZ8QF8eo4= -github.com/pjbgf/sha1cd v0.3.2/go.mod h1:zQWigSxVmsHEZow5qaLtPYxpcKMMQpa09ixqBxuCS6A= +github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= +github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/planetscale/vtprotobuf v0.6.1-0.20241121165744-79df5c4772f2 h1:1sLMdKq4gNANTj0dUibycTLzpIEKVnLnbaEkxws78nw= -github.com/planetscale/vtprotobuf v0.6.1-0.20241121165744-79df5c4772f2/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= -github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= -github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= -github.com/prometheus/client_golang v1.22.0 
h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= -github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= -github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 h1:EaDatTxkdHG+U3Bk4EUr+DZ7fOGwTfezUiUJMaIcaho= -github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5/go.mod h1:fyalQWdtzDBECAQFBJuQe5bzQ02jGd5Qcbgb97Flm7U= -github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 h1:EfpWLLCyXw8PSM2/XNJLjI3Pb27yVE+gIAfeqp8LUCc= -github.com/redis/go-redis/extra/redisotel/v9 v9.0.5/go.mod h1:WZjPDy7VNzn77AAfnAfVjZNvfJTYfPetfZk5yoSTLaQ= -github.com/redis/go-redis/v9 v9.8.0 h1:q3nRvjrlge/6UD7eTu/DSg2uYiU2mCL0G/uzBWqhicI= -github.com/redis/go-redis/v9 v9.8.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw= -github.com/regclient/regclient v0.9.0 h1:c3hNJZvtv8lMqhP0jGCa4d9j2n4688VCfhCWddGfWfk= -github.com/regclient/regclient v0.9.0/go.mod h1:Adv7tukwdX+oDTszfILrjerGk55Pg2nKlbshj94U3rg= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= +github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos= +github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.47.0 h1:p5Cz0FNHo7SnWOmWmoRozVcjEp0bIVU8cV7OShpjL1k= +github.com/prometheus/common v0.47.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/regclient/regclient v0.5.7 h1:d6bXhvz7UYJM+r20ls60RIVdoYh/rp+PygD/dIsJ9UA= +github.com/regclient/regclient v0.5.7/go.mod h1:5QTWmekWy6+gq13Z6U69zsMQV5lvgxg2T7AefHT6BmA= 
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= -github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= -github.com/rubenv/sql-migrate v1.8.0 h1:dXnYiJk9k3wetp7GfQbKJcPHjVJL6YK19tKj8t2Ns0o= -github.com/rubenv/sql-migrate v1.8.0/go.mod h1:F2bGFBwCU+pnmbtNYDeKvSuvL6lBVtXDXUUv5t+u1qw= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rubenv/sql-migrate v1.6.1 h1:bo6/sjsan9HaXAsNxYP/jCEDUGibHp8JmOBw7NTGRos= +github.com/rubenv/sql-migrate v1.6.1/go.mod h1:tPzespupJS0jacLfhbwto/UjSX+8h2FdWB7ar+QlHa0= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= -github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/samber/lo v1.51.0 h1:kysRYLbHy/MB7kQZf5DSN50JHmMsNEdeY24VzJFu7wI= -github.com/samber/lo v1.51.0/go.mod h1:4+MXEGsJzbKGaUEQFKBq2xtfuznW9oz/WrgyzMzRoM0= -github.com/samber/slog-common v0.19.0 h1:fNcZb8B2uOLooeYwFpAlKjkQTUafdjfqKcwcC89G9YI= -github.com/samber/slog-common v0.19.0/go.mod h1:dTz+YOU76aH007YUU0DffsXNsGFQRQllPQh9XyNoA3M= -github.com/samber/slog-multi v1.4.1 h1:OVBxOKcorBcGQVKjwlraA41JKWwHQyB/3KfzL3IJAYg= -github.com/samber/slog-multi v1.4.1/go.mod h1:im2Zi3mH/ivSY5XDj6LFcKToRIWPw1OcjSVSdXt+2d0= +github.com/samber/lo v1.38.1 h1:j2XEAqXKb09Am4ebOg31SpvzUTTs6EN3VfgeLUhPdXM= +github.com/samber/lo v1.38.1/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA= +github.com/samber/slog-multi v1.0.2 h1:6BVH9uHGAsiGkbbtQgAOQJMpKgV8unMrHhhJaw+X1EQ= +github.com/samber/slog-multi v1.0.2/go.mod h1:uLAvHpGqbYgX4FSL0p1ZwoLuveIAJvBECtE07XmYvFo= github.com/sassoftware/relic v7.2.1+incompatible h1:Pwyh1F3I0r4clFJXkSI8bOyJINGqpgjJU3DYAZeI05A= github.com/sassoftware/relic v7.2.1+incompatible/go.mod h1:CWfAxv73/iLZ17rbyhIEq3K9hs5w6FpNMdUT//qR+zk= -github.com/sassoftware/relic/v7 v7.6.2 h1:rS44Lbv9G9eXsukknS4mSjIAuuX+lMq/FnStgmZlUv4= -github.com/sassoftware/relic/v7 v7.6.2/go.mod h1:kjmP0IBVkJZ6gXeAu35/KCEfca//+PKM6vTAsyDPY+k= -github.com/schollz/progressbar/v3 v3.18.0 h1:uXdoHABRFmNIjUfte/Ex7WtuyVslrw2wVPQmCN62HpA= -github.com/schollz/progressbar/v3 v3.18.0/go.mod h1:IsO3lpbaGuzh8zIMzgY3+J8l4C8GjO0Y9S69eFvNsec= -github.com/secure-systems-lab/go-securesystemslib v0.9.0 h1:rf1HIbL64nUpEIZnjLZ3mcNEL9NBPB0iuVjyxvq3LZc= -github.com/secure-systems-lab/go-securesystemslib v0.9.0/go.mod h1:DVHKMcZ+V4/woA/peqr+L0joiRXbPpQ042GgJckkFgw= -github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= -github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= -github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= -github.com/shopspring/decimal v1.4.0/go.mod 
h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= -github.com/siderolabs/talos/pkg/machinery v1.10.5 h1:R0ZSf9OrHIQ70+LEpQVK/KVCq3E6sUi/OpIjLDVH3Sk= -github.com/siderolabs/talos/pkg/machinery v1.10.5/go.mod h1:GxGnHH6gtX3J9s713+UbKvE9rLnlbYLv+Yn4rqD9Jh0= -github.com/sigstore/protobuf-specs v0.4.1 h1:5SsMqZbdkcO/DNHudaxuCUEjj6x29tS2Xby1BxGU7Zc= -github.com/sigstore/protobuf-specs v0.4.1/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc= -github.com/sigstore/rekor v1.3.10 h1:/mSvRo4MZ/59ECIlARhyykAlQlkmeAQpvBPlmJtZOCU= -github.com/sigstore/rekor v1.3.10/go.mod h1:JvryKJ40O0XA48MdzYUPu0y4fyvqt0C4iSY7ri9iu3A= -github.com/sigstore/sigstore v1.9.5 h1:Wm1LT9yF4LhQdEMy5A2JeGRHTrAWGjT3ubE5JUSrGVU= -github.com/sigstore/sigstore v1.9.5/go.mod h1:VtxgvGqCmEZN9X2zhFSOkfXxvKUjpy8RpUW39oCtoII= +github.com/sassoftware/relic/v7 v7.6.1 h1:O5s8ewCgq5QYNpv45dK4u6IpBmDM9RIcsbf/G1uXepQ= +github.com/sassoftware/relic/v7 v7.6.1/go.mod h1:NxwtWxWxlUa9as2qZi635Ye6bBT/tGnMALLq7dSfOOU= +github.com/schollz/progressbar/v3 v3.14.1 h1:VD+MJPCr4s3wdhTc7OEJ/Z3dAeBzJ7yKH/P4lC5yRTI= +github.com/schollz/progressbar/v3 v3.14.1/go.mod h1:Zc9xXneTzWXF81TGoqL71u0sBPjULtEHYtj/WVgVy8E= +github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA= +github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU= +github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= +github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= +github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/siderolabs/talos/pkg/machinery v1.6.4 h1:xzkHpHqVnio3IL2z44f/dG3TNVvSafZFvuyqlR6J7nY= +github.com/siderolabs/talos/pkg/machinery v1.6.4/go.mod h1:0x8zKxjyasg7n03lHXo5FX8ZaodFSSaompcGYa4alPE= +github.com/sigstore/rekor v1.3.5 h1:QoVXcS7NppKY+rpbEFVHr4evGDZBBSh65X0g8PXoUkQ= +github.com/sigstore/rekor v1.3.5/go.mod h1:CWqOk/fmnPwORQmm7SyDgB54GTJizqobbZ7yOP1lvw8= +github.com/sigstore/sigstore v1.8.1 h1:mAVposMb14oplk2h/bayPmIVdzbq2IhCgy4g6R0ZSjo= +github.com/sigstore/sigstore v1.8.1/go.mod h1:02SL1158BSj15bZyOFz7m+/nJzLZfFd9A8ab3Kz7w/E= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/skeema/knownhosts v1.3.1 h1:X2osQ+RAjK76shCbvhHHHVl3ZlgDm8apHEHFqRjnBY8= -github.com/skeema/knownhosts v1.3.1/go.mod h1:r7KTdC8l4uxWRyK2TpQZ/1o5HaSzh06ePQNxPwTcfiY= -github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA= -github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= -github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= -github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M= -github.com/spf13/pflag v1.0.7/go.mod 
h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE= -github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g= +github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ= +github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= -github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= @@ -757,19 +767,20 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/theupdateframework/go-tuf v0.7.0 h1:CqbQFrWo1ae3/I0UCblSbczevCCbS31Qvs5LdxRWqRI= github.com/theupdateframework/go-tuf v0.7.0/go.mod h1:uEB7WSY+7ZIugK6R1hiBMBjQftaFzn7ZCDJcp1tCUug= -github.com/tink-crypto/tink-go/v2 v2.4.0 h1:8VPZeZI4EeZ8P/vB6SIkhlStrJfivTJn+cQ4dtyHNh0= -github.com/tink-crypto/tink-go/v2 v2.4.0/go.mod h1:l//evrF2Y3MjdbpNDNGnKgCpo5zSmvUvnQ4MU+yE2sw= +github.com/thomasten/go-tpm v0.0.0-20230629092004-f43f8e2a59eb h1:840nUyrM9df2aLuzWuIkYx/DrUbX4KQZO6B9LD45aWo= +github.com/thomasten/go-tpm v0.0.0-20230629092004-f43f8e2a59eb/go.mod h1:FkNVkc6C+IsvDI9Jw1OveJmxGZUUaKxtrpOS47QWKfU= 
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= github.com/transparency-dev/merkle v0.0.2 h1:Q9nBoQcZcgPamMkGn7ghV8XiTZ/kRxn1yCG81+twTK4= github.com/transparency-dev/merkle v0.0.2/go.mod h1:pqSy+OXefQ1EDUVmAJ8MUhHB9TXGuzVAT58PqBoHz1A= -github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= -github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= +github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/vincent-petithory/dataurl v1.0.0 h1:cXw+kPto8NLuJtlMsI152irrVw9fRDX8AbShPRpg2CI= github.com/vincent-petithory/dataurl v1.0.0/go.mod h1:FHafX5vmDzyP+1CQATJn7WFKc9CvnvxyvZy6I1MrG/U= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= @@ -781,8 +792,6 @@ github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAh github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= github.com/vtolstov/go-ioctl v0.0.0-20151206205506-6be9cced4810 h1:X6ps8XHfpQjw8dUStzlMi2ybiKQ2Fmdw7UM+TinwvyM= github.com/vtolstov/go-ioctl v0.0.0-20151206205506-6be9cced4810/go.mod h1:dF0BBJ2YrV1+2eAIyEI+KeSidgA6HqoIP1u5XTlMq/o= -github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= -github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -797,135 +806,119 @@ github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/zalando/go-keyring v0.2.3 h1:v9CUu9phlABObO4LPWycf+zwMG7nlbb3t/B5wa97yms= -github.com/zalando/go-keyring v0.2.3/go.mod h1:HL4k+OXQfJUWaMnqyuSOc0drfGPX2b51Du6K+MRgZMk= -github.com/zclconf/go-cty v1.16.3 h1:osr++gw2T61A8KVYHoQiFbFd1Lh3JOCXc/jFLJXKTxk= -github.com/zclconf/go-cty v1.16.3/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= -github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo= -github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM= -github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM= -github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= -go.etcd.io/etcd/api/v3 v3.6.4 h1:7F6N7toCKcV72QmoUKa23yYLiiljMrT4xCeBL9BmXdo= -go.etcd.io/etcd/api/v3 v3.6.4/go.mod h1:eFhhvfR8Px1P6SEuLT600v+vrhdDTdcfMzmnxVXXSbk= -go.etcd.io/etcd/client/pkg/v3 v3.6.4 h1:9HBYrjppeOfFjBjaMTRxT3R7xT0GLK8EJMVC4xg6ok0= -go.etcd.io/etcd/client/pkg/v3 v3.6.4/go.mod h1:sbdzr2cl3HzVmxNw//PH7aLGVtY4QySjQFuaCgcRFAI= -go.etcd.io/etcd/client/v3 v3.6.4 h1:YOMrCfMhRzY8NgtzUsHl8hC2EBSnuqbR3dh84Uryl7A= -go.etcd.io/etcd/client/v3 v3.6.4/go.mod 
h1:jaNNHCyg2FdALyKWnd7hxZXZxZANb0+KGY+YQaEMISo= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 h1:+lm10QQTNSBd8DVTNGHx7o/IKu9HYDvLMffDhbyLccI= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50 h1:hlE8//ciYMztlGpl/VA+Zm1AcTPHYkHJPbHqE6WJUXE= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f h1:ERexzlUfuTvpE74urLSbIQW0Z/6hF9t8U4NsJLaioAY= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +github.com/zalando/go-keyring v0.2.2 h1:f0xmpYiSrHtSNAVgwip93Cg8tuF45HJM6rHq/A5RI/4= +github.com/zalando/go-keyring v0.2.2/go.mod h1:sI3evg9Wvpw3+n4SqplGSJUMwtDeROfD4nsFz4z9PG0= +github.com/zclconf/go-cty v1.14.2 h1:kTG7lqmBou0Zkx35r6HJHUQTvaRPr5bIAf3AoHS0izI= +github.com/zclconf/go-cty v1.14.2/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +go.etcd.io/etcd/api/v3 v3.5.12 h1:W4sw5ZoU2Juc9gBWuLk5U6fHfNVyY1WC5g9uiXZio/c= +go.etcd.io/etcd/api/v3 v3.5.12/go.mod h1:Ot+o0SWSyT6uHhA56al1oCED0JImsRiU9Dc26+C2a+4= +go.etcd.io/etcd/client/pkg/v3 v3.5.12 h1:EYDL6pWwyOsylrQyLp2w+HkQ46ATiOvoEdMarindU2A= +go.etcd.io/etcd/client/pkg/v3 v3.5.12/go.mod h1:seTzl2d9APP8R5Y2hFL3NVlD6qC/dOT+3kvrqPyTas4= +go.etcd.io/etcd/client/v3 v3.5.12 h1:v5lCPXn1pf1Uu3M4laUE2hp/geOTc5uPcYYsNe1lDxg= +go.etcd.io/etcd/client/v3 v3.5.12/go.mod h1:tSbBCakoWmmddL+BKVAJHa9km+O/E+bumDe9mSbPiqw= go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80= go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 h1:A/5uWzF44DlIgdm/PQFwfMkW0JX+cIcQi/SwLAmZP5M= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= -go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= -go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 h1:UW0+QyeyBVhn+COBec3nGhfnFe5lwB0ic1JBVjzhk0w= -go.opentelemetry.io/contrib/bridges/prometheus v0.57.0/go.mod h1:ppciCHRLsyCio54qbzQv0E4Jyth/fLWDTJYfvWpcSVk= -go.opentelemetry.io/contrib/detectors/gcp v1.36.0 h1:F7q2tNlCaHY9nMKHR6XH9/qkp8FktLnIcy6jJNyOCQw= -go.opentelemetry.io/contrib/detectors/gcp v1.36.0/go.mod h1:IbBN8uAIIx734PTonTPxAxnjc2pQTxWNkwfstZ+6H2k= -go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 h1:jmTVJ86dP60C01K3slFQa2NQ/Aoi7zA+wy7vMOKD9H4= -go.opentelemetry.io/contrib/exporters/autoexport v0.57.0/go.mod h1:EJBheUMttD/lABFyLXhce47Wr6DPWYReCzaZiXadH7g= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= -go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= -go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= 
-go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 h1:WzNab7hOOLzdDF/EoWCt4glhrbMPVMOO5JYTmpz36Ls= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0/go.mod h1:hKvJwTzJdp90Vh7p6q/9PAOd55dI6WA6sWj62a/JvSs= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 h1:S+LdBGiQXtJdowoJoQPEtI52syEP/JYBUpjO49EQhV8= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0/go.mod h1:5KXybFvPGds3QinJWQT7pmXf+TN5YIa7CNYObWRkj50= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 h1:j7ZSD+5yn+lo3sGV69nW04rRR0jhYnBwjuX3r0HvnK0= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0/go.mod h1:WXbYJTUaZXAbYd8lbgGuvih0yuCfOFC5RJoYnoLcGz8= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 h1:t/Qur3vKSkUCcDVaSumWF2PKHt85pc7fRvFuoVT8qFU= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0/go.mod h1:Rl61tySSdcOJWoEgYZVtmnKdA0GeKrSqkHC1t+91CH8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 h1:wpMfgF8E1rkrT1Z6meFh1NDtownE9Ii3n3X2GJYjsaU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0/go.mod h1:wAy0T/dUbs468uOlkT31xjvqQgEVXv58BRFWEgn5v/0= -go.opentelemetry.io/otel/exporters/prometheus v0.54.0 h1:rFwzp68QMgtzu9PgP3jm9XaMICI6TsofWWPcBDKwlsU= -go.opentelemetry.io/otel/exporters/prometheus v0.54.0/go.mod h1:QyjcV9qDP6VeK5qPyKETvNjmaaEc7+gqjh4SS0ZYzDU= -go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0 h1:CHXNXwfKWfzS65yrlB2PVds1IBZcdsX8Vepy9of0iRU= -go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0/go.mod h1:zKU4zUgKiaRxrdovSS2amdM5gOc59slmo/zJwGX+YBg= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 h1:rixTyDGXFxRy1xzhKrotaHy3/KXdPhlWARrCgK+eqUY= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0/go.mod h1:dowW6UsM9MKbJq5JTz2AMVp3/5iW5I/TStsk8S+CfHw= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 h1:cC2yDI3IQd0Udsux7Qmq8ToKAx1XCilTQECZ0KDZyTw= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0/go.mod h1:2PD5Ex6z8CFzDbTdOlwyNIUywRr1DN0ospafJM1wJ+s= -go.opentelemetry.io/otel/log v0.8.0 h1:egZ8vV5atrUWUbnSsHn6vB8R21G2wrKqNiDt3iWertk= -go.opentelemetry.io/otel/log v0.8.0/go.mod h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC32epk5NX8= -go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= -go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= -go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= -go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY= -go.opentelemetry.io/otel/sdk/log v0.8.0 h1:zg7GUYXqxk1jnGF/dTdLPrK06xJdrXgqgFLnI4Crxvs= -go.opentelemetry.io/otel/sdk/log v0.8.0/go.mod h1:50iXr0UVwQrYS45KbruFrEt4LvAdCaWWgIrsN3ZQggo= -go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis= -go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4= -go.opentelemetry.io/otel/trace 
v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= -go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= -go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg= -go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY= -go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= -go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0 h1:P+/g8GpuJGYbOp2tAdKrIPUX9JO02q8Q0YNlHolpibA= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0/go.mod h1:tIKj3DbO8N9Y2xo52og3irLsPI4GW02DSMtrVgNMgxg= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 h1:doUP+ExOpH3spVTLS0FcWGLnQrPct/hD/bCPbDRUEAU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0/go.mod h1:rdENBZMT2OE6Ne/KLwpiXudnAsbdrdBaqBvTN8M8BgA= +go.opentelemetry.io/otel v1.23.1 h1:Za4UzOqJYS+MUczKI320AtqZHZb7EqxO00jAHE0jmQY= +go.opentelemetry.io/otel v1.23.1/go.mod h1:Td0134eafDLcTS4y+zQ26GE8u3dEuRBiBCTUIRHaikA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU= +go.opentelemetry.io/otel/metric v1.23.1 h1:PQJmqJ9u2QaJLBOELl1cxIdPcpbwzbkjfEyelTl2rlo= +go.opentelemetry.io/otel/metric v1.23.1/go.mod h1:mpG2QPlAfnK8yNhNJAxDZruU9Y1/HubbC+KyH8FaCWI= +go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= +go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= +go.opentelemetry.io/otel/trace v1.23.1 h1:4LrmmEd8AU2rFvU1zegmvqW7+kWarxtNOPyeL6HmYY8= +go.opentelemetry.io/otel/trace v1.23.1/go.mod h1:4IpnpJFwr1mo/6HL8XIPJaE9y0+u1KcVmuW7dwFSVrI= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= +go.starlark.net v0.0.0-20210223155950-e043a3d3c984/go.mod h1:t3mmBBPzAVvK0L0n1drDmrQsJ8FoIx4INCqVMTr/Zo0= +go.starlark.net v0.0.0-20240123142251-f86470692795 h1:LmbG8Pq7KDGkglKVn8VpZOZj6vb9b8nKEGcg9l03epM= +go.starlark.net v0.0.0-20240123142251-f86470692795/go.mod h1:LcLNIzVOMp4oV+uusnpk+VU+SzXaJakUuBjoCSWH5dM= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= -go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= -go.yaml.in/yaml/v3 v3.0.3 
h1:bXOww4E/J3f66rav3pX3m8w6jDE4knZjGOw8b5Y6iNE= -go.yaml.in/yaml/v3 v3.0.3/go.mod h1:tBHosrYAkRZjRAOREWbDnBXUf08JOwYq++0QNwQiWzI= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM= -golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY= -golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 h1:R9PFI6EUdfVKgwKjZef7QIwGcBKu86OEFpJ9nUEP2l4= -golang.org/x/exp v0.0.0-20250718183923-645b1fa84792/go.mod h1:A+z0yzpGtvnG90cToK5n2tu8UJVP2XUATh+r+sfOOOc= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20240213143201-ec583247a57a h1:HinSgX1tJRX3KsL//Gxynpw5CTOAIPhgL4W8PNiIpVE= +golang.org/x/exp v0.0.0-20240213143201-ec583247a57a/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg= -golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ= +golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs= -golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8= -golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= -golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ= +golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= -golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -936,77 +929,105 @@ golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA= -golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww= +golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.33.0 
h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg= -golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4= -golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU= -golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= -golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0= -golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw= +golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= +golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod 
h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/api v0.244.0 h1:lpkP8wVibSKr++NCD36XzTk/IzeKJ3klj7vbj+XU5pE= -google.golang.org/api v0.244.0/go.mod h1:dMVhVcylamkirHdzEBAIQWUCgqY885ivNeZYd7VAVr8= +google.golang.org/api v0.165.0 h1:zd5d4JIIIaYYsfVy1HzoXYZ9rWCSBxxAglbczzo7Bgc= +google.golang.org/api v0.165.0/go.mod h1:2OatzO7ZDQsoS7IFf3rvsE17/TldiU3F/zxFHeqUB5o= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= -google.golang.org/genproto v0.0.0-20250603155806-513f23925822 h1:rHWScKit0gvAPuOnu87KpaYtjK5zBMLcULh7gxkCXu4= -google.golang.org/genproto v0.0.0-20250603155806-513f23925822/go.mod h1:HubltRL7rMh0LfnQPkMH4NPDFEWp0jw3vixw7jEM53s= -google.golang.org/genproto/googleapis/api v0.0.0-20250721164621-a45f3dfb1074 h1:mVXdvnmR3S3BQOqHECm9NGMjYiRtEvDYcqAqedTXY6s= -google.golang.org/genproto/googleapis/api v0.0.0-20250721164621-a45f3dfb1074/go.mod h1:vYFwMYFbmA8vl6Z/krj/h7+U/AqpHknwJX4Uqgfyc7I= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250728155136-f173205681a0 h1:MAKi5q709QWfnkkpNQ0M12hYJ1+e8qYVDyowc4U1XZM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250728155136-f173205681a0/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= -google.golang.org/grpc v1.74.2 h1:WoosgB65DlWVC9FqI82dGsZhWFNBSLjQ84bjROOpMu4= -google.golang.org/grpc v1.74.2/go.mod h1:CtQ+BGjaAIXHs/5YS3i473GqwBBa1zGQNevxdeBEXrM= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20240221002015-b0ce06bbee7c h1:Zmyn5CV/jxzKnF+3d+xzbomACPwLQqVpLTpyXN5uTaQ= +google.golang.org/genproto v0.0.0-20240221002015-b0ce06bbee7c/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= +google.golang.org/genproto/googleapis/api v0.0.0-20240221002015-b0ce06bbee7c h1:9g7erC9qu44ks7UK4gDNlnk4kOxZG707xKm4jVniy6o= +google.golang.org/genproto/googleapis/api v0.0.0-20240221002015-b0ce06bbee7c/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240221002015-b0ce06bbee7c h1:NUsgEN92SQQqzfA+YtqYNqYmB3DMMYLlIwUZAQFVFbo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240221002015-b0ce06bbee7c/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.61.1 h1:kLAiWrZs7YeDM6MumDe7m3y4aM6wacLzM1Y/wiLP9XY= +google.golang.org/grpc v1.61.1/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= +google.golang.org/protobuf 
v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= -gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/evanphx/json-patch.v5 v5.9.0 h1:hx1VU2SGj4F8r9b8GUwJLdc8DNO8sy79ZGui0G05GLo= +gopkg.in/evanphx/json-patch.v5 v5.9.0/go.mod h1:/kvTRh1TVm5wuM6OkHxqXtE/1nUZZpihg29RtuIyfvk= +gopkg.in/go-jose/go-jose.v2 v2.6.2 h1:Rl5+9rA0kG3vsO1qhncMPRT5eHICihAMQYJkD7u/i4M= +gopkg.in/go-jose/go-jose.v2 v2.6.2/go.mod h1:zzZDPkNNw/c9IE7Z9jr11mBZQhKQTMzoEEIoEdZlFBI= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -1014,57 +1035,57 @@ gopkg.in/yaml.v3 v3.0.1 
h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= -helm.sh/helm/v3 v3.18.4 h1:pNhnHM3nAmDrxz6/UC+hfjDY4yeDATQCka2/87hkZXQ= -helm.sh/helm/v3 v3.18.4/go.mod h1:WVnwKARAw01iEdjpEkP7Ii1tT1pTPYfM1HsakFKM3LI= -k8s.io/api v0.33.3 h1:SRd5t//hhkI1buzxb288fy2xvjubstenEKL9K51KBI8= -k8s.io/api v0.33.3/go.mod h1:01Y/iLUjNBM3TAvypct7DIj0M0NIZc+PzAHCIo0CYGE= -k8s.io/apiextensions-apiserver v0.33.3 h1:qmOcAHN6DjfD0v9kxL5udB27SRP6SG/MTopmge3MwEs= -k8s.io/apiextensions-apiserver v0.33.3/go.mod h1:oROuctgo27mUsyp9+Obahos6CWcMISSAPzQ77CAQGz8= -k8s.io/apimachinery v0.33.3 h1:4ZSrmNa0c/ZpZJhAgRdcsFcZOw1PQU1bALVQ0B3I5LA= -k8s.io/apimachinery v0.33.3/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= -k8s.io/apiserver v0.33.3 h1:Wv0hGc+QFdMJB4ZSiHrCgN3zL3QRatu56+rpccKC3J4= -k8s.io/apiserver v0.33.3/go.mod h1:05632ifFEe6TxwjdAIrwINHWE2hLwyADFk5mBsQa15E= -k8s.io/cli-runtime v0.33.2 h1:koNYQKSDdq5AExa/RDudXMhhtFasEg48KLS2KSAU74Y= -k8s.io/cli-runtime v0.33.2/go.mod h1:gnhsAWpovqf1Zj5YRRBBU7PFsRc6NkEkwYNQE+mXL88= -k8s.io/client-go v0.33.3 h1:M5AfDnKfYmVJif92ngN532gFqakcGi6RvaOF16efrpA= -k8s.io/client-go v0.33.3/go.mod h1:luqKBQggEf3shbxHY4uVENAxrDISLOarxpTKMiUuujg= -k8s.io/cluster-bootstrap v0.33.3 h1:u2NTxJ5CFSBFXaDxLQoOWMly8eni31psVso+caq6uwI= -k8s.io/cluster-bootstrap v0.33.3/go.mod h1:p970f8u8jf273zyQ5raD8WUu2XyAl0SAWOY82o7i/ds= -k8s.io/component-base v0.33.3 h1:mlAuyJqyPlKZM7FyaoM/LcunZaaY353RXiOd2+B5tGA= -k8s.io/component-base v0.33.3/go.mod h1:ktBVsBzkI3imDuxYXmVxZ2zxJnYTZ4HAsVj9iF09qp4= -k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= -k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= -k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= -k8s.io/kubectl v0.33.2 h1:7XKZ6DYCklu5MZQzJe+CkCjoGZwD1wWl7t/FxzhMz7Y= -k8s.io/kubectl v0.33.2/go.mod h1:8rC67FB8tVTYraovAGNi/idWIK90z2CHFNMmGJZJ3KI= -k8s.io/kubelet v0.33.3 h1:Cvy8+7Lq9saZds2ib7YBXbKvkMMJu3f5mzucmhSIJno= -k8s.io/kubelet v0.33.3/go.mod h1:Q1Cfr6VQq1m9v9XsE/mDmhTxPdN6NPU6Ug0e6mAqi58= -k8s.io/kubernetes v1.33.3 h1:dBx5Z2ZhR8kNzAwCoCz4j1niUbUrNUDVxeSj4/Ienu0= -k8s.io/kubernetes v1.33.3/go.mod h1:nrt8sldmckKz2fCZhgRX3SKfS2e+CzXATPv6ITNkU00= -k8s.io/mount-utils v0.33.3 h1:Q1jsnqdS4LdtJSYSXgiQv/XNrRHQncLk3gMYjKNSZrE= -k8s.io/mount-utils v0.33.3/go.mod h1:1JR4rKymg8B8bCPo618hpSAdrpO6XLh0Acqok/xVwPE= -k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= -k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -libvirt.org/go/libvirt v1.11005.0 h1:3ff08ii8h9XSe0OPmTCSUJ/3Nj+ssCAYhC6MK6ppwow= -libvirt.org/go/libvirt v1.11005.0/go.mod h1:1WiFE8EjZfq+FCVog+rvr1yatKbKZ9FaFMZgEqxEJqQ= -oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc= -oras.land/oras-go/v2 v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o= -sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8= -sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM= -sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= -sigs.k8s.io/json 
v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= -sigs.k8s.io/kustomize/api v0.19.0 h1:F+2HB2mU1MSiR9Hp1NEgoU2q9ItNOaBJl0I4Dlus5SQ= -sigs.k8s.io/kustomize/api v0.19.0/go.mod h1:/BbwnivGVcBh1r+8m3tH1VNxJmHSk1PzP5fkP6lbL1o= -sigs.k8s.io/kustomize/kyaml v0.19.0 h1:RFge5qsO1uHhwJsu3ipV7RNolC7Uozc0jUBC/61XSlA= -sigs.k8s.io/kustomize/kyaml v0.19.0/go.mod h1:FeKD5jEOH+FbZPpqUghBP8mrLjJ3+zD3/rf9NNu1cwY= -sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= -sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= -sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= -sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= -sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= +helm.sh/helm v2.17.0+incompatible h1:cSe3FaQOpRWLDXvTObQNj0P7WI98IG5yloU6tQVls2k= +helm.sh/helm v2.17.0+incompatible/go.mod h1:0Xbc6ErzwWH9qC55X1+hE3ZwhM3atbhCm/NbFZw5i+4= +helm.sh/helm/v3 v3.14.2 h1:V71fv+NGZv0icBlr+in1MJXuUIHCiPG1hW9gEBISTIA= +helm.sh/helm/v3 v3.14.2/go.mod h1:2itvvDv2WSZXTllknfQo6j7u3VVgMAvm8POCDgYH424= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/api v0.29.0 h1:NiCdQMY1QOp1H8lfRyeEf8eOwV6+0xA6XEE44ohDX2A= +k8s.io/api v0.29.0/go.mod h1:sdVmXoz2Bo/cb77Pxi71IPTSErEW32xa4aXwKH7gfBA= +k8s.io/apiextensions-apiserver v0.29.0 h1:0VuspFG7Hj+SxyF/Z/2T0uFbI5gb5LRgEyUVE3Q4lV0= +k8s.io/apiextensions-apiserver v0.29.0/go.mod h1:TKmpy3bTS0mr9pylH0nOt/QzQRrW7/h7yLdRForMZwc= +k8s.io/apimachinery v0.29.0 h1:+ACVktwyicPz0oc6MTMLwa2Pw3ouLAfAon1wPLtG48o= +k8s.io/apimachinery v0.29.0/go.mod h1:eVBxQ/cwiJxH58eK/jd/vAk4mrxmVlnpBH5J2GbMeis= +k8s.io/apiserver v0.29.0 h1:Y1xEMjJkP+BIi0GSEv1BBrf1jLU9UPfAnnGGbbDdp7o= +k8s.io/apiserver v0.29.0/go.mod h1:31n78PsRKPmfpee7/l9NYEv67u6hOL6AfcE761HapDM= +k8s.io/cli-runtime v0.29.0 h1:q2kC3cex4rOBLfPOnMSzV2BIrrQlx97gxHJs21KxKS4= +k8s.io/cli-runtime v0.29.0/go.mod h1:VKudXp3X7wR45L+nER85YUzOQIru28HQpXr0mTdeCrk= +k8s.io/client-go v0.29.0 h1:KmlDtFcrdUzOYrBhXHgKw5ycWzc3ryPX5mQe0SkG3y8= +k8s.io/client-go v0.29.0/go.mod h1:yLkXH4HKMAywcrD82KMSmfYg2DlE8mepPR4JGSo5n38= +k8s.io/cluster-bootstrap v0.29.0 h1:zCYdZ+LWDj4O86FB5tDKckIEsf2qBHjcp78xtjOzD3A= +k8s.io/cluster-bootstrap v0.29.0/go.mod h1:PDk+ouXC6bM2FFdo4u53IlWK3ZMWebH4TeE5BGmHWRw= +k8s.io/component-base v0.29.0 h1:T7rjd5wvLnPBV1vC4zWd/iWRbV8Mdxs+nGaoaFzGw3s= +k8s.io/component-base v0.29.0/go.mod h1:sADonFTQ9Zc9yFLghpDpmNXEdHyQmFIGbiuZbqAXQ1M= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240220201932-37d671a357a5 h1:QSpdNrZ9uRlV0VkqLvVO0Rqg8ioKi3oSw7O5P7pJV8M= +k8s.io/kube-openapi v0.0.0-20240220201932-37d671a357a5/go.mod h1:Pa1PvrP7ACSkuX6I7KYomY6cmMA0Tx86waBhDUgoKPw= +k8s.io/kubectl v0.29.0 h1:Oqi48gXjikDhrBF67AYuZRTcJV4lg2l42GmvsP7FmYI= +k8s.io/kubectl v0.29.0/go.mod h1:0jMjGWIcMIQzmUaMgAzhSELv5WtHo2a8pq67DtviAJs= +k8s.io/kubelet v0.29.0 h1:SX5hlznTBcGIrS1scaf8r8p6m3e475KMifwt9i12iOk= +k8s.io/kubelet v0.29.0/go.mod h1:kvKS2+Bz2tgDOG1S1q0TH2z1DasNuVF+8p6Aw7xvKkI= +k8s.io/kubernetes v1.29.0 h1:DOLN7g8+nnAYBi8JHoW0+/MCrZKDPIqAxzLCXDXd0cg= +k8s.io/kubernetes v1.29.0/go.mod 
h1:9kztbUQf9stVDcIYXx+BX3nuGCsAQDsuClkGMpPs3pA= +k8s.io/mount-utils v0.29.0 h1:KcUE0bFHONQC10V3SuLWQ6+l8nmJggw9lKLpDftIshI= +k8s.io/mount-utils v0.29.0/go.mod h1:N3lDK/G1B8R/IkAt4NhHyqB07OqEr7P763z3TNge94U= +k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ= +k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +libvirt.org/go/libvirt v1.10000.0 h1:fPVWdvZz8TSmMrTnsStih9ETsHlrzIgSEEiFzOLbhO8= +libvirt.org/go/libvirt v1.10000.0/go.mod h1:1WiFE8EjZfq+FCVog+rvr1yatKbKZ9FaFMZgEqxEJqQ= +oras.land/oras-go v1.2.5 h1:XpYuAwAb0DfQsunIyMfeET92emK8km3W4yEzZvUbsTo= +oras.land/oras-go v1.2.5/go.mod h1:PuAwRShRZCsZb7g8Ar3jKKQR/2A/qN+pkYxIOd/FAoo= +sigs.k8s.io/controller-runtime v0.17.2 h1:FwHwD1CTUemg0pW2otk7/U5/i5m2ymzvOXdbeGOUvw0= +sigs.k8s.io/controller-runtime v0.17.2/go.mod h1:+MngTvIQQQhfXtwfdGw/UOQ/aIaqsYywfCINOtwMO/s= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/kustomize/api v0.16.0 h1:/zAR4FOQDCkgSDmVzV2uiFbuy9bhu3jEzthrHCuvm1g= +sigs.k8s.io/kustomize/api v0.16.0/go.mod h1:MnFZ7IP2YqVyVwMWoRxPtgl/5hpA+eCCrQR/866cm5c= +sigs.k8s.io/kustomize/kyaml v0.16.0 h1:6J33uKSoATlKZH16unr2XOhDI+otoe2sR3M8PDzW3K0= +sigs.k8s.io/kustomize/kyaml v0.16.0/go.mod h1:xOK/7i+vmE14N2FdFyugIshB8eF6ALpy7jI87Q2nRh4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= -sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= -sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= -software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k= -software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= +software.sslmate.com/src/go-pkcs12 v0.2.0 h1:nlFkj7bTysH6VkC4fGphtjXRbezREPgrHuJG20hBGPE= +software.sslmate.com/src/go-pkcs12 v0.2.0/go.mod h1:23rNcYsMabIc1otwLpTkCCPwUq6kQsTyowttG/as0kQ= diff --git a/go.work b/go.work index 0d9909f84..efe4516e1 100644 --- a/go.work +++ b/go.work @@ -1,6 +1,6 @@ -go 1.24.4 +go 1.22.1 -toolchain go1.24.4 +toolchain go1.22.1 use ( . diff --git a/hack/bazel-deps-mirror/README.md b/hack/bazel-deps-mirror/README.md index 354e506ab..9aa5fb21e 100644 --- a/hack/bazel-deps-mirror/README.md +++ b/hack/bazel-deps-mirror/README.md @@ -2,7 +2,7 @@ This directory contains tooling to automatically mirror the dependencies of a Bazel project into the Constellation CDN at `https://cdn.confidential.cloud/`. -The tool searches for various rules in the WORKSPACE.bzlmod file and all loaded .bzl files. +The tool searches for various rules in the WORKSPACE.bazel file and all loaded .bzl files. 
It has the following commands: - check: checks if the dependencies all have a mirror URL and optionally checks if the mirror really returns the expected file diff --git a/hack/bazel-deps-mirror/bazel-deps-mirror.go b/hack/bazel-deps-mirror/bazel-deps-mirror.go index b99f75e39..5e2d92617 100644 --- a/hack/bazel-deps-mirror/bazel-deps-mirror.go +++ b/hack/bazel-deps-mirror/bazel-deps-mirror.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // bazel-deps-mirror adds external dependencies to edgeless systems' mirror. diff --git a/hack/bazel-deps-mirror/check.go b/hack/bazel-deps-mirror/check.go index 30c65a131..64bd68b73 100644 --- a/hack/bazel-deps-mirror/check.go +++ b/hack/bazel-deps-mirror/check.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package main @@ -40,7 +40,7 @@ func runCheck(cmd *cobra.Command, _ []string) error { return err } log := logger.NewTextLogger(flags.logLevel) - log.Debug("Using flags", "mirror", flags.mirror, "mirrorUnauthenticated", flags.mirrorUnauthenticated) + log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) filesHelper, err := bazelfiles.New() if err != nil { @@ -89,7 +89,7 @@ func runCheck(cmd *cobra.Command, _ []string) error { } func checkBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorCheck mirrorChecker, bazelFile bazelfiles.BazelFile, log *slog.Logger) (issByFile issues.ByFile, err error) { - log.Debug(fmt.Sprintf("Checking file: %q", bazelFile.RelPath)) + log.Debug(fmt.Sprintf("Checking file: %s", bazelFile.RelPath)) issByFile = issues.NewByFile() buildfile, err := fileHelper.LoadFile(bazelFile) if err != nil { @@ -97,12 +97,12 @@ func checkBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorCh } found := rules.Rules(buildfile, rules.SupportedRules) if len(found) == 0 { - log.Debug(fmt.Sprintf("No rules found in file: %q", bazelFile.RelPath)) + log.Debug(fmt.Sprintf("No rules found in file: %s", bazelFile.RelPath)) return issByFile, nil } - log.Debug(fmt.Sprintf("Found %d rules in file: %q", len(found), bazelFile.RelPath)) + log.Debug(fmt.Sprintf("Found %d rules in file: %s", len(found), bazelFile.RelPath)) for _, rule := range found { - log.Debug(fmt.Sprintf("Checking rule: %q", rule.Name())) + log.Debug(fmt.Sprintf("Checking rule: %s", rule.Name())) // check if the rule is a valid pinned dependency rule (has all required attributes) if issues := rules.ValidatePinned(rule); len(issues) > 0 { issByFile.Add(rule.Name(), issues...) 
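The README hunk above describes the tool's check command, which verifies that every pinned dependency rule carries a mirror URL and, optionally, that the mirror really serves the expected file. As a rough illustration only (not the repository's actual implementation; the URL and hash below are placeholders), the underlying download-and-verify step could be sketched in plain Go as follows:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"net/http"
)

// verifyMirror downloads url and checks that the body's SHA-256 digest
// matches wantHex. This mirrors, conceptually, what a "check the mirror
// really returns the expected file" step has to do.
func verifyMirror(url, wantHex string) error {
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("mirror returned status %s", resp.Status)
	}
	h := sha256.New()
	if _, err := io.Copy(h, resp.Body); err != nil {
		return err
	}
	if got := hex.EncodeToString(h.Sum(nil)); got != wantHex {
		return fmt.Errorf("hash mismatch: got %s, want %s", got, wantHex)
	}
	return nil
}

func main() {
	// Placeholder values for illustration only; the real tool derives both
	// from the pinned Bazel rules it finds in WORKSPACE and .bzl files.
	err := verifyMirror("https://example.com/mirror/deadbeef", "deadbeef")
	fmt.Println(err)
}

The actual check command drives this comparison through the internal rules and mirror packages visible in the hunks above; the sketch only shows the plain download-and-hash comparison at its core.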
diff --git a/hack/bazel-deps-mirror/fix.go b/hack/bazel-deps-mirror/fix.go index a6018dfe1..9a327ee27 100644 --- a/hack/bazel-deps-mirror/fix.go +++ b/hack/bazel-deps-mirror/fix.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package main @@ -40,7 +40,7 @@ func runFix(cmd *cobra.Command, _ []string) error { return err } log := logger.NewTextLogger(flags.logLevel) - log.Debug("Using flags", "unauthenticated", flags.unauthenticated, "dryRun", flags.dryRun) + log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) fileHelper, err := bazelfiles.New() if err != nil { @@ -96,10 +96,10 @@ func fixBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUplo } found := rules.Rules(buildfile, rules.SupportedRules) if len(found) == 0 { - log.Debug(fmt.Sprintf("No rules found in file: %q", bazelFile.RelPath)) + log.Debug(fmt.Sprintf("No rules found in file: %s", bazelFile.RelPath)) return iss, nil } - log.Debug(fmt.Sprintf("Found %d rules in file: %q", len(found), bazelFile.RelPath)) + log.Debug(fmt.Sprintf("Found %d rules in file: %s", len(found), bazelFile.RelPath)) for _, rule := range found { changedRule, ruleIssues := fixRule(ctx, mirrorUpload, rule, log) if len(ruleIssues) > 0 { @@ -113,7 +113,7 @@ func fixBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUplo return iss, nil } if !changed { - log.Debug(fmt.Sprintf("No changes to file: %q", bazelFile.RelPath)) + log.Debug(fmt.Sprintf("No changes to file: %s", bazelFile.RelPath)) return iss, nil } if dryRun { @@ -142,12 +142,12 @@ func learnHashForRule(ctx context.Context, mirrorUpload mirrorUploader, rule *bu return err } rules.SetHash(rule, learnedHash) - log.Debug(fmt.Sprintf("Learned hash for rule %q: %q", rule.Name(), learnedHash)) + log.Debug(fmt.Sprintf("Learned hash for rule %s: %s", rule.Name(), learnedHash)) return nil } func fixRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule, log *slog.Logger) (changed bool, iss []error) { - log.Debug(fmt.Sprintf("Fixing rule: %q", rule.Name())) + log.Debug(fmt.Sprintf("Fixing rule: %s", rule.Name())) // try to learn the hash if hash, err := rules.GetHash(rule); err != nil || hash == "" { diff --git a/hack/bazel-deps-mirror/internal/bazelfiles/files.go b/hack/bazel-deps-mirror/internal/bazelfiles/files.go index a891deb49..468ff24ac 100644 --- a/hack/bazel-deps-mirror/internal/bazelfiles/files.go +++ b/hack/bazel-deps-mirror/internal/bazelfiles/files.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // package bazelfiles is used to find and handle Bazel WORKSPACE and bzl files. @@ -53,12 +53,12 @@ func (h *Helper) FindFiles() ([]BazelFile, error) { return append(bzlFiles, workspaceFile), nil } -// findWorkspaceFile returns the path to the Bazel WORKSPACE.bzlmod file (or WORKSPACE if the former doesn't exist). +// findWorkspaceFile returns the path to the Bazel WORKSPACE.bazel file (or WORKSPACE if the former doesn't exist). 
func (h *Helper) findWorkspaceFile() (BazelFile, error) { - if _, err := h.fs.Stat("WORKSPACE.bzlmod"); err == nil { + if _, err := h.fs.Stat("WORKSPACE.bazel"); err == nil { return BazelFile{ - RelPath: "WORKSPACE.bzlmod", - AbsPath: filepath.Join(h.workspaceRoot, "WORKSPACE.bzlmod"), + RelPath: "WORKSPACE.bazel", + AbsPath: filepath.Join(h.workspaceRoot, "WORKSPACE.bazel"), Type: BazelFileTypeWorkspace, }, nil } @@ -150,10 +150,8 @@ type BazelFile struct { type BazelFileType int const ( - // BazelFileTypeBzl is a .bzl file. - BazelFileTypeBzl = iota - // BazelFileTypeWorkspace is a WORKSPACE or WORKSPACE.bzlmod file. - BazelFileTypeWorkspace + BazelFileTypeBzl = iota // BazelFileTypeBzl is a .bzl file + BazelFileTypeWorkspace // BazelFileTypeWorkspace is a WORKSPACE or WORKSPACE.bazel file ) // LookupEnv can be the real os.LookupEnv or a mock for testing. diff --git a/hack/bazel-deps-mirror/internal/bazelfiles/files_test.go b/hack/bazel-deps-mirror/internal/bazelfiles/files_test.go index 889a490ec..c7d37770b 100644 --- a/hack/bazel-deps-mirror/internal/bazelfiles/files_test.go +++ b/hack/bazel-deps-mirror/internal/bazelfiles/files_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package bazelfiles @@ -42,22 +42,22 @@ func TestFindFiles(t *testing.T) { }, }, }, - "only WORKSPACE.bzlmod file": { - files: []string{"WORKSPACE.bzlmod"}, + "only WORKSPACE.bazel file": { + files: []string{"WORKSPACE.bazel"}, wantFiles: []BazelFile{ { - RelPath: "WORKSPACE.bzlmod", - AbsPath: "/WORKSPACE.bzlmod", + RelPath: "WORKSPACE.bazel", + AbsPath: "/WORKSPACE.bazel", Type: BazelFileTypeWorkspace, }, }, }, - "both WORKSPACE and WORKSPACE.bzlmod files": { - files: []string{"WORKSPACE", "WORKSPACE.bzlmod"}, + "both WORKSPACE and WORKSPACE.bazel files": { + files: []string{"WORKSPACE", "WORKSPACE.bazel"}, wantFiles: []BazelFile{ { - RelPath: "WORKSPACE.bzlmod", - AbsPath: "/WORKSPACE.bzlmod", + RelPath: "WORKSPACE.bazel", + AbsPath: "/WORKSPACE.bazel", Type: BazelFileTypeWorkspace, }, }, @@ -67,11 +67,11 @@ func TestFindFiles(t *testing.T) { wantErr: true, }, "all kinds": { - files: []string{"WORKSPACE", "WORKSPACE.bzlmod", "foo.bzl", "bar.bzl", "unused.txt", "folder/baz.bzl"}, + files: []string{"WORKSPACE", "WORKSPACE.bazel", "foo.bzl", "bar.bzl", "unused.txt", "folder/baz.bzl"}, wantFiles: []BazelFile{ { - RelPath: "WORKSPACE.bzlmod", - AbsPath: "/WORKSPACE.bzlmod", + RelPath: "WORKSPACE.bazel", + AbsPath: "/WORKSPACE.bazel", Type: BazelFileTypeWorkspace, }, { @@ -216,15 +216,15 @@ func TestDiff(t *testing.T) { assert := assert.New(t) require := require.New(t) fs := afero.NewMemMapFs() - err := afero.WriteFile(fs, "WORKSPACE.bzlmod", []byte(""), 0o644) + err := afero.WriteFile(fs, "WORKSPACE.bazel", []byte(""), 0o644) require.NoError(err) helper := Helper{ fs: fs, workspaceRoot: "/", } fileRef := BazelFile{ - RelPath: "WORKSPACE.bzlmod", - AbsPath: "/WORKSPACE.bzlmod", + RelPath: "WORKSPACE.bazel", + AbsPath: "/WORKSPACE.bazel", Type: BazelFileTypeWorkspace, } bf, err := helper.LoadFile(fileRef) @@ -247,10 +247,10 @@ func TestDiff(t *testing.T) { ) diff, err = helper.Diff(fileRef, bf) require.NoError(err) - assert.Equal("--- a/WORKSPACE.bzlmod\n+++ b/WORKSPACE.bzlmod\n@@ -1 +1 @@\n+workspace(name = \"foo\")\n", diff) + assert.Equal("--- a/WORKSPACE.bazel\n+++ b/WORKSPACE.bazel\n@@ -1 +1 @@\n+workspace(name = \"foo\")\n", diff) err = helper.WriteFile(fileRef, bf) require.NoError(err) - contents, err := 
afero.ReadFile(fs, "WORKSPACE.bzlmod") + contents, err := afero.ReadFile(fs, "WORKSPACE.bazel") assert.NoError(err) assert.Equal("workspace(name = \"foo\")\n", string(contents)) diff, err = helper.Diff(fileRef, bf) diff --git a/hack/bazel-deps-mirror/internal/issues/issues.go b/hack/bazel-deps-mirror/internal/issues/issues.go index a1cac0e64..f01495ceb 100644 --- a/hack/bazel-deps-mirror/internal/issues/issues.go +++ b/hack/bazel-deps-mirror/internal/issues/issues.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // package issues can store and report issues found during the bazel-deps-mirror process. diff --git a/hack/bazel-deps-mirror/internal/issues/issues_test.go b/hack/bazel-deps-mirror/internal/issues/issues_test.go index 6e402f1bb..2df8baf16 100644 --- a/hack/bazel-deps-mirror/internal/issues/issues_test.go +++ b/hack/bazel-deps-mirror/internal/issues/issues_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package issues diff --git a/hack/bazel-deps-mirror/internal/mirror/mirror.go b/hack/bazel-deps-mirror/internal/mirror/mirror.go index 947f565da..1593cc298 100644 --- a/hack/bazel-deps-mirror/internal/mirror/mirror.go +++ b/hack/bazel-deps-mirror/internal/mirror/mirror.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // package mirror is used upload and download Bazel dependencies to and from a mirror. @@ -95,10 +95,10 @@ func (m *Maintainer) Mirror(ctx context.Context, hash string, urls []string) err } for _, url := range urls { - m.log.Debug(fmt.Sprintf("Mirroring file with hash %q from %q", hash, url)) + m.log.Debug(fmt.Sprintf("Mirroring file with hash %v from %q", hash, url)) body, err := m.downloadFromUpstream(ctx, url) if err != nil { - m.log.Debug(fmt.Sprintf("Failed to download file from %q: %q", url, err)) + m.log.Debug(fmt.Sprintf("Failed to download file from %q: %v", url, err)) continue } defer body.Close() @@ -129,13 +129,13 @@ func (m *Maintainer) Learn(ctx context.Context, urls []string) (string, error) { m.log.Debug(fmt.Sprintf("Learning new hash from %q", url)) body, err := m.downloadFromUpstream(ctx, url) if err != nil { - m.log.Debug(fmt.Sprintf("Failed to download file from %q: %q", url, err)) + m.log.Debug(fmt.Sprintf("Failed to download file from %q: %v", url, err)) continue } defer body.Close() streamedHash := sha256.New() if _, err := io.Copy(streamedHash, body); err != nil { - m.log.Debug(fmt.Sprintf("Failed to stream file from %q: %q", url, err)) + m.log.Debug(fmt.Sprintf("Failed to stream file from %q: %v", url, err)) } learnedHash := hex.EncodeToString(streamedHash.Sum(nil)) m.log.Debug(fmt.Sprintf("File successfully downloaded from %q with %q", url, learnedHash)) @@ -146,7 +146,7 @@ func (m *Maintainer) Learn(ctx context.Context, urls []string) (string, error) { // Check checks if a file is present and has the correct hash in the CAS mirror. 
func (m *Maintainer) Check(ctx context.Context, expectedHash string) error { - m.log.Debug(fmt.Sprintf("Checking consistency of object with hash %q", expectedHash)) + m.log.Debug(fmt.Sprintf("Checking consistency of object with hash %v", expectedHash)) if m.unauthenticated { return m.checkUnauthenticated(ctx, expectedHash) } @@ -157,7 +157,7 @@ func (m *Maintainer) Check(ctx context.Context, expectedHash string) error { // It uses the authenticated CAS s3 endpoint to download the file metadata. func (m *Maintainer) checkAuthenticated(ctx context.Context, expectedHash string) error { key := path.Join(keyBase, expectedHash) - m.log.Debug(fmt.Sprintf("Check: s3 getObjectAttributes {Bucket: %q, Key: %q}", m.bucket, key)) + m.log.Debug(fmt.Sprintf("Check: s3 getObjectAttributes {Bucket: %v, Key: %v}", m.bucket, key)) attributes, err := m.objectStorageClient.GetObjectAttributes(ctx, &s3.GetObjectAttributesInput{ Bucket: &m.bucket, Key: &key, @@ -174,7 +174,7 @@ func (m *Maintainer) checkAuthenticated(ctx context.Context, expectedHash string // checksums are not guaranteed to be present // and if present, they are only meaningful for single part objects // fallback if checksum cannot be verified from attributes - m.log.Debug(fmt.Sprintf("S3 object attributes cannot be used to verify key %q. Falling back to download.", key)) + m.log.Debug(fmt.Sprintf("S3 object attributes cannot be used to verify key %v. Falling back to download.", key)) return m.checkUnauthenticated(ctx, expectedHash) } @@ -192,7 +192,7 @@ func (m *Maintainer) checkUnauthenticated(ctx context.Context, expectedHash stri if err != nil { return err } - m.log.Debug(fmt.Sprintf("Check: http get {Url: %q}", pubURL)) + m.log.Debug(fmt.Sprintf("Check: http get {Url: %v}", pubURL)) req, err := http.NewRequestWithContext(ctx, http.MethodGet, pubURL, http.NoBody) if err != nil { return err @@ -221,10 +221,10 @@ func (m *Maintainer) put(ctx context.Context, hash string, data io.Reader) error key := path.Join(keyBase, hash) if m.dryRun { - m.log.Debug(fmt.Sprintf("DryRun: s3 put object {Bucket: %q, Key: %q}", m.bucket, key)) + m.log.Debug(fmt.Sprintf("DryRun: s3 put object {Bucket: %v, Key: %v}", m.bucket, key)) return nil } - m.log.Debug(fmt.Sprintf("Uploading object with hash %q to \"s3://%s/%s\"", hash, m.bucket, key)) + m.log.Debug(fmt.Sprintf("Uploading object with hash %v to s3://%v/%v", hash, m.bucket, key)) _, err := m.uploadClient.Upload(ctx, &s3.PutObjectInput{ Bucket: &m.bucket, Key: &key, diff --git a/hack/bazel-deps-mirror/internal/mirror/mirror_test.go b/hack/bazel-deps-mirror/internal/mirror/mirror_test.go index c94a84507..541c1fa52 100644 --- a/hack/bazel-deps-mirror/internal/mirror/mirror_test.go +++ b/hack/bazel-deps-mirror/internal/mirror/mirror_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package mirror @@ -137,7 +137,7 @@ func TestMirror(t *testing.T) { unauthenticated: tc.unauthenticated, log: logger.NewTest(t), } - err := m.Mirror(t.Context(), tc.hash, []string{tc.upstreamURL}) + err := m.Mirror(context.Background(), tc.hash, []string{tc.upstreamURL}) if tc.wantErr { assert.Error(t, err) } else { @@ -180,7 +180,7 @@ func TestLearn(t *testing.T) { }, log: logger.NewTest(t), } - gotHash, err := m.Learn(t.Context(), []string{"https://example.com/foo"}) + gotHash, err := m.Learn(context.Background(), []string{"https://example.com/foo"}) if tc.wantErr { assert.Error(err) return @@ -274,7 +274,7 @@ func TestCheck(t *testing.T) { }, 
log: logger.NewTest(t), } - err := m.Check(t.Context(), tc.hash) + err := m.Check(context.Background(), tc.hash) if tc.wantErr { assert.Error(t, err) } else { diff --git a/hack/bazel-deps-mirror/internal/rules/rules.go b/hack/bazel-deps-mirror/internal/rules/rules.go index 5cb90ccaa..d8a5269f3 100644 --- a/hack/bazel-deps-mirror/internal/rules/rules.go +++ b/hack/bazel-deps-mirror/internal/rules/rules.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // package rules is used find and modify Bazel rules in WORKSPACE and bzl files. diff --git a/hack/bazel-deps-mirror/internal/rules/rules_test.go b/hack/bazel-deps-mirror/internal/rules/rules_test.go index 4e494d0a3..b607b99b8 100644 --- a/hack/bazel-deps-mirror/internal/rules/rules_test.go +++ b/hack/bazel-deps-mirror/internal/rules/rules_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package rules diff --git a/hack/bazel-deps-mirror/upgrade.go b/hack/bazel-deps-mirror/upgrade.go index e4ceca996..e2c07d5c2 100644 --- a/hack/bazel-deps-mirror/upgrade.go +++ b/hack/bazel-deps-mirror/upgrade.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package main @@ -40,7 +40,7 @@ func runUpgrade(cmd *cobra.Command, _ []string) error { return err } log := logger.NewTextLogger(flags.logLevel) - log.Debug("Using flags", "unauthenticated", flags.unauthenticated, "dryRun", flags.dryRun) + log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) fileHelper, err := bazelfiles.New() if err != nil { @@ -96,10 +96,10 @@ func upgradeBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirror } found := rules.Rules(buildfile, rules.SupportedRules) if len(found) == 0 { - log.Debug(fmt.Sprintf("No rules found in file: %q", bazelFile.RelPath)) + log.Debug(fmt.Sprintf("No rules found in file: %s", bazelFile.RelPath)) return iss, nil } - log.Debug(fmt.Sprintf("Found %d rules in file: %q", len(found), bazelFile.RelPath)) + log.Debug(fmt.Sprintf("Found %d rules in file: %s", len(found), bazelFile.RelPath)) for _, rule := range found { changedRule, ruleIssues := upgradeRule(ctx, mirrorUpload, rule, log) if len(ruleIssues) > 0 { @@ -113,7 +113,7 @@ func upgradeBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirror return iss, nil } if !changed { - log.Debug(fmt.Sprintf("No changes to file: %q", bazelFile.RelPath)) + log.Debug(fmt.Sprintf("No changes to file: %s", bazelFile.RelPath)) return iss, nil } if dryRun { @@ -133,7 +133,7 @@ func upgradeBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirror } func upgradeRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule, log *slog.Logger) (changed bool, iss []error) { - log.Debug(fmt.Sprintf("Upgrading rule: %q", rule.Name())) + log.Debug(fmt.Sprintf("Upgrading rule: %s", rule.Name())) upstreamURLs, err := rules.UpstreamURLs(rule) if errors.Is(err, rules.ErrNoUpstreamURL) { diff --git a/hack/cli-k8s-compatibility/main.go b/hack/cli-k8s-compatibility/main.go index e1cb7c611..5361fcdf4 100644 --- a/hack/cli-k8s-compatibility/main.go +++ b/hack/cli-k8s-compatibility/main.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // cli-k8s-compatibility generates JSON output for a CLI version and its supported Kubernetes versions. 
@@ -45,7 +45,7 @@ func main() { } cliInfo := versionsapi.CLIInfo{ - Ref: versionsapi.CanonicalizeRef(*refFlag), + Ref: *refFlag, Stream: *streamFlag, Version: *versionFlag, Kubernetes: []string{}, diff --git a/hack/clidocgen/main.go b/hack/clidocgen/main.go index 599e30d22..b63b796cb 100644 --- a/hack/clidocgen/main.go +++ b/hack/clidocgen/main.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Clidocgen generates a Markdown page describing all CLI commands. diff --git a/hack/gocoverage/main.go b/hack/gocoverage/main.go index c6b755cde..d119a5bf5 100644 --- a/hack/gocoverage/main.go +++ b/hack/gocoverage/main.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* @@ -139,25 +139,25 @@ func parseTestLine(line string) (string, packageCoverage, error) { return pkg, coverage, nil } -func diff(a, b, touched string) error { - af, err := os.Open(a) +func diff(old, new, touched string) error { + oldf, err := os.Open(old) if err != nil { return err } - defer af.Close() - var aReport, bReport report - if err := json.NewDecoder(af).Decode(&aReport); err != nil { + defer oldf.Close() + var oldRep, newReport report + if err := json.NewDecoder(oldf).Decode(&oldRep); err != nil { return err } - bf, err := os.Open(b) + newf, err := os.Open(new) if err != nil { return err } - defer bf.Close() - if err := json.NewDecoder(bf).Decode(&bReport); err != nil { + defer newf.Close() + if err := json.NewDecoder(newf).Decode(&newReport); err != nil { return err } - diffs, err := diffCoverage(aReport, bReport) + diffs, err := diffCoverage(oldRep, newReport) if err != nil { return err } @@ -170,17 +170,17 @@ type coverageDiff struct { new *packageCoverage } -func diffCoverage(a, b report) (map[string]coverageDiff, error) { +func diffCoverage(old, new report) (map[string]coverageDiff, error) { allPkgs := make(map[string]struct{}) - for pkg := range a.Coverage { + for pkg := range old.Coverage { allPkgs[pkg] = struct{}{} } - for pkg := range b.Coverage { + for pkg := range new.Coverage { allPkgs[pkg] = struct{}{} } diffs := make(map[string]coverageDiff) for pkg := range allPkgs { - diffs[pkg] = coverageDiff{old: a.Coverage[pkg], new: b.Coverage[pkg]} + diffs[pkg] = coverageDiff{old: old.Coverage[pkg], new: new.Coverage[pkg]} if diffs[pkg].old == nil && diffs[pkg].new == nil { return nil, fmt.Errorf("both old and new coverage are nil for pkg %s", pkg) } diff --git a/hack/gocoverage/main_test.go b/hack/gocoverage/main_test.go index 267aa1522..613e74dad 100644 --- a/hack/gocoverage/main_test.go +++ b/hack/gocoverage/main_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package main diff --git a/hack/image-fetch/main.go b/hack/image-fetch/main.go index 4124a9f54..7a88801a8 100644 --- a/hack/image-fetch/main.go +++ b/hack/image-fetch/main.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* diff --git a/hack/image-fetch/main_test.go b/hack/image-fetch/main_test.go index 30fe94af4..fa73af23e 100644 --- a/hack/image-fetch/main_test.go +++ b/hack/image-fetch/main_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package main diff --git a/hack/logcollector/cmd/root.go 
b/hack/logcollector/cmd/root.go index a6f8f1501..9af040c49 100644 --- a/hack/logcollector/cmd/root.go +++ b/hack/logcollector/cmd/root.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/hack/logcollector/cmd/template.go b/hack/logcollector/cmd/template.go index 1812981ae..8776a52df 100644 --- a/hack/logcollector/cmd/template.go +++ b/hack/logcollector/cmd/template.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/hack/logcollector/fields/fields.go b/hack/logcollector/fields/fields.go index 4443b3415..d905f2b66 100644 --- a/hack/logcollector/fields/fields.go +++ b/hack/logcollector/fields/fields.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package fields diff --git a/hack/logcollector/internal/filebeat.go b/hack/logcollector/internal/filebeat.go index d46812e37..bb7fd8493 100644 --- a/hack/logcollector/internal/filebeat.go +++ b/hack/logcollector/internal/filebeat.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package internal diff --git a/hack/logcollector/internal/logstash.go b/hack/logcollector/internal/logstash.go index 808ed4d32..ea03365e6 100644 --- a/hack/logcollector/internal/logstash.go +++ b/hack/logcollector/internal/logstash.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package internal diff --git a/hack/logcollector/internal/metricbeat.go b/hack/logcollector/internal/metricbeat.go index 0d71ea125..603f2dcd4 100644 --- a/hack/logcollector/internal/metricbeat.go +++ b/hack/logcollector/internal/metricbeat.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package internal diff --git a/hack/logcollector/internal/prepare.go b/hack/logcollector/internal/prepare.go index acfec0114..8f6408ac4 100644 --- a/hack/logcollector/internal/prepare.go +++ b/hack/logcollector/internal/prepare.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package internal diff --git a/hack/logcollector/main.go b/hack/logcollector/main.go index 3535e59dc..f4f6aaf96 100644 --- a/hack/logcollector/main.go +++ b/hack/logcollector/main.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package main diff --git a/hack/oci-pin/codegen.go b/hack/oci-pin/codegen.go index b733c834b..910056ed0 100644 --- a/hack/oci-pin/codegen.go +++ b/hack/oci-pin/codegen.go @@ -1,6 +1,6 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package main @@ -45,14 +45,14 @@ func runCodegen(cmd *cobra.Command, _ []string) error { return err } log := logger.NewTextLogger(flags.logLevel) - log.Debug("Using flags", "identifier", flags.identifier, "imageRepoTag", flags.imageRepoTag, "ociPath", flags.ociPath, "pkg", flags.pkg) + log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) registry, prefix, name, tag, err := splitRepoTag(flags.imageRepoTag) if err != nil { return fmt.Errorf("splitting OCI image reference %q: %w", flags.imageRepoTag, err) } 
- log.Debug(fmt.Sprintf("Generating Go code for OCI image %q.", name)) + log.Debug(fmt.Sprintf("Generating Go code for OCI image %s.", name)) ociIndexPath := filepath.Join(flags.ociPath, "index.json") index, err := os.Open(ociIndexPath) @@ -78,7 +78,7 @@ func runCodegen(cmd *cobra.Command, _ []string) error { return err } - log.Debug(fmt.Sprintf("OCI image digest: %q", digest)) + log.Debug(fmt.Sprintf("OCI image digest: %s", digest)) if err := inject.Render(out, inject.PinningValues{ Package: flags.pkg, diff --git a/hack/oci-pin/internal/extract/extract.go b/hack/oci-pin/internal/extract/extract.go index ae37c9501..5b76e954d 100644 --- a/hack/oci-pin/internal/extract/extract.go +++ b/hack/oci-pin/internal/extract/extract.go @@ -1,6 +1,6 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package extract diff --git a/hack/oci-pin/internal/extract/extract_test.go b/hack/oci-pin/internal/extract/extract_test.go index 893a0de20..cf3dddd93 100644 --- a/hack/oci-pin/internal/extract/extract_test.go +++ b/hack/oci-pin/internal/extract/extract_test.go @@ -1,6 +1,6 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package extract diff --git a/hack/oci-pin/internal/inject/inject.go b/hack/oci-pin/internal/inject/inject.go index 7175d7e9f..f70ef4952 100644 --- a/hack/oci-pin/internal/inject/inject.go +++ b/hack/oci-pin/internal/inject/inject.go @@ -1,6 +1,6 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // inject renders Go source files with injected pinning values. diff --git a/hack/oci-pin/internal/inject/inject_test.go b/hack/oci-pin/internal/inject/inject_test.go index e33cf1887..9d209be11 100644 --- a/hack/oci-pin/internal/inject/inject_test.go +++ b/hack/oci-pin/internal/inject/inject_test.go @@ -1,6 +1,6 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package inject diff --git a/hack/oci-pin/internal/sums/sums.go b/hack/oci-pin/internal/sums/sums.go index b6acdd426..48545e9e3 100644 --- a/hack/oci-pin/internal/sums/sums.go +++ b/hack/oci-pin/internal/sums/sums.go @@ -1,6 +1,6 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // sums creates and combines sha256sums files. 
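Context for the oci-pin changes around this point: codegen.go above and sum.go below both open index.json under the OCI layout path and log the extracted image digest. A minimal sketch of reading that digest is given here, assuming the standard OCI image-layout index format; the helper name digestFromLayout is illustrative only — the diff itself relies on hack/oci-pin/internal/extract for this step:

// Sketch only: reads the first manifest digest from an OCI layout's index.json.
package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"os"
	"path/filepath"
)

type ociIndex struct {
	Manifests []struct {
		Digest string `json:"digest"`
	} `json:"manifests"`
}

// digestFromLayout opens <ociPath>/index.json and returns the first manifest digest.
func digestFromLayout(ociPath string) (string, error) {
	f, err := os.Open(filepath.Join(ociPath, "index.json"))
	if err != nil {
		return "", err
	}
	defer f.Close()
	var idx ociIndex
	if err := json.NewDecoder(f).Decode(&idx); err != nil {
		return "", err
	}
	if len(idx.Manifests) == 0 {
		return "", errors.New("index.json contains no manifests")
	}
	return idx.Manifests[0].Digest, nil
}

func main() {
	if len(os.Args) < 2 {
		fmt.Fprintln(os.Stderr, "usage: digest <oci-layout-path>")
		os.Exit(1)
	}
	digest, err := digestFromLayout(os.Args[1])
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(digest)
}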
diff --git a/hack/oci-pin/internal/sums/sums_test.go b/hack/oci-pin/internal/sums/sums_test.go index 3bc61fe79..4796bd028 100644 --- a/hack/oci-pin/internal/sums/sums_test.go +++ b/hack/oci-pin/internal/sums/sums_test.go @@ -1,6 +1,6 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package sums diff --git a/hack/oci-pin/merge.go b/hack/oci-pin/merge.go index d13b84c3c..94bafd52b 100644 --- a/hack/oci-pin/merge.go +++ b/hack/oci-pin/merge.go @@ -1,6 +1,6 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package main @@ -36,7 +36,7 @@ func runMerge(cmd *cobra.Command, _ []string) error { return err } log := logger.NewTextLogger(flags.logLevel) - log.Debug("Using flags", "inputs", flags.inputs, "output", flags.output, "logLevel", flags.logLevel) + log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) log.Debug(fmt.Sprintf("Merging sum file from %q into %q.", flags.inputs, flags.output)) diff --git a/hack/oci-pin/oci-pin.go b/hack/oci-pin/oci-pin.go index 1c68e42d3..5b3206c15 100644 --- a/hack/oci-pin/oci-pin.go +++ b/hack/oci-pin/oci-pin.go @@ -1,6 +1,6 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // oci-pin generates Go code and shasum files for OCI images. diff --git a/hack/oci-pin/sum.go b/hack/oci-pin/sum.go index ba77b727b..d6be5154a 100644 --- a/hack/oci-pin/sum.go +++ b/hack/oci-pin/sum.go @@ -1,6 +1,6 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package main @@ -42,14 +42,14 @@ func runSum(cmd *cobra.Command, _ []string) error { return err } log := logger.NewTextLogger(flags.logLevel) - log.Debug("Using flags", "imageRepoTag", flags.imageRepoTag, "ociPath", flags.ociPath) + log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) registry, prefix, name, tag, err := splitRepoTag(flags.imageRepoTag) if err != nil { return fmt.Errorf("splitting repo tag: %w", err) } - log.Debug(fmt.Sprintf("Generating sum file for OCI image %q.", name)) + log.Debug(fmt.Sprintf("Generating sum file for OCI image %s.", name)) ociIndexPath := filepath.Join(flags.ociPath, "index.json") index, err := os.Open(ociIndexPath) @@ -75,7 +75,7 @@ func runSum(cmd *cobra.Command, _ []string) error { return fmt.Errorf("extracting OCI image digest: %w", err) } - log.Debug(fmt.Sprintf("OCI image digest: %q", digest)) + log.Debug(fmt.Sprintf("OCI image digest: %s", digest)) refs := []sums.PinnedImageReference{ { diff --git a/hack/qemu-metadata-api/BUILD.bazel b/hack/qemu-metadata-api/BUILD.bazel index 2cfbeb83a..67811771c 100644 --- a/hack/qemu-metadata-api/BUILD.bazel +++ b/hack/qemu-metadata-api/BUILD.bazel @@ -13,9 +13,8 @@ go_library( importpath = "github.com/edgelesssys/constellation/v2/hack/qemu-metadata-api", visibility = ["//visibility:private"], deps = [ - "//hack/qemu-metadata-api/dhcp/dnsmasq", - "//hack/qemu-metadata-api/dhcp/virtwrapper", "//hack/qemu-metadata-api/server", + "//hack/qemu-metadata-api/virtwrapper", "//internal/logger", "@org_libvirt_go_libvirt//:libvirt", ], diff --git a/hack/qemu-metadata-api/dhcp/BUILD.bazel b/hack/qemu-metadata-api/dhcp/BUILD.bazel deleted file mode 100644 index 21ba5404c..000000000 --- a/hack/qemu-metadata-api/dhcp/BUILD.bazel +++ /dev/null @@ -1,8 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "dhcp", - srcs = ["dhcp.go"], - importpath = 
"github.com/edgelesssys/constellation/v2/hack/qemu-metadata-api/dhcp", - visibility = ["//visibility:public"], -) diff --git a/hack/qemu-metadata-api/dhcp/dnsmasq/BUILD.bazel b/hack/qemu-metadata-api/dhcp/dnsmasq/BUILD.bazel deleted file mode 100644 index ab5bbd249..000000000 --- a/hack/qemu-metadata-api/dhcp/dnsmasq/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") -load("//bazel/go:go_test.bzl", "go_test") - -go_library( - name = "dnsmasq", - srcs = ["dnsmasq.go"], - importpath = "github.com/edgelesssys/constellation/v2/hack/qemu-metadata-api/dhcp/dnsmasq", - visibility = ["//visibility:public"], - deps = [ - "//hack/qemu-metadata-api/dhcp", - "@com_github_spf13_afero//:afero", - ], -) - -go_test( - name = "dnsmasq_test", - srcs = ["dnsmasq_test.go"], - embed = [":dnsmasq"], - deps = [ - "@com_github_spf13_afero//:afero", - "@com_github_stretchr_testify//assert", - "@com_github_stretchr_testify//require", - ], -) diff --git a/hack/qemu-metadata-api/dhcp/dnsmasq/dnsmasq.go b/hack/qemu-metadata-api/dhcp/dnsmasq/dnsmasq.go deleted file mode 100644 index 4374e074b..000000000 --- a/hack/qemu-metadata-api/dhcp/dnsmasq/dnsmasq.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright (c) Edgeless Systems GmbH - -SPDX-License-Identifier: BUSL-1.1 -*/ - -package dnsmasq - -import ( - "bufio" - "strings" - - "github.com/edgelesssys/constellation/v2/hack/qemu-metadata-api/dhcp" - "github.com/spf13/afero" -) - -// DNSMasq is a DHCP lease getter for dnsmasq. -type DNSMasq struct { - leasesFileName string - fs *afero.Afero -} - -// New creates a new DNSMasq. -func New(leasesFileName string) *DNSMasq { - return &DNSMasq{ - leasesFileName: leasesFileName, - fs: &afero.Afero{Fs: afero.NewOsFs()}, - } -} - -// GetDHCPLeases returns the underlying DHCP leases. 
-func (d *DNSMasq) GetDHCPLeases() ([]dhcp.NetworkDHCPLease, error) { - file, err := d.fs.Open(d.leasesFileName) - if err != nil { - return nil, err - } - defer file.Close() - - // read file - var leases []dhcp.NetworkDHCPLease - scanner := bufio.NewScanner(file) - for scanner.Scan() { - line := scanner.Text() - // split by whitespace - fields := strings.Fields(line) - leases = append(leases, dhcp.NetworkDHCPLease{ - IPaddr: fields[2], - Hostname: fields[3], - }) - } - if err := scanner.Err(); err != nil { - return nil, err - } - - return leases, nil -} diff --git a/hack/qemu-metadata-api/dhcp/dnsmasq/dnsmasq_test.go b/hack/qemu-metadata-api/dhcp/dnsmasq/dnsmasq_test.go deleted file mode 100644 index 2f079a471..000000000 --- a/hack/qemu-metadata-api/dhcp/dnsmasq/dnsmasq_test.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright (c) Edgeless Systems GmbH - -SPDX-License-Identifier: BUSL-1.1 -*/ - -package dnsmasq - -import ( - "testing" - - "github.com/spf13/afero" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestGetDHCPLeases(t *testing.T) { - require := require.New(t) - assert := assert.New(t) - - fs := afero.NewMemMapFs() - leasesFileName := "dnsmasq.leases" - leasesFile, err := fs.Create(leasesFileName) - require.NoError(err) - _, err = leasesFile.WriteString("1716219737 52:54:af:a1:98:9f 10.42.2.1 worker0 ff:c2:72:f6:09:00:02:00:00:ab:11:18:fc:48:85:40:3f:bc:41\n") - require.NoError(err) - _, err = leasesFile.WriteString("1716219735 52:54:7f:8f:ba:91 10.42.1.1 controlplane0 ff:c2:72:f6:09:00:02:00:00:ab:11:21:7c:b5:14:ec:43:b7:43\n") - require.NoError(err) - leasesFile.Close() - - d := DNSMasq{leasesFileName: leasesFileName, fs: &afero.Afero{Fs: fs}} - leases, err := d.GetDHCPLeases() - require.NoError(err) - - assert.Len(leases, 2) - assert.Equal("10.42.2.1", leases[0].IPaddr) - assert.Equal("worker0", leases[0].Hostname) - assert.Equal("10.42.1.1", leases[1].IPaddr) - assert.Equal("controlplane0", leases[1].Hostname) -} diff --git a/hack/qemu-metadata-api/dhcp/virtwrapper/virtwrapper.go b/hack/qemu-metadata-api/dhcp/virtwrapper/virtwrapper.go deleted file mode 100644 index 7a31ff337..000000000 --- a/hack/qemu-metadata-api/dhcp/virtwrapper/virtwrapper.go +++ /dev/null @@ -1,7 +0,0 @@ -/* -Copyright (c) Edgeless Systems GmbH - -SPDX-License-Identifier: BUSL-1.1 -*/ - -package virtwrapper diff --git a/hack/qemu-metadata-api/dhcp/virtwrapper/virtwrapper_cgo.go b/hack/qemu-metadata-api/dhcp/virtwrapper/virtwrapper_cgo.go deleted file mode 100644 index b74c65110..000000000 --- a/hack/qemu-metadata-api/dhcp/virtwrapper/virtwrapper_cgo.go +++ /dev/null @@ -1,60 +0,0 @@ -//go:build cgo - -/* -Copyright (c) Edgeless Systems GmbH - -SPDX-License-Identifier: BUSL-1.1 -*/ - -package virtwrapper - -import ( - "github.com/edgelesssys/constellation/v2/hack/qemu-metadata-api/dhcp" - - "libvirt.org/go/libvirt" -) - -// Connect wraps a libvirt connection. -type Connect struct { - conn *libvirt.Connect - networkName string -} - -// New creates a new libvirt Connct wrapper. -func New(conn *libvirt.Connect, networkName string) *Connect { - return &Connect{ - conn: conn, - networkName: networkName, - } -} - -// LookupNetworkByName looks up a network by name. -func (c *Connect) lookupNetworkByName(name string) (*libvirt.Network, error) { - net, err := c.conn.LookupNetworkByName(name) - if err != nil { - return nil, err - } - return net, nil -} - -// GetDHCPLeases returns the underlying DHCP leases. 
-func (c *Connect) GetDHCPLeases() ([]dhcp.NetworkDHCPLease, error) { - net, err := c.lookupNetworkByName(c.networkName) - if err != nil { - return nil, err - } - defer net.Free() - - leases, err := net.GetDHCPLeases() - if err != nil { - return nil, err - } - ret := make([]dhcp.NetworkDHCPLease, len(leases)) - for i, l := range leases { - ret[i] = dhcp.NetworkDHCPLease{ - IPaddr: l.IPaddr, - Hostname: l.Hostname, - } - } - return ret, nil -} diff --git a/hack/qemu-metadata-api/dhcp/virtwrapper/virtwrapper_cross.go b/hack/qemu-metadata-api/dhcp/virtwrapper/virtwrapper_cross.go deleted file mode 100644 index 0467ab907..000000000 --- a/hack/qemu-metadata-api/dhcp/virtwrapper/virtwrapper_cross.go +++ /dev/null @@ -1,24 +0,0 @@ -//go:build !cgo - -/* -Copyright (c) Edgeless Systems GmbH - -SPDX-License-Identifier: BUSL-1.1 -*/ - -package virtwrapper - -import ( - "errors" - - "github.com/edgelesssys/constellation/v2/hack/qemu-metadata-api/dhcp" -) - -// Connect wraps a libvirt connection. -type Connect struct{} - -// GetDHCPLeases returns the underlying DHCP leases. -// This function errors if CGO is disabled. -func (n *Connect) GetDHCPLeases() ([]dhcp.NetworkDHCPLease, error) { - return nil, errors.New("using virtwrapper requires building with CGO") -} diff --git a/hack/qemu-metadata-api/main.go b/hack/qemu-metadata-api/main.go index 0693b9cd4..dd5be683c 100644 --- a/hack/qemu-metadata-api/main.go +++ b/hack/qemu-metadata-api/main.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package main @@ -13,38 +13,29 @@ import ( "log/slog" "os" - "github.com/edgelesssys/constellation/v2/hack/qemu-metadata-api/dhcp/dnsmasq" - "github.com/edgelesssys/constellation/v2/hack/qemu-metadata-api/dhcp/virtwrapper" "github.com/edgelesssys/constellation/v2/hack/qemu-metadata-api/server" + "github.com/edgelesssys/constellation/v2/hack/qemu-metadata-api/virtwrapper" "github.com/edgelesssys/constellation/v2/internal/logger" "libvirt.org/go/libvirt" ) func main() { bindPort := flag.String("port", "8080", "Port to bind to") - targetNetwork := flag.String("network", "constellation-network", "Name of the network in libvirt") + targetNetwork := flag.String("network", "constellation-network", "Name of the network in QEMU to use") libvirtURI := flag.String("libvirt-uri", "qemu:///system", "URI of the libvirt connection") - leasesFileName := flag.String("dnsmasq-leases", "", "Path to the dnsmasq leases file") initSecretHash := flag.String("initsecrethash", "", "brcypt hash of the init secret") flag.Parse() log := logger.NewJSONLogger(slog.LevelInfo) - var leaseGetter server.LeaseGetter - if *leasesFileName == "" { - conn, err := libvirt.NewConnect(*libvirtURI) - if err != nil { - log.With(slog.Any("error", err)).Error("Failed to connect to libvirt") - os.Exit(1) - } - defer conn.Close() - leaseGetter = virtwrapper.New(conn, *targetNetwork) - } else { - log.Info("Using dnsmasq leases file") - leaseGetter = dnsmasq.New(*leasesFileName) + conn, err := libvirt.NewConnect(*libvirtURI) + if err != nil { + log.With(slog.Any("error", err)).Error("Failed to connect to libvirt") + os.Exit(1) } + defer conn.Close() - serv := server.New(log, *targetNetwork, *initSecretHash, leaseGetter) + serv := server.New(log, *targetNetwork, *initSecretHash, &virtwrapper.Connect{Conn: conn}) if err := serv.ListenAndServe(*bindPort); err != nil { log.With(slog.Any("error", err)).Error("Failed to serve") os.Exit(1) diff --git a/hack/qemu-metadata-api/main_cross.go 
b/hack/qemu-metadata-api/main_cross.go index 3398538fa..18ac575ba 100644 --- a/hack/qemu-metadata-api/main_cross.go +++ b/hack/qemu-metadata-api/main_cross.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package main diff --git a/hack/qemu-metadata-api/server/BUILD.bazel b/hack/qemu-metadata-api/server/BUILD.bazel index 683ec416a..5def36c03 100644 --- a/hack/qemu-metadata-api/server/BUILD.bazel +++ b/hack/qemu-metadata-api/server/BUILD.bazel @@ -10,7 +10,7 @@ go_library( ], visibility = ["//visibility:public"], deps = [ - "//hack/qemu-metadata-api/dhcp", + "//hack/qemu-metadata-api/virtwrapper", "//internal/cloud/metadata", "//internal/role", ], @@ -18,17 +18,22 @@ go_library( go_test( name = "server_test", - srcs = ["server_test.go"], + srcs = [ + "server_cgo_test.go", + "server_cross_test.go", + "server_test.go", + ], embed = [":server"], # keep pure = "on", # keep race = "off", deps = [ - "//hack/qemu-metadata-api/dhcp", + "//hack/qemu-metadata-api/virtwrapper", "//internal/cloud/metadata", "//internal/logger", "@com_github_stretchr_testify//assert", "@com_github_stretchr_testify//require", + "@org_libvirt_go_libvirt//:libvirt", ], ) diff --git a/hack/qemu-metadata-api/server/server.go b/hack/qemu-metadata-api/server/server.go index 4394732b2..4f0cad9e9 100644 --- a/hack/qemu-metadata-api/server/server.go +++ b/hack/qemu-metadata-api/server/server.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package server @@ -14,7 +14,7 @@ import ( "net/http" "strings" - "github.com/edgelesssys/constellation/v2/hack/qemu-metadata-api/dhcp" + "github.com/edgelesssys/constellation/v2/hack/qemu-metadata-api/virtwrapper" "github.com/edgelesssys/constellation/v2/internal/cloud/metadata" "github.com/edgelesssys/constellation/v2/internal/role" ) @@ -22,16 +22,16 @@ import ( // Server that provides QEMU metadata. type Server struct { log *slog.Logger - dhcpLeaseGetter LeaseGetter + virt virConnect network string initSecretHashVal []byte } // New creates a new Server. -func New(log *slog.Logger, network, initSecretHash string, getter LeaseGetter) *Server { +func New(log *slog.Logger, network, initSecretHash string, conn virConnect) *Server { return &Server{ log: log, - dhcpLeaseGetter: getter, + virt: conn, network: network, initSecretHashVal: []byte(initSecretHash), } @@ -139,7 +139,15 @@ func (s *Server) getEndpoint(w http.ResponseWriter, r *http.Request) { log := s.log.With(slog.String("peer", r.RemoteAddr)) log.Info("Serving GET request for /endpoint") - leases, err := s.dhcpLeaseGetter.GetDHCPLeases() + net, err := s.virt.LookupNetworkByName(s.network) + if err != nil { + log.With(slog.Any("error", err)).Error("Failed to lookup network") + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + defer net.Free() + + leases, err := net.GetDHCPLeases() if err != nil { log.With(slog.Any("error", err)).Error("Failed to get DHCP leases") http.Error(w, err.Error(), http.StatusInternalServerError) @@ -164,7 +172,13 @@ func (s *Server) getEndpoint(w http.ResponseWriter, r *http.Request) { // listAll returns a list of all active peers. 
func (s *Server) listAll() ([]metadata.InstanceMetadata, error) { - leases, err := s.dhcpLeaseGetter.GetDHCPLeases() + net, err := s.virt.LookupNetworkByName(s.network) + if err != nil { + return nil, err + } + defer net.Free() + + leases, err := net.GetDHCPLeases() if err != nil { return nil, err } @@ -187,7 +201,6 @@ func (s *Server) listAll() ([]metadata.InstanceMetadata, error) { return peers, nil } -// LeaseGetter is an interface for getting DHCP leases. -type LeaseGetter interface { - GetDHCPLeases() ([]dhcp.NetworkDHCPLease, error) +type virConnect interface { + LookupNetworkByName(name string) (*virtwrapper.Network, error) } diff --git a/hack/qemu-metadata-api/server/server_cgo_test.go b/hack/qemu-metadata-api/server/server_cgo_test.go new file mode 100644 index 000000000..59c569535 --- /dev/null +++ b/hack/qemu-metadata-api/server/server_cgo_test.go @@ -0,0 +1,41 @@ +//go:build cgo + +/* +Copyright (c) Edgeless Systems GmbH + +SPDX-License-Identifier: AGPL-3.0-only +*/ + +package server + +import ( + "github.com/edgelesssys/constellation/v2/hack/qemu-metadata-api/virtwrapper" + "libvirt.org/go/libvirt" +) + +type stubNetwork struct { + leases []libvirt.NetworkDHCPLease + getLeaseErr error +} + +func newStubNetwork(leases []virtwrapper.NetworkDHCPLease, getLeaseErr error) stubNetwork { + libvirtLeases := make([]libvirt.NetworkDHCPLease, len(leases)) + for i, l := range leases { + libvirtLeases[i] = libvirt.NetworkDHCPLease{ + IPaddr: l.IPaddr, + Hostname: l.Hostname, + } + } + return stubNetwork{ + leases: libvirtLeases, + getLeaseErr: getLeaseErr, + } +} + +func (n stubNetwork) GetDHCPLeases() ([]libvirt.NetworkDHCPLease, error) { + return n.leases, n.getLeaseErr +} + +func (n stubNetwork) Free() error { + return nil +} diff --git a/hack/qemu-metadata-api/server/server_cross_test.go b/hack/qemu-metadata-api/server/server_cross_test.go new file mode 100644 index 000000000..3f4488b26 --- /dev/null +++ b/hack/qemu-metadata-api/server/server_cross_test.go @@ -0,0 +1,31 @@ +//go:build !cgo + +/* +Copyright (c) Edgeless Systems GmbH + +SPDX-License-Identifier: AGPL-3.0-only +*/ + +package server + +import "github.com/edgelesssys/constellation/v2/hack/qemu-metadata-api/virtwrapper" + +type stubNetwork struct { + leases []virtwrapper.NetworkDHCPLease + getLeaseErr error +} + +func newStubNetwork(leases []virtwrapper.NetworkDHCPLease, getLeaseErr error) stubNetwork { + return stubNetwork{ + leases: leases, + getLeaseErr: getLeaseErr, + } +} + +func (n stubNetwork) GetDHCPLeases() ([]virtwrapper.NetworkDHCPLease, error) { + return n.leases, n.getLeaseErr +} + +func (n stubNetwork) Free() error { + return nil +} diff --git a/hack/qemu-metadata-api/server/server_test.go b/hack/qemu-metadata-api/server/server_test.go index ada18aaa3..3b04d214d 100644 --- a/hack/qemu-metadata-api/server/server_test.go +++ b/hack/qemu-metadata-api/server/server_test.go @@ -1,19 +1,21 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package server import ( + "context" "encoding/json" + "errors" "io" "net/http" "net/http/httptest" "testing" - "github.com/edgelesssys/constellation/v2/hack/qemu-metadata-api/dhcp" + "github.com/edgelesssys/constellation/v2/hack/qemu-metadata-api/virtwrapper" "github.com/edgelesssys/constellation/v2/internal/cloud/metadata" "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/stretchr/testify/assert" @@ -21,13 +23,15 @@ import ( ) func TestListAll(t *testing.T) { + someErr := errors.New("error") + 
testCases := map[string]struct { - wantErr bool - stubLeaseGetter *stubLeaseGetter + wantErr bool + connect *stubConnect }{ "success": { - stubLeaseGetter: &stubLeaseGetter{ - leases: []dhcp.NetworkDHCPLease{ + connect: &stubConnect{ + network: newStubNetwork([]virtwrapper.NetworkDHCPLease{ { IPaddr: "192.0.100.1", Hostname: "control-plane-0", @@ -40,12 +44,20 @@ func TestListAll(t *testing.T) { IPaddr: "192.0.200.1", Hostname: "worker-0", }, - }, + }, nil), }, }, + "LookupNetworkByName error": { + connect: &stubConnect{ + getNetworkErr: someErr, + }, + wantErr: true, + }, "GetDHCPLeases error": { - stubLeaseGetter: &stubLeaseGetter{ - getErr: assert.AnError, + connect: &stubConnect{ + network: stubNetwork{ + getLeaseErr: someErr, + }, }, wantErr: true, }, @@ -55,7 +67,7 @@ func TestListAll(t *testing.T) { t.Run(name, func(t *testing.T) { assert := assert.New(t) - server := New(logger.NewTest(t), "test", "initSecretHash", tc.stubLeaseGetter) + server := New(logger.NewTest(t), "test", "initSecretHash", tc.connect) res, err := server.listAll() @@ -64,56 +76,58 @@ func TestListAll(t *testing.T) { return } assert.NoError(err) - assert.Len(tc.stubLeaseGetter.leases, len(res)) + assert.Len(tc.connect.network.leases, len(res)) }) } } func TestListSelf(t *testing.T) { + someErr := errors.New("error") + testCases := map[string]struct { - remoteAddr string - stubLeaseGetter *stubLeaseGetter - wantErr bool + remoteAddr string + connect *stubConnect + wantErr bool }{ "success": { remoteAddr: "192.0.100.1:1234", - stubLeaseGetter: &stubLeaseGetter{ - leases: []dhcp.NetworkDHCPLease{ + connect: &stubConnect{ + network: newStubNetwork([]virtwrapper.NetworkDHCPLease{ { IPaddr: "192.0.100.1", Hostname: "control-plane-0", }, - }, + }, nil), }, }, "listAll error": { remoteAddr: "192.0.100.1:1234", - stubLeaseGetter: &stubLeaseGetter{ - getErr: assert.AnError, + connect: &stubConnect{ + getNetworkErr: someErr, }, wantErr: true, }, "remoteAddr error": { remoteAddr: "", - stubLeaseGetter: &stubLeaseGetter{ - leases: []dhcp.NetworkDHCPLease{ + connect: &stubConnect{ + network: newStubNetwork([]virtwrapper.NetworkDHCPLease{ { IPaddr: "192.0.100.1", Hostname: "control-plane-0", }, - }, + }, nil), }, wantErr: true, }, "peer not found": { remoteAddr: "192.0.200.1:1234", - stubLeaseGetter: &stubLeaseGetter{ - leases: []dhcp.NetworkDHCPLease{ + connect: &stubConnect{ + network: newStubNetwork([]virtwrapper.NetworkDHCPLease{ { IPaddr: "192.0.100.1", Hostname: "control-plane-0", }, - }, + }, nil), }, wantErr: true, }, @@ -124,9 +138,9 @@ func TestListSelf(t *testing.T) { assert := assert.New(t) require := require.New(t) - server := New(logger.NewTest(t), "test", "initSecretHash", tc.stubLeaseGetter) + server := New(logger.NewTest(t), "test", "initSecretHash", tc.connect) - req, err := http.NewRequestWithContext(t.Context(), http.MethodGet, "http://192.0.0.1/self", nil) + req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, "http://192.0.0.1/self", nil) require.NoError(err) req.RemoteAddr = tc.remoteAddr @@ -143,22 +157,22 @@ func TestListSelf(t *testing.T) { var metadata metadata.InstanceMetadata require.NoError(json.Unmarshal(metadataRaw, &metadata)) - assert.Equal(tc.stubLeaseGetter.leases[0].Hostname, metadata.Name) - assert.Equal(tc.stubLeaseGetter.leases[0].IPaddr, metadata.VPCIP) + assert.Equal(tc.connect.network.leases[0].Hostname, metadata.Name) + assert.Equal(tc.connect.network.leases[0].IPaddr, metadata.VPCIP) }) } } func TestListPeers(t *testing.T) { testCases := map[string]struct { - 
remoteAddr string - stubNetworkGetter *stubLeaseGetter - wantErr bool + remoteAddr string + connect *stubConnect + wantErr bool }{ "success": { remoteAddr: "192.0.100.1:1234", - stubNetworkGetter: &stubLeaseGetter{ - leases: []dhcp.NetworkDHCPLease{ + connect: &stubConnect{ + network: newStubNetwork([]virtwrapper.NetworkDHCPLease{ { IPaddr: "192.0.100.1", Hostname: "control-plane-0", @@ -167,13 +181,13 @@ func TestListPeers(t *testing.T) { IPaddr: "192.0.200.1", Hostname: "worker-0", }, - }, + }, nil), }, }, "listAll error": { remoteAddr: "192.0.100.1:1234", - stubNetworkGetter: &stubLeaseGetter{ - getErr: assert.AnError, + connect: &stubConnect{ + getNetworkErr: errors.New("error"), }, wantErr: true, }, @@ -184,9 +198,9 @@ func TestListPeers(t *testing.T) { assert := assert.New(t) require := require.New(t) - server := New(logger.NewTest(t), "test", "initSecretHash", tc.stubNetworkGetter) + server := New(logger.NewTest(t), "test", "initSecretHash", tc.connect) - req, err := http.NewRequestWithContext(t.Context(), http.MethodGet, "http://192.0.0.1/peers", nil) + req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, "http://192.0.0.1/peers", nil) require.NoError(err) req.RemoteAddr = tc.remoteAddr @@ -203,23 +217,22 @@ func TestListPeers(t *testing.T) { var metadata []metadata.InstanceMetadata require.NoError(json.Unmarshal(metadataRaw, &metadata)) - assert.Len(metadata, len(tc.stubNetworkGetter.leases)) + assert.Len(metadata, len(tc.connect.network.leases)) }) } } func TestInitSecretHash(t *testing.T) { - defaultConnect := &stubLeaseGetter{ - leases: []dhcp.NetworkDHCPLease{ + defaultConnect := &stubConnect{ + network: newStubNetwork([]virtwrapper.NetworkDHCPLease{ { IPaddr: "192.0.100.1", Hostname: "control-plane-0", }, - }, + }, nil), } - testCases := map[string]struct { - connect *stubLeaseGetter + connect *stubConnect method string wantHash string wantErr bool @@ -242,7 +255,7 @@ func TestInitSecretHash(t *testing.T) { server := New(logger.NewTest(t), "test", tc.wantHash, defaultConnect) - req, err := http.NewRequestWithContext(t.Context(), tc.method, "http://192.0.0.1/initsecrethash", nil) + req, err := http.NewRequestWithContext(context.Background(), tc.method, "http://192.0.0.1/initsecrethash", nil) require.NoError(err) w := httptest.NewRecorder() @@ -259,11 +272,11 @@ func TestInitSecretHash(t *testing.T) { } } -type stubLeaseGetter struct { - leases []dhcp.NetworkDHCPLease - getErr error +type stubConnect struct { + network stubNetwork + getNetworkErr error } -func (c stubLeaseGetter) GetDHCPLeases() ([]dhcp.NetworkDHCPLease, error) { - return c.leases, c.getErr +func (c stubConnect) LookupNetworkByName(_ string) (*virtwrapper.Network, error) { + return &virtwrapper.Network{Net: c.network}, c.getNetworkErr } diff --git a/hack/qemu-metadata-api/dhcp/virtwrapper/BUILD.bazel b/hack/qemu-metadata-api/virtwrapper/BUILD.bazel similarity index 68% rename from hack/qemu-metadata-api/dhcp/virtwrapper/BUILD.bazel rename to hack/qemu-metadata-api/virtwrapper/BUILD.bazel index 762c0b301..c1ad3b7c9 100644 --- a/hack/qemu-metadata-api/dhcp/virtwrapper/BUILD.bazel +++ b/hack/qemu-metadata-api/virtwrapper/BUILD.bazel @@ -7,10 +7,7 @@ go_library( "virtwrapper_cgo.go", "virtwrapper_cross.go", ], - importpath = "github.com/edgelesssys/constellation/v2/hack/qemu-metadata-api/dhcp/virtwrapper", + importpath = "github.com/edgelesssys/constellation/v2/hack/qemu-metadata-api/virtwrapper", visibility = ["//visibility:public"], - deps = [ - "//hack/qemu-metadata-api/dhcp", - 
"@org_libvirt_go_libvirt//:libvirt", - ], + deps = ["@org_libvirt_go_libvirt//:libvirt"], ) diff --git a/hack/qemu-metadata-api/dhcp/dhcp.go b/hack/qemu-metadata-api/virtwrapper/virtwrapper.go similarity index 50% rename from hack/qemu-metadata-api/dhcp/dhcp.go rename to hack/qemu-metadata-api/virtwrapper/virtwrapper.go index a6e3aa08a..4e5fb6732 100644 --- a/hack/qemu-metadata-api/dhcp/dhcp.go +++ b/hack/qemu-metadata-api/virtwrapper/virtwrapper.go @@ -1,12 +1,12 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ -package dhcp +package virtwrapper -// NetworkDHCPLease abstracts a DHCP lease. +// NetworkDHCPLease abstracts a libvirt DHCP lease. type NetworkDHCPLease struct { IPaddr string Hostname string diff --git a/hack/qemu-metadata-api/virtwrapper/virtwrapper_cgo.go b/hack/qemu-metadata-api/virtwrapper/virtwrapper_cgo.go new file mode 100644 index 000000000..cda0bed96 --- /dev/null +++ b/hack/qemu-metadata-api/virtwrapper/virtwrapper_cgo.go @@ -0,0 +1,56 @@ +//go:build cgo + +/* +Copyright (c) Edgeless Systems GmbH + +SPDX-License-Identifier: AGPL-3.0-only +*/ + +package virtwrapper + +import "libvirt.org/go/libvirt" + +// Connect wraps a libvirt connection. +type Connect struct { + Conn *libvirt.Connect +} + +// LookupNetworkByName looks up a network by name. +func (c *Connect) LookupNetworkByName(name string) (*Network, error) { + net, err := c.Conn.LookupNetworkByName(name) + if err != nil { + return nil, err + } + return &Network{Net: net}, nil +} + +// Network wraps a libvirt network. +type Network struct { + Net virNetwork +} + +// GetDHCPLeases returns the underlying DHCP leases. +func (n *Network) GetDHCPLeases() ([]NetworkDHCPLease, error) { + leases, err := n.Net.GetDHCPLeases() + if err != nil { + return nil, err + } + ret := make([]NetworkDHCPLease, len(leases)) + for i, l := range leases { + ret[i] = NetworkDHCPLease{ + IPaddr: l.IPaddr, + Hostname: l.Hostname, + } + } + return ret, nil +} + +// Free the network resource. +func (n *Network) Free() { + _ = n.Net.Free() +} + +type virNetwork interface { + GetDHCPLeases() ([]libvirt.NetworkDHCPLease, error) + Free() error +} diff --git a/hack/qemu-metadata-api/virtwrapper/virtwrapper_cross.go b/hack/qemu-metadata-api/virtwrapper/virtwrapper_cross.go new file mode 100644 index 000000000..2faa95961 --- /dev/null +++ b/hack/qemu-metadata-api/virtwrapper/virtwrapper_cross.go @@ -0,0 +1,40 @@ +//go:build !cgo + +/* +Copyright (c) Edgeless Systems GmbH + +SPDX-License-Identifier: AGPL-3.0-only +*/ + +package virtwrapper + +import "errors" + +// Connect wraps a libvirt connection. +type Connect struct{} + +// LookupNetworkByName looks up a network by name. +// This function errors if CGO is disabled. +func (c *Connect) LookupNetworkByName(_ string) (*Network, error) { + return nil, errors.New("using virtwrapper requires building with CGO") +} + +// Network wraps a libvirt network. +type Network struct { + Net Net +} + +// GetDHCPLeases returns the underlying DHCP leases. +// This function errors if CGO is disabled. +func (n *Network) GetDHCPLeases() ([]NetworkDHCPLease, error) { + return n.Net.GetDHCPLeases() +} + +// Free the network resource. +// This function does nothing if CGO is disabled. +func (n *Network) Free() {} + +// Net is a libvirt Network. 
+type Net interface { + GetDHCPLeases() ([]NetworkDHCPLease, error) +} diff --git a/hack/tools/go.mod b/hack/tools/go.mod index 0655ebf97..a18b99dc7 100644 --- a/hack/tools/go.mod +++ b/hack/tools/go.mod @@ -1,13 +1,13 @@ module github.com/edgelesssys/constellation/v2/hack/tools -go 1.24.4 +go 1.22 require ( github.com/google/go-licenses v1.6.0 - github.com/google/keep-sorted v0.6.1 - github.com/katexochen/sh/v3 v3.11.0 - golang.org/x/tools v0.35.0 - golang.org/x/vuln v1.1.4 + github.com/google/keep-sorted v0.3.0 + github.com/katexochen/sh/v3 v3.7.0 + golang.org/x/tools v0.18.0 + golang.org/x/vuln v1.0.1 ) require ( @@ -15,8 +15,10 @@ require ( github.com/Workiva/go-datastructures v1.0.53 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emirpasic/gods v1.18.1 // indirect + github.com/frankban/quicktest v1.14.6 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/google/go-cmp v0.6.0 // indirect github.com/google/licenseclassifier v0.0.0-20210722185704-3043a050f148 // indirect github.com/google/renameio/v2 v2.0.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect @@ -26,7 +28,8 @@ require ( github.com/mattn/go-isatty v0.0.20 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/otiai10/copy v1.6.0 // indirect - github.com/rogpeppe/go-internal v1.14.1 // indirect + github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e // indirect + github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/rs/zerolog v1.31.0 // indirect github.com/sergi/go-diff v1.3.1 // indirect github.com/spf13/cobra v1.8.0 // indirect @@ -35,20 +38,19 @@ require ( github.com/stretchr/testify v1.8.4 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect go.opencensus.io v0.24.0 // indirect - golang.org/x/crypto v0.40.0 // indirect - golang.org/x/mod v0.26.0 // indirect - golang.org/x/net v0.42.0 // indirect - golang.org/x/sync v0.16.0 // indirect - golang.org/x/sys v0.34.0 // indirect - golang.org/x/telemetry v0.0.0-20250710130107-8d8967aff50b // indirect - golang.org/x/term v0.33.0 // indirect - golang.org/x/text v0.27.0 // indirect - golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated // indirect + golang.org/x/crypto v0.19.0 // indirect + golang.org/x/exp v0.0.0-20240213143201-ec583247a57a // indirect + golang.org/x/mod v0.15.0 // indirect + golang.org/x/net v0.21.0 // indirect + golang.org/x/sync v0.6.0 // indirect + golang.org/x/sys v0.17.0 // indirect + golang.org/x/term v0.17.0 // indirect + golang.org/x/text v0.14.0 // indirect + google.golang.org/protobuf v1.33.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/src-d/go-billy.v4 v4.3.2 // indirect gopkg.in/src-d/go-git.v4 v4.13.1 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/klog/v2 v2.120.1 // indirect - mvdan.cc/editorconfig v0.3.0 // indirect + mvdan.cc/editorconfig v0.2.0 // indirect ) diff --git a/hack/tools/go.sum b/hack/tools/go.sum index d45292e9b..c0750a1c3 100644 --- a/hack/tools/go.sum +++ b/hack/tools/go.sum @@ -111,6 +111,8 @@ github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go. 
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0= github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= @@ -120,8 +122,6 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2 github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI= -github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -174,14 +174,15 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= -github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-licenses v1.6.0 h1:MM+VCXf0slYkpWO0mECvdYDVCxZXIQNal5wqUIXEZ/A= github.com/google/go-licenses v1.6.0/go.mod h1:Z8jgz2isEhdenOqd/00pq7I4y4k1xVVQJv415otjclo= github.com/google/go-replayers/httpreplay v1.1.1 h1:H91sIMlt1NZzN7R+/ASswyouLJfW0WLW7fhyUFvDEkY= github.com/google/go-replayers/httpreplay v1.1.1/go.mod h1:gN9GeLIs7l6NUoVaSSnv2RiqK1NiwAmD0MrKeC9IIks= -github.com/google/keep-sorted v0.6.1 h1:LNEdDKYxoXOrn4ZXC+FdUfJCVbUjhb2QPIBs5XISXCI= -github.com/google/keep-sorted v0.6.1/go.mod h1:JYy9vljs7P8b3QdPOQkywA+4u36FUHwsNITZIpJyPkE= +github.com/google/keep-sorted v0.3.0 h1:nsDd3h16Bf1KFNtfvzGoLaei95AMLswikiw1ICDOKPE= +github.com/google/keep-sorted v0.3.0/go.mod h1:sWIIDzox6hizdt59dsqD2yH800NXYBwRqylmBDs/QXM= github.com/google/licenseclassifier v0.0.0-20210722185704-3043a050f148 h1:TJsAqW6zLRMDTyGmc9TPosfn9OyVlHs8Hrn3pY6ONSY= github.com/google/licenseclassifier v0.0.0-20210722185704-3043a050f148/go.mod h1:rq9F0RSpNKlrefnf6ZYMHKUnEJBCNzf6AcCXMYBeYvE= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= @@ -235,8 +236,8 @@ github.com/jbenet/go-context 
v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/katexochen/sh/v3 v3.11.0 h1:L71eTHDOVv7CKiCJVDm28EmIdIT/s8dXzKgddW9Uxok= -github.com/katexochen/sh/v3 v3.11.0/go.mod h1:Z6jje5FhaLunbZtZlnYi6qcaqMO+rdh8m0IJ2jWTbew= +github.com/katexochen/sh/v3 v3.7.0 h1:jrU9BWBgp9o2NcetUVm3dNpQ2SK1zG6aF6WF0wtPajc= +github.com/katexochen/sh/v3 v3.7.0/go.mod h1:DSfEtJYp0xGV3Ex3oTePemLXE4F6rIj9hJtu2uYoDh0= github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= @@ -268,6 +269,8 @@ github.com/otiai10/mint v1.3.2 h1:VYWnrP5fXmz1MXvjuUvcBrXSjGE6xjON+axB/UrpO3E= github.com/otiai10/mint v1.3.2/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo= github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -276,8 +279,9 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= -github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.31.0 h1:FcTR3NnLWW+NnTwwhFWiJSZr4ECLpqCm6QsEnyvbV4A= github.com/rs/zerolog v1.31.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= @@ -341,8 +345,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM= -golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY= 
+golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -353,6 +357,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20240213143201-ec583247a57a h1:HinSgX1tJRX3KsL//Gxynpw5CTOAIPhgL4W8PNiIpVE= +golang.org/x/exp v0.0.0-20240213143201-ec583247a57a/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -380,8 +386,8 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg= -golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ= +golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -428,8 +434,8 @@ golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= -golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs= -golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -466,8 +472,8 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ 
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= -golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -538,16 +544,14 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA= -golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/telemetry v0.0.0-20250710130107-8d8967aff50b h1:DU+gwOBXU+6bO0sEyO7o/NeMlxZxCZEvI7v+J4a1zRQ= -golang.org/x/telemetry v0.0.0-20250710130107-8d8967aff50b/go.mod h1:4ZwOYna0/zsOKwuR5X/m0QFOJpSZvAxFfkQT+Erd9D4= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= -golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg= -golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0= +golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -559,8 +563,8 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4= -golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -619,14 +623,10 @@ golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= -golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0= -golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw= -golang.org/x/tools/go/expect v0.1.0-deprecated h1:jY2C5HGYR5lqex3gEniOQL0r7Dq5+VGVgY1nudX5lXY= -golang.org/x/tools/go/expect v0.1.0-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= -golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM= -golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated/go.mod h1:RVAQXBGNv1ib0J382/DPCRS/BPnsGebyM1Gj5VSDpG8= -golang.org/x/vuln v1.1.4 h1:Ju8QsuyhX3Hk8ma3CesTbO8vfJD9EvUBgHvkxHBzj0I= -golang.org/x/vuln v1.1.4/go.mod h1:F+45wmU18ym/ca5PLTPLsSzr2KppzswxPP603ldA67s= +golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= +golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= +golang.org/x/vuln v1.0.1 h1:KUas02EjQK5LTuIx1OylBQdKKZ9jeugs+HiqO5HormU= +golang.org/x/vuln v1.0.1/go.mod h1:bb2hMwln/tqxg32BNY4CcxHWtHXuYa3SbIBmtsyjxtM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -813,6 +813,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -845,8 +847,8 @@ honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -mvdan.cc/editorconfig v0.3.0 h1:D1D2wLYEYGpawWT5SpM5pRivgEgXjtEXwC9MWhEY0gQ= -mvdan.cc/editorconfig v0.3.0/go.mod h1:NcJHuDtNOTEJ6251indKiWuzK6+VcrMuLzGMLKBFupQ= +mvdan.cc/editorconfig v0.2.0 h1:XL+7ys6ls/RKrkUNFQvEwIvNHh+JKx8Mj1pUV5wQxQE= +mvdan.cc/editorconfig v0.2.0/go.mod h1:lvnnD3BNdBYkhq+B4uBuFFKatfp02eB6HixDvEz91C0= rsc.io/binaryregexp v0.2.0/go.mod 
h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/hack/tools/tools.go b/hack/tools/tools.go index a7e28843b..da96d9ed6 100644 --- a/hack/tools/tools.go +++ b/hack/tools/tools.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // The tools module is used to keep tool dependencies separate from the main dependencies of the repo diff --git a/hack/versioninfogen/main.go b/hack/versioninfogen/main.go index c43e0488b..21183c131 100644 --- a/hack/versioninfogen/main.go +++ b/hack/versioninfogen/main.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package main diff --git a/image/BUILD.bazel b/image/BUILD.bazel index 772801b0c..681c6f385 100644 --- a/image/BUILD.bazel +++ b/image/BUILD.bazel @@ -1,12 +1,16 @@ load("@aspect_bazel_lib//lib:copy_file.bzl", "copy_file") load("@rules_pkg//:pkg.bzl", "pkg_tar") -load("@rules_pkg//pkg:mappings.bzl", "pkg_attributes", "pkg_files", "strip_prefix") +load("@rules_pkg//pkg:mappings.bzl", "pkg_files", "strip_prefix") + +filegroup( + name = "sysroot_tree", + srcs = glob(["sysroot-tree/**"]), +) pkg_files( name = "sysroot", - srcs = glob(["sysroot-tree/**"]), - attributes = pkg_attributes(mode = "0555"), - strip_prefix = strip_prefix.from_pkg("sysroot-tree"), + srcs = [":sysroot_tree"], + strip_prefix = strip_prefix.from_pkg() + "sysroot-tree", visibility = ["//visibility:public"], ) diff --git a/image/README.md b/image/README.md index c0d8c1975..9d9de9952 100644 --- a/image/README.md +++ b/image/README.md @@ -4,9 +4,6 @@ Ensure you have Nix installed. This is a requirement for the following steps. Consult the [developer docs](/dev-docs/workflows/build-develop-deploy.md) for more info. At the very least, `nix` should be in your PATH. -Building the image also requires `newuidmap` and `newgidmap` to be present in the PATH. On Debian and Ubuntu, these can be sourced through -the `uidmap` package. - ## Build You can build any image using Bazel. @@ -19,28 +16,113 @@ bazel query //image/system/... You can either build a group of images (all images for a cloud provider, a stream, ...) or a single image by selecting a target. ```sh -bazel build //image/system:azure_azure-sev-snp_stable +bazel build //image/system:openstack_qemu-vtpm_debug ``` The location of the destination folder can be queried like this: ```sh -bazel cquery --output=files //image/system:azure_azure-sev-snp_stable +bazel cquery --output=files //image/system:openstack_qemu-vtpm_debug ``` -## Build and Upload +## Upload to CSP -Similarly, you can also build and upload images to the respective CSP within a single step with the `upload_*` targets. +Warning! Never set `--version` to a value that is already used for a release image. + +
+AWS + +- Install `aws` cli (see [here](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html)) +- Login to AWS (see [here](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-quickstart.html)) +- Choose secure boot PKI public keys (one of `pki_dev`, `pki_test`, `pki_prod`) + - `pki_dev` can be used for local image builds + - `pki_test` is used by the CI for non-release images + - `pki_prod` is used for release images ```sh -bazel run //image/system:upload_aws_aws-sev-snp_console -- --ref deps-image-fedora-40 --upload-measurements +# Warning! Never set `--version` to a value that is already used for a release image. +# Instead, use a `ref` that corresponds to your branch name. +bazel run //image/upload -- image aws --verbose --raw-image path/to/constellation.raw --attestation-variant "" --version ref/foo/stream/nightly/v2.7.0-pre-asdf ``` -The `--ref` should be the branch you're building images on. It should **not contain slashes**. Slashes should be replaced with dashes to -not break the filesystem structure of the image storages. +
-Optionally, the `--upload-measurements` option can be used to specify that measurements for the image should be uploaded, and `--fake-sign` specifies -that a debugging signing key should be used to sign the measurements, which is done for debug images. +
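A ref that satisfies this convention can be derived directly from the branch name. The following is a minimal sketch (the target name and flags are taken from the AWS example above; deriving the ref via git is illustrative):

```sh
# Derive a storage-safe ref by replacing slashes in the branch name with dashes.
ref="$(git rev-parse --abbrev-ref HEAD | tr '/' '-')"
bazel run //image/system:upload_aws_aws-sev-snp_console -- --ref "${ref}" --upload-measurements
```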
+GCP + +- Install `gcloud` and `gsutil` (see [here](https://cloud.google.com/sdk/docs/install)) +- Login to GCP (see [here](https://cloud.google.com/sdk/docs/authorizing)) +- Choose secure boot PKI public keys (one of `pki_dev`, `pki_test`, `pki_prod`) + - `pki_dev` can be used for local image builds + - `pki_test` is used by the CI for non-release images + - `pki_prod` is used for release images + +```sh +export GCP_RAW_IMAGE_PATH=$(realpath path/to/constellation.raw) +export GCP_IMAGE_PATH=path/to/image.tar.gz +upload/pack.sh gcp ${GCP_RAW_IMAGE_PATH} ${GCP_IMAGE_PATH} +# Warning! Never set `--version` to a value that is already used for a release image. +# Instead, use a `ref` that corresponds to your branch name. +bazel run //image/upload -- image gcp --verbose --raw-image "${GCP_IMAGE_PATH}" --attestation-variant "sev-es" --version ref/foo/stream/nightly/v2.7.0-pre-asdf +``` + +
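For reference, GCE imports images from a gzipped GNU tarball that contains the raw disk under the fixed name `disk.raw`. The repo's `upload/pack.sh` is the authoritative packaging step; the following is only a rough sketch of that format:

```sh
# Pack the raw image in the layout GCE expects: a sparse disk.raw inside a
# gzipped GNU tar archive.
cp --sparse=always "${GCP_RAW_IMAGE_PATH}" disk.raw
tar --format=oldgnu -Sczf "${GCP_IMAGE_PATH}" disk.raw
```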
+ +
+Azure + +Note: + +> For testing purposes, it is a lot simpler to disable Secure Boot for the uploaded image! +> Disabling Secure Boot allows you to skip the VMGS creation steps above. + +- Install `az` and `azcopy` (see [here](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli)) +- Login to Azure (see [here](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli)) +- Optional (if Secure Boot should be enabled) [Prepare virtual machine guest state (VMGS) with customized NVRAM or use existing VMGS blob](#azure-secure-boot) + +```sh +export AZURE_RAW_IMAGE_PATH=path/to/constellation.raw +export AZURE_IMAGE_PATH=path/to/image.vhd +upload/pack.sh azure "${AZURE_RAW_IMAGE_PATH}" "${AZURE_IMAGE_PATH}" +# Warning! Never set `--version` to a value that is already used for a release image. +# Instead, use a `ref` that corresponds to your branch name. +bazel run //image/upload -- image azure --verbose --raw-image "${AZURE_IMAGE_PATH}" --attestation-variant "cvm" --version ref/foo/stream/nightly/v2.7.0-pre-asdf +``` + +
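Azure ingests fixed-size VHDs rather than raw images. Again, `upload/pack.sh` is the authoritative conversion; a common stand-in using qemu-img (illustrative only, and ignoring the 1 MiB size alignment Azure may additionally require) looks like this:

```sh
# Convert the raw disk to a fixed-size VHD (qemu-img's "vpc" driver);
# force_size keeps the requested virtual size instead of rounding it.
qemu-img convert -f raw -O vpc -o subformat=fixed,force_size \
  "${AZURE_RAW_IMAGE_PATH}" "${AZURE_IMAGE_PATH}"
```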
+ +
+OpenStack + +Note: + +> OpenStack is not a global cloud provider, but rather software that can be installed on-premises. +> This means we do not upload the image to a cloud provider, but to our CDN. + +- Install `aws` cli (see [here](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html)) +- Login to AWS (see [here](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-quickstart.html)) + +```sh +# Warning! Never set `--version` to a value that is already used for a release image. +# Instead, use a `ref` that corresponds to your branch name. +bazel run //image/upload -- image openstack --verbose --raw-image path/to/constellation.raw --attestation-variant "sev" --version ref/foo/stream/nightly/v2.7.0-pre-asdf +``` + +
+ +
+QEMU + +- Install `aws` cli (see [here](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html)) +- Login to AWS (see [here](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-quickstart.html)) + +```sh +# Warning! Never set `--version` to a value that is already used for a release image. +# Instead, use a `ref` that corresponds to your branch name. +bazel run //image/upload -- image qemu --verbose --raw-image path/to/constellation.raw --attestation-variant "default" --version ref/foo/stream/nightly/v2.7.0-pre-asdf +``` + +
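All of the upload examples above pass a non-release `--version` of the form `ref/<ref>/stream/<stream>/<version>`. A purely illustrative way to compose such a value from the current branch (the `v2.7.0-pre` prefix and the short-commit suffix are placeholders, not a required scheme):

```sh
branch="$(git rev-parse --abbrev-ref HEAD)"
ref="${branch//\//-}"   # refs must not contain slashes
version="ref/${ref}/stream/nightly/v2.7.0-pre-$(git rev-parse --short HEAD)"
echo "${version}"
```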
## Kernel @@ -49,26 +131,3 @@ We track the latest longterm release, use sources directly from [kernel.org](htt srpm spec file. After building a Kernel rpm, we upload it to our CDN and use it in our image builds. - -## Upgrading to a new Fedora release - -- Search for the old Fedora releasever in the `image/` directory and replace every occurence (outside of lockfiles) with the new releasever -- Search for Fedora container images in Dockerfiles and upgrade the releasever -- Regenerate the package lockfile: `bazel run //image/mirror:update_packages` -- Build test images locally: - - `bazel query //image/system:all` (pick an image name from the output) - - `bazel build //image/system:IMAGE_NAME_HERE` (replace with an actual image name) -- Let CI build new images and run e2e tests -- Upgrade kernel spec under [edgelesssys/constellation-kernel](https://github.com/edgelesssys/constellation-kernel) to use new releasever - -## Adding new packages to the image - -- Find the package (i.e. it's _package name_) on [Koji](https://koji.fedoraproject.org/koji/) -- Add the package to the corresponding section in `./base/mkosi.conf` - - If the package is required to be present in the initrd, add it to `./initrd/mkosi.conf` -- Add the package to `./mirror/packages.txt` -- Update the package mirror: - ```sh - bazel run //image/mirror:update_packages - ``` -- Build new images (e.g. via CI) and run e2e tests diff --git a/image/base/BUILD.bazel b/image/base/BUILD.bazel index 9e613d4fd..9028b8376 100644 --- a/image/base/BUILD.bazel +++ b/image/base/BUILD.bazel @@ -1,6 +1,5 @@ load("@aspect_bazel_lib//lib:copy_to_directory.bzl", "copy_to_directory") load("@rules_pkg//:pkg.bzl", "pkg_tar") -load("@rules_pkg//pkg:mappings.bzl", "pkg_attributes", "pkg_files", "strip_prefix") load("//bazel/mkosi:mkosi_image.bzl", "mkosi_image") copy_to_directory( @@ -31,10 +30,11 @@ copy_to_directory( mkosi_image( name = "base_" + kernel_variant, srcs = [ - "mkosi.finalize", "mkosi.postinst", "mkosi.prepare", - ], + ] + glob([ + "mkosi.skeleton/**", + ]), outs = [ kernel_variant, kernel_variant + ".tar", @@ -45,7 +45,6 @@ copy_to_directory( kernel_variant + "-rpmdb.sqlite-wal", ], extra_trees = [ - ":skeleton", "//image:sysroot_tar", "//image:cryptsetup_closure", ], @@ -86,15 +85,3 @@ pkg_tar( tags = ["manual"], visibility = ["//visibility:public"], ) - -pkg_files( - name = "skeleton_files", - srcs = glob(["mkosi.skeleton/**"]), - attributes = pkg_attributes(mode = "0555"), - strip_prefix = strip_prefix.from_pkg("mkosi.skeleton"), -) - -pkg_tar( - name = "skeleton", - srcs = [":skeleton_files"], -) diff --git a/image/base/mkosi.conf b/image/base/mkosi.conf index 9201a05ff..736665422 100644 --- a/image/base/mkosi.conf +++ b/image/base/mkosi.conf @@ -1,7 +1,6 @@ [Distribution] Distribution=fedora -Release=40 -RepositoryKeyFetch=yes +Release=38 [Output] Format=tar @@ -41,7 +40,6 @@ Packages=containerd # Network Packages=iproute dbus - openssh-server systemd-networkd systemd-resolved diff --git a/image/base/mkosi.finalize b/image/base/mkosi.finalize deleted file mode 100755 index 561db202f..000000000 --- a/image/base/mkosi.finalize +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env bash -set -euxo pipefail - -# For some reason yet unknown, SourceDateEpoch is not applied correctly to the -# users added by systemd-sysusers. This has only been observed in our mkosi -# flake so far, not in an upstream mkosi configuration. -# TODO(burgerdev): wait for a couple of Nix package upgrades and try again? 
- -# Strategy: unset the "last password change" date without leaving a trace in -# /etc/shadow-. -tmp=$(mktemp) -cp -a "${BUILDROOT}/etc/shadow-" "${tmp}" -mkosi-chroot chage -d "" etcd -cp -a "${tmp}" "${BUILDROOT}/etc/shadow-" diff --git a/image/base/mkosi.postinst b/image/base/mkosi.postinst index 728e5ad17..62b680654 100755 --- a/image/base/mkosi.postinst +++ b/image/base/mkosi.postinst @@ -15,19 +15,3 @@ cp "${BUILDROOT}/usr/share/constellation/packagemanifest" "${OUTPUTDIR}/" # copy rpmdb to outputs cp "${BUILDROOT}"/var/lib/rpm/{rpmdb.sqlite-wal,rpmdb.sqlite-shm,rpmdb.sqlite,.rpm.lock} "${OUTPUTDIR}/" - -# FIXME(msanft): -# Hack to satisfy Bazel's [output expectations](./BUILD.bazel). -# 2 Bazel packages can't share the same output paths, as it seems, and the -# files being copied around here aren't large, so copying them around doesn't -# hurt. -cp "${OUTPUTDIR}/packagemanifest" "${OUTPUTDIR}/lts-packagemanifest" -cp "${OUTPUTDIR}/.rpm.lock" "${OUTPUTDIR}/lts-.rpm.lock" -cp "${OUTPUTDIR}/rpmdb.sqlite" "${OUTPUTDIR}/lts-rpmdb.sqlite" -cp "${OUTPUTDIR}/rpmdb.sqlite-shm" "${OUTPUTDIR}/lts-rpmdb.sqlite-shm" -cp "${OUTPUTDIR}/rpmdb.sqlite-wal" "${OUTPUTDIR}/lts-rpmdb.sqlite-wal" -cp "${OUTPUTDIR}/packagemanifest" "${OUTPUTDIR}/mainline-packagemanifest" -cp "${OUTPUTDIR}/.rpm.lock" "${OUTPUTDIR}/mainline-.rpm.lock" -cp "${OUTPUTDIR}/rpmdb.sqlite" "${OUTPUTDIR}/mainline-rpmdb.sqlite" -cp "${OUTPUTDIR}/rpmdb.sqlite-shm" "${OUTPUTDIR}/mainline-rpmdb.sqlite-shm" -cp "${OUTPUTDIR}/rpmdb.sqlite-wal" "${OUTPUTDIR}/mainline-rpmdb.sqlite-wal" diff --git a/image/base/mkosi.skeleton/etc/containers/containers.conf b/image/base/mkosi.skeleton/usr/etc/containers/containers.conf similarity index 100% rename from image/base/mkosi.skeleton/etc/containers/containers.conf rename to image/base/mkosi.skeleton/usr/etc/containers/containers.conf diff --git a/image/base/mkosi.skeleton/etc/containers/registries.conf b/image/base/mkosi.skeleton/usr/etc/containers/registries.conf similarity index 100% rename from image/base/mkosi.skeleton/etc/containers/registries.conf rename to image/base/mkosi.skeleton/usr/etc/containers/registries.conf diff --git a/image/base/mkosi.skeleton/usr/lib/systemd/system/constellation-bootstrapper.service b/image/base/mkosi.skeleton/usr/lib/systemd/system/constellation-bootstrapper.service index 30ca0acfe..76ef974ce 100644 --- a/image/base/mkosi.skeleton/usr/lib/systemd/system/constellation-bootstrapper.service +++ b/image/base/mkosi.skeleton/usr/lib/systemd/system/constellation-bootstrapper.service @@ -1,8 +1,7 @@ [Unit] Description=Constellation Bootstrapper Wants=network-online.target -Requires=sshd-keygen.target -After=network-online.target configure-constel-csp.service sshd-keygen.target +After=network-online.target configure-constel-csp.service After=export_constellation_debug.service [Service] @@ -11,7 +10,7 @@ RemainAfterExit=yes Restart=on-failure EnvironmentFile=/run/constellation.env Environment=PATH=/run/state/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin -ExecStart=/usr/bin/bootstrapper +ExecStart=/usr/bin/bootstrapper $CONSTELLATION_DEBUG_FLAGS [Install] WantedBy=multi-user.target diff --git a/image/base/mkosi.skeleton/usr/lib/systemd/system/constellation-upgrade-agent.service b/image/base/mkosi.skeleton/usr/lib/systemd/system/constellation-upgrade-agent.service index ffa204085..c3fefdcc5 100644 --- a/image/base/mkosi.skeleton/usr/lib/systemd/system/constellation-upgrade-agent.service +++ 
b/image/base/mkosi.skeleton/usr/lib/systemd/system/constellation-upgrade-agent.service @@ -8,7 +8,7 @@ RemainAfterExit=yes Restart=on-failure EnvironmentFile=/run/constellation.env Environment=PATH=/run/state/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin -ExecStart=/usr/bin/upgrade-agent +ExecStart=/usr/bin/upgrade-agent $CONSTELLATION_DEBUG_FLAGS [Install] WantedBy=multi-user.target diff --git a/image/base/mkosi.skeleton/usr/lib/systemd/system/export_constellation_debug.service b/image/base/mkosi.skeleton/usr/lib/systemd/system/export_constellation_debug.service index 9b0fccabe..6858dab9b 100644 --- a/image/base/mkosi.skeleton/usr/lib/systemd/system/export_constellation_debug.service +++ b/image/base/mkosi.skeleton/usr/lib/systemd/system/export_constellation_debug.service @@ -3,7 +3,7 @@ Description=Export Constellation Debug Level to Environment [Service] Type=oneshot -ExecStart=/bin/bash -c "tr ' ' '\n' < /proc/cmdline | grep -q 'constel.debug' && echo CONSTELLATION_DEBUG_FLAGS=--debug >> /run/constellation.env" +ExecStart=/bin/bash -c "tr ' ' '\n' < /proc/cmdline | grep -q 'constellation.debug' && echo CONSTELLATION_DEBUG_FLAGS=--debug >> /run/constellation.env" RemainAfterExit=yes [Install] diff --git a/image/initrd/BUILD.bazel b/image/initrd/BUILD.bazel index a959018ec..6301d6cb5 100644 --- a/image/initrd/BUILD.bazel +++ b/image/initrd/BUILD.bazel @@ -1,18 +1,17 @@ -load("@rules_pkg//:pkg.bzl", "pkg_tar") -load("@rules_pkg//pkg:mappings.bzl", "pkg_attributes", "pkg_files", "strip_prefix") load("//bazel/mkosi:mkosi_image.bzl", "mkosi_image") mkosi_image( name = "initrd", srcs = [ "mkosi.postinst", - ], + ] + glob([ + "mkosi.skeleton/**", + ]), outs = [ "image", "image.cpio.zst", ], extra_trees = [ - ":skeleton", "//image:sysroot_tar", "//image:cryptsetup_closure", "//disk-mapper/cmd:disk-mapper-package.tar", @@ -25,15 +24,3 @@ mkosi_image( ], visibility = ["//visibility:public"], ) - -pkg_files( - name = "skeleton_files", - srcs = glob(["mkosi.skeleton/**"]), - attributes = pkg_attributes(mode = "0555"), - strip_prefix = strip_prefix.from_pkg("mkosi.skeleton"), -) - -pkg_tar( - name = "skeleton", - srcs = [":skeleton_files"], -) diff --git a/image/initrd/mkosi.conf b/image/initrd/mkosi.conf index bceb53c8a..9c32e11ad 100644 --- a/image/initrd/mkosi.conf +++ b/image/initrd/mkosi.conf @@ -1,7 +1,6 @@ [Distribution] Distribution=fedora -Release=40 -RepositoryKeyFetch=yes +Release=38 [Output] Format=cpio diff --git a/image/measured-boot/cmd/main.go b/image/measured-boot/cmd/main.go index 1cf708ba3..01a544aae 100644 --- a/image/measured-boot/cmd/main.go +++ b/image/measured-boot/cmd/main.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package main diff --git a/image/measured-boot/extract/extract.go b/image/measured-boot/extract/extract.go index cd544a7ae..d96c302e0 100644 --- a/image/measured-boot/extract/extract.go +++ b/image/measured-boot/extract/extract.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package extract @@ -90,8 +90,9 @@ var ukiSections = []string{ ".initrd", ".splash", ".dtb", - ".uname", - ".sbat", + // uanme and sbat will be added in systemd-stub >= 254 + // ".uname", + // ".sbat", ".pcrsig", ".pcrkey", } diff --git a/image/measured-boot/extract/extract_test.go b/image/measured-boot/extract/extract_test.go index ec1b161aa..a21cfe467 100644 --- a/image/measured-boot/extract/extract_test.go 
+++ b/image/measured-boot/extract/extract_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package extract @@ -114,26 +114,6 @@ func TestPeFileSectionDigests(t *testing.T) { }, Measure: true, MeasureOrder: 5, }, - { - Name: ".uname", Size: 0x22, - Digest: [32]uint8{ - 0x32, 0xd5, 0x9d, 0x99, 0x0e, 0x9c, 0x1f, 0x7d, - 0xa5, 0x54, 0xcb, 0x88, 0x8e, 0x32, 0x38, 0xac, - 0x61, 0x93, 0xe5, 0xe7, 0x23, 0x0f, 0x99, 0xb1, - 0x97, 0x13, 0x8d, 0xd7, 0x23, 0xc0, 0xeb, 0xb6, - }, - Measure: true, MeasureOrder: 6, - }, - { - Name: ".sbat", Size: 0x10, - Digest: [32]uint8{ - 0x66, 0x30, 0xfb, 0x7d, 0x5b, 0xaf, 0x9d, 0x6c, - 0xd5, 0x1c, 0x9a, 0xc9, 0x54, 0x10, 0xe6, 0x8a, - 0xa3, 0xfe, 0xdb, 0x4a, 0xdd, 0xd4, 0x2b, 0x34, - 0x0e, 0x47, 0x11, 0xe2, 0x3c, 0xcc, 0xd4, 0xb2, - }, - Measure: true, MeasureOrder: 7, - }, { Name: ".pcrkey", Size: 0x12, @@ -143,7 +123,7 @@ func TestPeFileSectionDigests(t *testing.T) { 0x69, 0xd0, 0x86, 0xa6, 0xd6, 0x7d, 0x5f, 0xee, 0x88, 0xdb, 0x21, 0x90, 0xc4, 0xa7, 0x07, 0x26, }, - Measure: true, MeasureOrder: 9, + Measure: true, MeasureOrder: 7, }, { Name: ".data", @@ -201,6 +181,16 @@ func TestPeFileSectionDigests(t *testing.T) { }, Measure: false, MeasureOrder: -1, }, + { + Name: ".sbat", Size: 0x10, + Digest: [32]uint8{ + 0x66, 0x30, 0xfb, 0x7d, 0x5b, 0xaf, 0x9d, 0x6c, + 0xd5, 0x1c, 0x9a, 0xc9, 0x54, 0x10, 0xe6, 0x8a, + 0xa3, 0xfe, 0xdb, 0x4a, 0xdd, 0xd4, 0x2b, 0x34, + 0x0e, 0x47, 0x11, 0xe2, 0x3c, 0xcc, 0xd4, 0xb2, + }, + Measure: false, MeasureOrder: -1, + }, { Name: ".sdmagic", Size: 0x2d, Digest: [32]uint8{ @@ -221,6 +211,16 @@ func TestPeFileSectionDigests(t *testing.T) { }, Measure: false, MeasureOrder: -1, }, + { + Name: ".uname", Size: 0x22, + Digest: [32]uint8{ + 0x32, 0xd5, 0x9d, 0x99, 0x0e, 0x9c, 0x1f, 0x7d, + 0xa5, 0x54, 0xcb, 0x88, 0x8e, 0x32, 0x38, 0xac, + 0x61, 0x93, 0xe5, 0xe7, 0x23, 0x0f, 0x99, 0xb1, + 0x97, 0x13, 0x8d, 0xd7, 0x23, 0xc0, 0xeb, 0xb6, + }, + Measure: false, MeasureOrder: -1, + }, { Name: ".pcrsig", Size: 0x216, Digest: [32]uint8{ @@ -229,7 +229,7 @@ func TestPeFileSectionDigests(t *testing.T) { 0xb8, 0x13, 0xb5, 0x31, 0xb0, 0x56, 0x3e, 0x91, 0x20, 0x55, 0x6c, 0xf7, 0x25, 0x01, 0xa3, 0x26, }, - Measure: false, MeasureOrder: 8, + Measure: false, MeasureOrder: 6, }, }, sectionDigests) diff --git a/image/measured-boot/fixtures/fixtures.go b/image/measured-boot/fixtures/fixtures.go index 6e4662037..0e9372594 100644 --- a/image/measured-boot/fixtures/fixtures.go +++ b/image/measured-boot/fixtures/fixtures.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package fixtures diff --git a/image/measured-boot/measure/authentihash.go b/image/measured-boot/measure/authentihash.go index a6facd887..e7e28f65f 100644 --- a/image/measured-boot/measure/authentihash.go +++ b/image/measured-boot/measure/authentihash.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package measure diff --git a/image/measured-boot/measure/authentihash_test.go b/image/measured-boot/measure/authentihash_test.go index 9032e6cbf..81cd27652 100644 --- a/image/measured-boot/measure/authentihash_test.go +++ b/image/measured-boot/measure/authentihash_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package measure diff --git 
a/image/measured-boot/measure/measure_test.go b/image/measured-boot/measure/measure_test.go index 7c37fc864..a787d8846 100644 --- a/image/measured-boot/measure/measure_test.go +++ b/image/measured-boot/measure/measure_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package measure diff --git a/image/measured-boot/measure/pcr.go b/image/measured-boot/measure/pcr.go index d44ea3271..f90ddeef2 100644 --- a/image/measured-boot/measure/pcr.go +++ b/image/measured-boot/measure/pcr.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package measure diff --git a/image/measured-boot/measure/pcr04.go b/image/measured-boot/measure/pcr04.go index 5117860ee..6a343bee9 100644 --- a/image/measured-boot/measure/pcr04.go +++ b/image/measured-boot/measure/pcr04.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package measure diff --git a/image/measured-boot/measure/pcr04_test.go b/image/measured-boot/measure/pcr04_test.go index 5b9e7f568..215fb5d8e 100644 --- a/image/measured-boot/measure/pcr04_test.go +++ b/image/measured-boot/measure/pcr04_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package measure diff --git a/image/measured-boot/measure/pcr09.go b/image/measured-boot/measure/pcr09.go index 5a6f279e1..8013f3c39 100644 --- a/image/measured-boot/measure/pcr09.go +++ b/image/measured-boot/measure/pcr09.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package measure diff --git a/image/measured-boot/measure/pcr09_test.go b/image/measured-boot/measure/pcr09_test.go index 759ad9e33..2462605e9 100644 --- a/image/measured-boot/measure/pcr09_test.go +++ b/image/measured-boot/measure/pcr09_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package measure diff --git a/image/measured-boot/measure/pcr11.go b/image/measured-boot/measure/pcr11.go index 6f5d8d789..1fbc37887 100644 --- a/image/measured-boot/measure/pcr11.go +++ b/image/measured-boot/measure/pcr11.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package measure diff --git a/image/measured-boot/measure/pcr11_test.go b/image/measured-boot/measure/pcr11_test.go index 5b73aec2e..ca94c16de 100644 --- a/image/measured-boot/measure/pcr11_test.go +++ b/image/measured-boot/measure/pcr11_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package measure diff --git a/image/measured-boot/measure/pcr_test.go b/image/measured-boot/measure/pcr_test.go index 37487219f..d05f47b69 100644 --- a/image/measured-boot/measure/pcr_test.go +++ b/image/measured-boot/measure/pcr_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package measure diff --git a/image/measured-boot/pesection/pesection.go b/image/measured-boot/pesection/pesection.go index 557851cf8..59b849e39 100644 --- a/image/measured-boot/pesection/pesection.go +++ b/image/measured-boot/pesection/pesection.go @@ -1,7 +1,7 @@ /* 
Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package pesection diff --git a/image/mirror/BUILD.bazel b/image/mirror/BUILD.bazel index 014001e9d..56b425add 100644 --- a/image/mirror/BUILD.bazel +++ b/image/mirror/BUILD.bazel @@ -1,5 +1,3 @@ -load("@rules_shell//shell:sh_binary.bzl", "sh_binary") - sh_binary( name = "update_packages", srcs = ["update_packages.sh"], diff --git a/image/mirror/SHA256SUMS b/image/mirror/SHA256SUMS index 796826902..ba49e4366 100644 --- a/image/mirror/SHA256SUMS +++ b/image/mirror/SHA256SUMS @@ -1,360 +1,354 @@ -37abef83e8927b4b48f69fcbdcc249d349c6029cc669401676d01f0ea326999e WALinuxAgent-udev-2.10.0.8-2.fc40.noarch.rpm -03b5889fecc19101fe5c5eb6105bded5705e16a7af957f92b6cc6a7a96e829a8 aardvark-dns-1.14.0-1.fc40.x86_64.rpm -ac860c52abbc65af5835d1bd97400c531a5635d39bc1d68e36a1fe54863385ea alternatives-1.27-1.fc40.x86_64.rpm -b28e7d90ed3aeb6ca10ecb235b70534665011af35bd3677fb836b1e3cfa602a7 audit-libs-4.0.3-1.fc40.i686.rpm -accc1c623cc5345f983990416ad0f8d6028d63bc9f00bcb808688b2a3bb7caf9 audit-libs-4.0.3-1.fc40.x86_64.rpm -e9fd8dd4c9068501c169edb684de7f6e38657548e9d4d1b838a4d6316e9f17cc authselect-1.5.0-6.fc40.x86_64.rpm -db18a583ebde21d8b0b67f0306e25908b273bef9c532469ac0b7ab92578438f4 authselect-libs-1.5.0-6.fc40.x86_64.rpm -6404b1028262aeaf3e083f08959969abea1301f7f5e8610492cf900b3d13d5db basesystem-11-20.fc40.noarch.rpm -156e073308cb28a5a699d6ffafc71cbd28487628fd05471e1978e4b9a5c7a802 bash-5.2.26-3.fc40.x86_64.rpm -208ddebcd5edbff3dca54020a8a180f5410ea9b2d82c733e106992a729b4b84e bzip2-libs-1.0.8-18.fc40.i686.rpm -68a43532d10187888788625d0b6c2224ba95804280eddf2636e5ef700607e7d0 bzip2-libs-1.0.8-18.fc40.x86_64.rpm -1afcf80d5e7b22ee512ec9f24b4f2b148888ef95af3486cf48f2204c3406b12d ca-certificates-2024.2.69_v8.0.401-1.0.fc40.noarch.rpm -a9af8b72483b4ad6489e6d2492161120809453373edaf24722cffb394bd1cd15 catatonit-0.2.1-1.fc40.x86_64.rpm -588a2ab4dd93d58ca8b8d2c2d0b5e2c52007548c3fdd06f5ca1ab415ee236d86 composefs-1.0.6-1.fc40.x86_64.rpm -5935816e8d377d0385e5287ca12e4d3b43e3c3cdc9cc4deafa653a6dba78611a composefs-libs-1.0.6-1.fc40.x86_64.rpm -db246f6445469b5a71e965a081685471768393cf04181e7250ce0ddcb8a9c3d4 conmon-2.1.12-2.fc40.x86_64.rpm -adf4b75cdd9fae9d2d37fb71d9f0bf625a6705c0f0a7784569ab21463fe22152 conntrack-tools-1.4.7-7.fc40.x86_64.rpm -b84841f98b3c14d68da5a4021f10973eb5fa6566e63f4933e40a0c91aa134ce4 container-selinux-2.236.0-1.fc40.noarch.rpm -bbe29e0c7b4ca076d50b4ac3954eb383459230d96b13f353ee71ebd5de33b6d1 containerd-1.6.23-5.fc40.x86_64.rpm -46f035201c84f65f6b9fd549adbe1c4aeb5f5972e69bf6537c05e7cc20587a4a containernetworking-plugins-1.5.1-2.fc40.x86_64.rpm -8c6676fd3d9cb966e68da46dc8bc3a81ea6f2ccb697f75a284a9c9b868489789 containers-common-0.62.2-1.fc40.noarch.rpm -70956b80c95cd582f6d5410c555a1f7468b13c1f022bb25856a6a1a59f4279e3 containers-common-extra-0.62.2-1.fc40.noarch.rpm -299d3e7e1cbc110d9ae8a47f6ca95142c3e3783cb1464bfbd6bc550c414b97ec coreutils-single-9.4-9.fc40.x86_64.rpm -d941a78ffb6e2e0b4c24d0097d0351ced8796edde90208b4bddee459bce0a949 cpio-2.15-1.fc40.x86_64.rpm -faa23cb6a7a612c0a6e874c788c5add967c5e193bd38c2e6093b82b38a162f81 cracklib-2.9.11-5.fc40.i686.rpm -ea1f43ef9a4b02a9c66726ee386f090145696fb93dff80d593ac82126f8037ec cracklib-2.9.11-5.fc40.x86_64.rpm -fe24641e69545c428890a4b094f015c03f65a6c30c3db7bb0de7672bab66bfd6 cracklib-dicts-2.9.11-5.fc40.x86_64.rpm -cb6318cb928c70696f1fc3a79469c0343905b4b69c5d9789e9932b10b4584357 criu-4.1-2.fc40.x86_64.rpm 
-05a1dbc9f2e3585df8f9930327a7e6f7f59b396359db92787086e1fdb73634b2 criu-libs-4.1-2.fc40.x86_64.rpm -0a8ee60884b6739bc0fffed6c47a94eae524e73cbd9c942420f6ffdfcd39086b crun-1.20-2.fc40.x86_64.rpm -d7a62ff0193375607d28d8fe7eedf3ff5b6ddac154e1474d79787b9f32ae298d crypto-policies-20241011-1.git5930b9a.fc40.noarch.rpm -0f1d436f879fa30f18adca576a1f91bb1a8d1ac42cff5f35c1d15e6dffc506fa crypto-policies-scripts-20241011-1.git5930b9a.fc40.noarch.rpm -26aadc06a9f98c58ca6250d811e749ee5fa76059b37445ec28b50ee73d548174 cryptsetup-2.7.5-1.fc40.x86_64.rpm -765d5162ad7b1eef16a3d3e4285743a3a358552112e7946dcd974e355dc9fa28 cryptsetup-libs-2.7.5-1.fc40.i686.rpm -6559b4d59d898317972e8d728253ba4315578c15fe23553958d6fb8033698794 cryptsetup-libs-2.7.5-1.fc40.x86_64.rpm -20b0f2923feae4c2f1d339e959d3f03d81f8ca985faa05872377b827d6f30467 curl-8.6.0-10.fc40.x86_64.rpm -0dff67dfeca59cb68cadafe8d9909b88dfaa2fc0a9a4426352f66a5fe351fbe3 cyrus-sasl-lib-2.1.28-19.fc40.x86_64.rpm -19197df26f76af5e78bd1e3ad2f777bea071eef6dfec1219f6b8ee3c80e10193 dbus-1.14.10-3.fc40.x86_64.rpm -84ca6055aa354df549fdc78d6d9df692ed4d12c14a489a6d2ce844b5f225a502 dbus-broker-36-2.fc40.x86_64.rpm -81bade4072aca4f5d22be29a916d9d0cfc9262a6c5d92ddfe750f7b8bf03f7c9 dbus-common-1.14.10-3.fc40.noarch.rpm -51bcbc8f98a8abea9a24f3f3988958b965dfe8d6b4201ee046255416fe3ce75c dbus-libs-1.14.10-3.fc40.x86_64.rpm -6a2046c5a6c959cdf30519ec67385faf9b6156ffb3b449c54a0694d4d35985fd device-mapper-1.02.199-1.fc40.x86_64.rpm -33d2dfe3cf355511e6d7bbd1969675b9b73f58bac2a998e9d950ab99d762a299 device-mapper-libs-1.02.199-1.fc40.i686.rpm -f80de90bd6ab88c9071b200c0f0323f80145ca3ab9ac565e8f569390890137cc device-mapper-libs-1.02.199-1.fc40.x86_64.rpm -6913a547250df04ec388b96b7512977a25ab2fca62ed4345c3a9fc8782ce659f diffutils-3.10-5.fc40.x86_64.rpm -cb0736689bd171b6c6ac7a60737fd6b9534c950958ad8e03138068bf9498e0b1 dracut-102-2.fc40.x86_64.rpm -fa40cda554dc644d5a8354b18be748f21996dadd6193ee4ac32c02581266d313 duktape-2.7.0-7.fc40.x86_64.rpm -ac4f1b2eaf5d452512e7b6172c93880c2b501946b71a228adc02d50bb3fb56e0 e2fsprogs-1.47.0-5.fc40.x86_64.rpm -8476fda117e3cb808129ddc2f975069685a8c7875ee04c3dafa6ceed948a2628 e2fsprogs-libs-1.47.0-5.fc40.x86_64.rpm -2e2bf662f060ddd75195e9d3d5f08cdd5d9cc857df3a9bcc45608337ba314a25 ec2-utils-1.2-48.amzn2.noarch.rpm -e6231ec4268b3efa928250eb4106311e0f33396422245b938bfed4ba2d79c573 efitools-1.9.2-9.fc38.x86_64.rpm -6ac676d78c2df896f9794a8dffb75ea69c58d202c68f4bcf084f0d264154a666 efivar-libs-39-2.fc40.x86_64.rpm -75a77fa962df4aabee266a1a48aeb4bf3a164dba31a597af23ab33c693a1c068 elfutils-debuginfod-client-0.192-9.fc40.i686.rpm -4b4b9b5c7e4ceaf65cb473089f87eef07cbdb7254425f60219c5b6412ea7da94 elfutils-debuginfod-client-0.192-9.fc40.x86_64.rpm -633b68d0b697c585727d07f4a3c5e4ba536841a8717eaadc552fc10e61d3b86a elfutils-default-yama-scope-0.192-9.fc40.noarch.rpm -fde3769dc677f0a76ce96058c3032f6553809f4809509842b9523a13e90913e2 elfutils-libelf-0.192-9.fc40.i686.rpm -f7778242b3242fab35a5e0c4f0919ca9823a47c7b296f274ff312cc2c49ba6e7 elfutils-libelf-0.192-9.fc40.x86_64.rpm -a322c6f44f82bc21558f3bb7afc694dbdc28a41340280d3a2c5da2656a339d2f elfutils-libs-0.192-9.fc40.i686.rpm -184235133a6873a42b72d2d88657f12dd4b4b603fd5058fbd02daa0a7308f69c elfutils-libs-0.192-9.fc40.x86_64.rpm -a09546cd7e26c630765f4acb93942a2c70524107a50e20761eb0898b8a80b6cb ethtool-6.14-2.fc40.x86_64.rpm -136ed00bff4d44ab7bd6d0926c4b80c2f5ee9160dc4f698be988ca3d37303b4d expat-2.7.1-1.fc40.x86_64.rpm -849feb04544096f9bbe16bc78c2198708fe658bdafa08575c911e538a7d31c18 fedora-gpg-keys-40-2.noarch.rpm 
-8f0a56982aa10b607bd3b8009e91bac1a7d9b7ba40eb4435ff9bd9efe7e5e76e fedora-release-40-40.noarch.rpm -dde6f4b5d4415ce20df40cf1cb9ff3015aa5b1896c5b5625e49aa686cdce1d1d fedora-release-common-40-40.noarch.rpm -f133aabe97d4aff7e6c83b8d364fb9805f655b32150e2f6e876958966293e467 fedora-release-identity-basic-40-40.noarch.rpm -e85d69eeea62f4f5a7c6584bc8bae3cb559c1c381838ca89f7d63b28d2368c4b fedora-repos-40-2.noarch.rpm -a6f2098fc2ed16df92c9325bd7459cc41479e17306a4f9cddfd5df8a1b80d0f8 file-5.45-4.fc40.x86_64.rpm -f76684ee78408660db83ab9932978a1346b280f4210cd744524b00b2e5891fe1 file-libs-5.45-4.fc40.x86_64.rpm -063af3db3808bea0d5c07dbb2d8369b275e1d05ad0850c80a8fec0413f47cd64 filesystem-3.18-8.fc40.x86_64.rpm -21725de2a93e1ea19f8d298e32a2428a3a08b9c98f22561cc778a807ed43639f findutils-4.9.0-9.fc40.x86_64.rpm -f4c2d51c7b4577f7b7ef498f8e2afb1b007da2de00cca28e220f50129c40a48c fuse-common-3.16.2-3.fc40.x86_64.rpm -f94315e447afb7442033b7b82e43a4ed62754f603afda53930280300855e46c7 fuse-libs-2.9.9-21.fc40.x86_64.rpm -8fe84b7e0319afcc9c9eb28130b74e0cd7c675667a6ce075eb7ee2ec1b0014c2 fuse-overlayfs-1.13-1.fc40.x86_64.rpm -2d6631d65e3b5c91afdb100a51ee8e50294f0e074a944c1662008d878d47456e fuse3-3.16.2-3.fc40.x86_64.rpm -a9c6502a5b190aaf169e93afd337c009e0b2e235e31f3da23d29c7d063ad2ff9 fuse3-libs-3.16.2-3.fc40.x86_64.rpm -6c80dfdaf7b27ea92c1276856b8b2ae5fde1ae5c391b773805be725515fdc1ac gawk-5.3.0-3.fc40.x86_64.rpm -c4cc69bf3a2655b9ee9ac23492d377bac57811c5b4f81fbf43537520ee33c7af gawk-all-langpacks-5.3.0-3.fc40.x86_64.rpm -21470eb4ec55006c9efeee84c97772462008fceda1ab332e58d2caddfdaa0d1e gdbm-1.23-6.fc40.x86_64.rpm -93450209842a296ea4b295f6d86b69aa52dd8ec45b121ede0d5125aa49bad509 gdbm-libs-1.23-6.fc40.x86_64.rpm -40337d9167737abe23af9c6b586b883f33cc82028d69b29c1d68b524201d9248 gettext-envsubst-0.22.5-4.fc40.x86_64.rpm -554a68e692ccdd0cf71ea67a4c550bac910685465f17eee503732d48ccda9c90 gettext-libs-0.22.5-4.fc40.x86_64.rpm -046971e9f5f0c88737854e1c9e02cce8f5854633575984b235cf3f8b11ec7b91 gettext-runtime-0.22.5-4.fc40.x86_64.rpm -6afaddcff936e900b9d3ab379ddac5c8dd0aa323924b973bf2fa9fc819dc3638 glib2-2.80.5-1.fc40.x86_64.rpm -ee01787bf89f4f3b1ee7dc269844bae2fc672b1af4ac5e241e570391fbeeab3b glibc-2.39-38.fc40.i686.rpm -b64c709529bedb9d3e62c892fe79b67f186c499d54d1e7985713ddb5fded7a59 glibc-2.39-38.fc40.x86_64.rpm -5cdec5bdfe58752fca36279d5cd25e08a6b37a086020f2573937304a5b770dc6 glibc-common-2.39-38.fc40.x86_64.rpm -e8ab57f139980b93c17a9598a40cac5a81be37216526be8115235a57ce5a9df7 glibc-gconv-extra-2.39-38.fc40.i686.rpm -4deff99dffc791e504d9e1d9b5c19f3a053a14291d2168ffd33fdcd24180323b glibc-gconv-extra-2.39-38.fc40.x86_64.rpm -8556a19ce4e3a9977f2d1cdb2f1885eef6401140609264e2c50a8161874d22ac glibc-minimal-langpack-2.39-38.fc40.x86_64.rpm -b054d6a9ee3477e935686b327aa47379bd1909eac4ce06c4c45dff1a201ecb49 gmp-6.2.1-8.fc40.x86_64.rpm -0a8b1b3fb625e4d1864ad6726f583e2db5db7f10d9f3564b5916ca7fed1b71cb gnupg2-2.4.4-1.fc40.x86_64.rpm -4425dbd35ab65f25b092d12ac56c4b565371a1c52ac882c8896dbeae7d52bbb1 gnupg2-smime-2.4.4-1.fc40.x86_64.rpm -74f185a772b22db0d41b69d63d1fa4a78840bc32359ce47729dbd54d297ffa77 gnutls-3.8.9-1.fc40.x86_64.rpm -9a463e49371c05bce6713fd4956ff3784db265485260e6151d3eba444ad41f42 gnutls-dane-3.8.9-1.fc40.x86_64.rpm -f8510eeec17b9258de9a68ce15af21f3ea135b5e767f3bc9047f851d81dbac6e google-compute-engine-guest-configs-udev-20240830.00-1.fc40.noarch.rpm -94e443590221fb17e0330f076ebac32baab17b8d9c22566db372899ae750ca64 gpgme-1.23.2-3.fc40.x86_64.rpm -6d54af0fc5ae216eb97720415acda4245ebc6c021420a2892b58620b5b25ca38 
gpm-libs-1.20.7-46.fc40.x86_64.rpm -8e2310f6cde324576e537749cf1d4fee8028edfc0c8df3070f147ee162b423ce grep-3.11-7.fc40.x86_64.rpm -46bc4d8d62eeb1fa3275898d44a30643260f4dda2d3d2a3992f879e4c7df26c7 grub2-common-2.12-16.fc40.noarch.rpm -c508cb9605baa6a0751feda2d23e9a65a4d11538d6d108c295562eac65c63ab1 grub2-tools-2.12-16.fc40.x86_64.rpm -0c6bb04c6e81d368793c03bab2ec736c3ea63aaea9dbb64d4a94b12b35320bdb grub2-tools-minimal-2.12-16.fc40.x86_64.rpm -6a146fa9b154e67eb67eeb258df37814a863997c87171fdc2bd771e5a46b1cc4 grubby-8.40-75.fc40.x86_64.rpm -6dcc2f8885135fc873c8ab94a6c7df05883060c5b25287956bebb3aa15a84e71 gzip-1.13-1.fc40.x86_64.rpm -7ea61bdaada7c1ab5b8567e054a73e2cb3ca6019e3db887049998fed7eea8514 iproute-6.7.0-2.fc40.x86_64.rpm -21d9bc4c677edd86b2e88ebe4c20b097412c2fa3ef4a91d7de0f9b03e1306f5d iproute-tc-6.7.0-2.fc40.x86_64.rpm -21e1196534fbb6d6a4f8d29a1e76518e2740ae53f63080fc811e7ce9cc6d0982 iptables-legacy-1.8.10-7.fc40.x86_64.rpm -6e78bebc0bde8c2f1bd9c4a5f40010a779e7505a0fe87aab516db1bb4a840f02 iptables-legacy-libs-1.8.10-7.fc40.x86_64.rpm -98115e0aa89bc9a8ec66c160af80cf32519db427a83d23d8f3dd9185b3aa591e iptables-libs-1.8.10-7.fc40.x86_64.rpm -99857ab7055ee14a0b4c3a77d1ee9b46217359c708d1bd4efad348595ea974fe iptables-nft-1.8.10-7.fc40.x86_64.rpm -9b4f2730a62955650c1e260e1b573f089355faf0155871e2c10381316a3b2e55 jansson-2.13.1-9.fc40.x86_64.rpm -3a4b45b9f4746a7d3ce8fcb853fa65563051d2f6f31826b8007f6674fe3d8ce1 json-c-0.17-3.fc40.i686.rpm -77e67991fcd4eea31f5b2844898a7854768548f0ab3abf7beaa91526afbf794b json-c-0.17-3.fc40.x86_64.rpm -c8e382e9de90e6946dd9bc2f706d6c307ea4ebba3eca91a283f1bb72b5b3ac9c kbd-2.6.4-3.fc40.x86_64.rpm -4764830a5f91f668f6348539777ddcbc2aa5c21f433f2ef8fec4c385db26233b kbd-legacy-2.6.4-3.fc40.noarch.rpm -61b2303ebb8e37c240b7cf10a8649dbdd2ddefcb571a4e6ea688cc3ad9168970 kbd-misc-2.6.4-3.fc40.noarch.rpm -547bb1919671ef5b3f7ee40dac55d6d681374fe7367fb7c369b02b6287c25fca keyutils-libs-1.6.3-3.fc40.i686.rpm -387706fa265213dc46e4f818f30333cc93f0c54539cbd2ec4db3bc854077307b keyutils-libs-1.6.3-3.fc40.x86_64.rpm -370582ae0ed0dd607ec38e92eba4b124a4b2ec3835f3f4c1e5ad8255ee11d692 kmod-31-5.fc40.x86_64.rpm -42994ac67877595861b55adafd75ab3ce02d397e2ccddac8fb40ec0fecb4436b kmod-libs-31-5.fc40.i686.rpm -53dd95341767a2ea40b68e4621a231883bd5b69426f0920ce1f1ca94e18765cb kmod-libs-31-5.fc40.x86_64.rpm -9a03b21936528f6d08700757cb460c48e9557a71efaaa5e93b01b3f7614320f3 kpartx-0.9.7-7.fc40.x86_64.rpm -cd3402d654af18c421c0ae866ef668094cff5c032bb3f769606261eca8dcf8fa krb5-libs-1.21.3-3.fc40.i686.rpm -878a5a48835ecfec5fa04c7c7a1f24bdae7bd8e9aeca7b3f9dd97f6a23b9b41e krb5-libs-1.21.3-3.fc40.x86_64.rpm -6f2f0a522f2f10f273a77a60fdb7e066c14059d0a3676c9f723162daa7110b42 libacl-2.3.2-1.fc40.i686.rpm -b753174804f57c3c6bae7afeb6145005498f18ae5d1aa0d340f9df5b8d71312f libacl-2.3.2-1.fc40.x86_64.rpm -74d72760c1982830358d676794ee3972ab05550fe7235ae9756a40de8266091f libarchive-3.7.2-7.fc40.x86_64.rpm -e131ab89604dbd4fdc4f80af632099e48bf68bb328dbf0e7dcbef1d1e134dc09 libassuan-2.5.7-1.fc40.x86_64.rpm -67facd893f5082be270d0887a43ba22492c47e652e06e5d53ecd681a1aec8ac7 libattr-2.5.2-3.fc40.i686.rpm -504cff39c51a04c1d302096899c47dc34ac0eba47524c2fc94c27904149e72cf libattr-2.5.2-3.fc40.x86_64.rpm -649cceb60f2e284f8d5dadeec4af8e7035650fe0e5aa75c552354b3fa5708cfe libb2-0.98.1-11.fc40.x86_64.rpm -46e35f0fcf3c5ac842c4fd7ba63c0106b73fa2250a9ae1f8e92c53d552201d7f libblkid-2.40-0.9.rc1.fc40.i686.rpm -f4278c28f2bb21b0c24a5975384a5ccfc0934504d8a7c036ca1346b7683e1b5c libblkid-2.40-0.9.rc1.fc40.x86_64.rpm 
-c0f6262d325a8a0609935abcb922033c6e56d6b6e5096ba8fbb9c972817e27d9 libblkid-2.40.2-1.fc40.i686.rpm -b506de64d63262d9d957a75fdf2282d82b1e4978cebbdfc191ef93bba37e3b7c libblkid-2.40.2-1.fc40.x86_64.rpm -d8fab37e62c441e5d35421086a20923ff15561ad28c858cfb670b7c095106dee libbpf-1.2.3-1.fc40.i686.rpm -fca2d942f6264b630b33991e48dcb605543a4c837371f28f92994bf956677f24 libbpf-1.2.3-1.fc40.x86_64.rpm -97e9e5339bb0ca6ce3d0195c8ebe48384bcfc087ee6bc7a35b1d27d4de23fbfa libbrotli-1.1.0-3.fc40.x86_64.rpm -8163fa05deeb06414a6e9ad78ba8d3e65eab9af87f35f0790f8c81bee8359ab6 libbsd-0.12.2-3.fc40.x86_64.rpm -0bdb66863b60abc8c2ca540e80ef58e9d4da3f700b685ecc49042616387dee8d libcap-2.69-8.fc40.i686.rpm -6c92fc0c357964d2b57533a408ec93b7fe5214c1f0b63a6b1c0564b2ba5c481f libcap-2.69-8.fc40.x86_64.rpm -9b11d2f4bd3e8fd56028deb22e86e175c5d91017db81413f108f0841a9a02349 libcap-ng-0.8.4-4.fc40.i686.rpm -dc22477c3ac762f92ecc322af4f39fee2c5371bedc495ce242f9b94c590c580f libcap-ng-0.8.4-4.fc40.x86_64.rpm -274fd72d27570f3fcc9f06efedd21ea7a71e0903c286222fdbbefd6b30b9a80c libcbor-0.11.0-1.fc40.x86_64.rpm -c890a19d2c4a3da836bae1db40b778fe0339cd0d26bddfbe584aaccb1a0f1485 libcom_err-1.47.0-5.fc40.i686.rpm -0d100701976c37fe94e904ed78437db7477ae1dc600ece07bea23fbbd968762c libcom_err-1.47.0-5.fc40.x86_64.rpm -7583f6b188f19e5112403ad53d0e5c98d35a5b736d355ea91d4a35ade5ef014c libcurl-minimal-8.6.0-10.fc40.i686.rpm -e3dc770fc4c48bec2da9ac948bcd43e053608d0397ad0a57056409a7d427289d libcurl-minimal-8.6.0-10.fc40.x86_64.rpm -700d56839e1bc16c08f71c505a7e62f655e4c18f4bf71bf2f36f3854f829e6f5 libeconf-0.6.2-2.fc40.i686.rpm -2ef764049e121ee2a9fa5d0296e6e2dd0abc7541040b8e49d67960bd9bde74e4 libeconf-0.6.2-2.fc40.x86_64.rpm -7a91572e9639617937c13cd103ac5571075f37fa533d796be108aa8a4937432d libedit-3.1-54.20250104cvs.fc40.x86_64.rpm -c4adcee5dd9e22ea50d6c318ac4936a8df708121741958ce5aa8f038c46c61a9 libevent-2.1.12-12.fc40.x86_64.rpm -a1ba3045c99ef1b266383f0801731a68f9e0cb069a6c808267ad33b759381907 libfdisk-2.40-0.9.rc1.fc40.i686.rpm -17f02ca51b90580887d739f52b995034e0929fc6bcd92be308554a2f5337bbe4 libfdisk-2.40-0.9.rc1.fc40.x86_64.rpm -34db4e48052a47e1c6f445dec3edbdb279890ae24c5ecfa44e8a19fdee0a99a2 libfdisk-2.40.2-1.fc40.i686.rpm -aa6a51bbe265bb3d3a50c37557f6513d51298301e4957ce4484e56feb837fa32 libfdisk-2.40.2-1.fc40.x86_64.rpm -25caa7ee56f6013369c2fac26afd3035a7d580af0b919621ba8d495d13a5af86 libffi-3.4.4-7.fc40.x86_64.rpm -f9c5369b6d168a2b8e46159bc41ef0755ee1a8d12f4c6766fdfe23e827cf5cdf libfido2-1.14.0-4.fc40.x86_64.rpm -460a36745833f629ac1f5d232ec0daec092b7cb654a4bf3e4fde7c693fea9fbb libgcc-14.2.1-3.fc40.i686.rpm -cd073c42cb4dfcd224e9b4619883f2c7923ab0b083d7c90b01e3052c89f6b814 libgcc-14.2.1-3.fc40.x86_64.rpm -10c4c12c6539ffea68974cd9b57013d471ac35fe3bef4833c0a22f6b29fbf489 libgcrypt-1.10.3-3.fc40.x86_64.rpm -03d5f4d139dec2e7c94714b1b9f59d37236dbda9f09271bdda99c71251f15f0e libgomp-14.2.1-3.fc40.x86_64.rpm -8d0a9840e06e72ccf756fa5a79c49f572dc827b0c75ea5a1f923235150d27ae2 libgpg-error-1.49-1.fc40.x86_64.rpm -4fdafe5a28dc18a892713cc2071a46cbcb6561c9c62e10f20f04b0e562187228 libidn2-2.3.8-1.fc40.i686.rpm -63a08c0cf18474582a3e62367b5b4275d079e883e40f4cf32cab7afc316ec2dc libidn2-2.3.8-1.fc40.x86_64.rpm -98b0d9d25bd93c7061ce50480e214944a02d7de725e1d31f4461604380ffb74a libkcapi-1.5.0-4.fc40.x86_64.rpm -84977f5f157172dc7642a3f6602692bb6323b4b106c69f7081882e6c6a81a346 libkcapi-hasher-1.5.0-4.fc40.x86_64.rpm -906bb224af7b2e1ea64c258c6978a610b899b0af5be572ce1c09e36ec58b8a79 libkcapi-hmaccalc-1.5.0-4.fc40.x86_64.rpm 
-a77eed0fe1b84c11f9175f4642db058753d4eaa1f88e999f01df72e1d10a3826 libksba-1.6.6-1.fc40.x86_64.rpm -d5d9c95e38aeb8c852cda4516057d86a5fec2485cb3413067d625059a4d97b30 libmd-1.1.0-4.fc40.x86_64.rpm -7b307e95fb7584889d35108de86ebfa34d0aea6eabb5a68d574647f83f25ed77 libmnl-1.0.5-5.fc40.x86_64.rpm -4aa44fc80c1d3e3496a406740b9ae3ada3df28b37fecc611e44183f542758f76 libmount-2.40-0.9.rc1.fc40.i686.rpm -c5231753426984926daba70cc61d20e777048e91167f2e3c217e21e9205573a6 libmount-2.40-0.9.rc1.fc40.x86_64.rpm -76ae4cbab05343ba545c2c3898153493f0663a2f6ff382df464cebe9d260bab4 libmount-2.40.2-1.fc40.i686.rpm -a695daa293bb78b033a2629f5af1284fe212b748227e94efa59a8292eb6b9f40 libmount-2.40.2-1.fc40.x86_64.rpm -157c1256a9529dea2215f6c77f40647baf19c6f8ac6058934c0f2a593f436c4d libnet-1.3-3.fc40.x86_64.rpm -a2d50812dec895ba654fc424a458e99ceb81423046ad870ffabfef3081382ef6 libnetfilter_conntrack-1.0.9-5.fc40.x86_64.rpm -3de45cea1d877e58f1a8fadb3902e585c070bfd2813bb107145a2f28a2e6edb3 libnetfilter_cthelper-1.0.0-27.fc40.x86_64.rpm -aa2f6d8990f059ba985681d8a6d1695730bbe798014d5bd4b6aa112f79b386b3 libnetfilter_cttimeout-1.0.0-25.fc40.x86_64.rpm -ad8d041ab07f62567f80e9a751529091f591a542dd91b6473b7cba5749b56d69 libnetfilter_queue-1.0.5-8.fc40.x86_64.rpm -78055d1a143d118b9b5513e6621c31d19858c593427b6343a42655eb147a44ed libnfnetlink-1.0.1-27.fc40.x86_64.rpm -db4841a294c5ec3759d77f356a05d0c7f852270aa75c900daac4992f12147dd3 libnftnl-1.2.6-5.fc40.x86_64.rpm -0e9b7c72112f58e83d66422eb2d77d346dc0810cdb652906f0d0fcbd9799fc7d libnghttp2-1.59.0-3.fc40.i686.rpm -550160732fc268914a422cfddc3c745febf8da161f8eacbce8649c67117b1476 libnghttp2-1.59.0-3.fc40.x86_64.rpm -d7062104274c9b8eae85b7c199a69c2f8692c17f31d3fdf4364b53f6a3553e9c libnl3-3.11.0-1.fc40.x86_64.rpm -fa6dccd7aee4a74a5cfa12c7927c7326485704ebe57c54774b0f157fda639360 libnsl2-2.0.1-1.fc40.x86_64.rpm -9e27ce1072ef67dd8877175f9a7daa1bcddbbcec3fd6f161e6bc2f2b453c360b libnvme-1.8-1.fc40.x86_64.rpm -bb9ceaba0d3283777777524e8c99b8eaa2155e9000d8e3ef5d0ece336f8c1392 libpsl-0.21.5-3.fc40.x86_64.rpm -87e8725c378e16a983abee0b8bfbdaf2214f32b55c822741e627db34427ed9a3 libpwquality-1.4.5-9.fc40.i686.rpm -210e797a265da7111c1a59eca95f9e301ad05c5c8772aed54af9363e5684950b libpwquality-1.4.5-9.fc40.x86_64.rpm -571fad7baa286ca36a2b2cdb171d22142ba82b99663ec0408b5db99514773956 libseccomp-2.5.5-1.fc40.i686.rpm -91668f5d08a663948c7d888d7cdef3248285c5d9fbe369ae031d7ca31c6e398c libseccomp-2.5.5-1.fc40.x86_64.rpm -e4d4e12303eeee24bd19b7c1010abf5a275577f5c6aa59ccbd15887e0f5f09ee libsecret-0.21.7-2.fc40.x86_64.rpm -69161fabb22dd4c5c8aeab0b6465dafe06117ce5173aaf4dce425a10cb11c434 libselinux-3.7-5.fc40.i686.rpm -2070bdf786c926400739254f08568ccf564ce613ddacacb36b6a9a499345aa5e libselinux-3.7-5.fc40.x86_64.rpm -aca271d814ee3be14c09963985011c201315a186d3e3b634af8d59cd5eb01208 libselinux-utils-3.7-5.fc40.x86_64.rpm -e200b862d5063f6e85859c5be99c50d5636edae91bd3f603c3a22383b7e2ac88 libsemanage-3.7-2.fc40.x86_64.rpm -a4cd1c54d0f8b543ffa7cc6ce366a6a3f233e084f2e52ea07a70da6127347b8e libsepol-3.7-2.fc40.i686.rpm -85cbaeca877a166cda9637a8ea0d43dd63488fdcc250fe564696cf8beaf8913f libsepol-3.7-2.fc40.x86_64.rpm -716b91d85eb887fe10db607608294475289b9e9fc4d51fbddcf24046ea016147 libsmartcols-2.40-0.9.rc1.fc40.i686.rpm -34111597814e385c8c1cdd48ff72c4ed64e7e6ed9bd6660bb2bfda6aebdb3200 libsmartcols-2.40-0.9.rc1.fc40.x86_64.rpm -e9c3e9e3458af7a2f9b5cd6bc45020bb7f2c6cfbd0429b0b1853928bd3e02004 libsmartcols-2.40.2-1.fc40.x86_64.rpm -45d032fb4d59ee0f6a921dd1f0addfcdd38fc46917243fdd6248194ffddb9067 libsodium-1.0.20-1.fc40.x86_64.rpm 
-c8bbfa2762cc601f8a97d8d5a39a658f0e91ba477ebebd798b30f7fc8ffdd457 libss-1.47.0-5.fc40.x86_64.rpm -89e7282e0a94d641871dfed423ba2ce6f8b088eaf9aabdea1805708bcafa6a01 libstdc++-14.2.1-3.fc40.x86_64.rpm -3d6ff1e90b4b19de401ab45df9c5bb6e171c34a5b415a7e10e3282332a4cda95 libtasn1-4.20.0-1.fc40.x86_64.rpm -9ca680998686ee852fa8e1667cd6e7c436bfd5fe7da898bd314d808303d447f8 libtextstyle-0.22.5-4.fc40.x86_64.rpm -189e8f25a80a67db1722cb42f2800235df3eadd9cb93d3bcd13853bf09122d5f libtirpc-1.3.6-1.rc3.fc40.x86_64.rpm -e5d150d23f95e4a23288b84145af442607a88bf457c0e04b325b1d1e8e708c2b libtool-ltdl-2.4.7-10.fc40.x86_64.rpm -e541a1c8397dccf159b3602eb6bbb381ba21c544db337a3b3bfc49ccc2ef5c21 libunistring-1.1-7.fc40.i686.rpm -58719c2f205b23598e31b72144ab55215947ad8fca96af46a641288692c159d2 libunistring-1.1-7.fc40.x86_64.rpm -0fa1b7d1f6f5bcd1c2f2785e6571a7c3e63662efd50ba32fd1996dac9dbb4de9 libusb1-1.0.28-2.fc40.x86_64.rpm -896d671852ed3f28e8c778dca361c5b5c57a89855df11755e6be1d088f64d43a libutempter-1.2.1-13.fc40.i686.rpm -0093a8d3f490fbbbc71b01e0c8f9b083040dbf7513be31a91a0769d846198c1b libutempter-1.2.1-13.fc40.x86_64.rpm -5aaa12bba361ae29b2a6b35c4b21da935423bc2ad763eaa8267008c7a533cb3c libuuid-2.40-0.9.rc1.fc40.i686.rpm -18ae5558dd719fcc92bd4d2c7c73c6a093af82a35c67444ccc9cdf4b3dce1824 libuuid-2.40-0.9.rc1.fc40.x86_64.rpm -7273be2566ee7c865ca3154715e2f7fa938e852bc6114af9cb8530fa88d833ca libuuid-2.40.2-1.fc40.i686.rpm -b6db3e72ae6575127216145c1f65414ea94acd9db26d08c5081cb5d786101c1f libuuid-2.40.2-1.fc40.x86_64.rpm -bea578631618692ba5e302beadfdf6d5894e23e5bddaea4b4fca2f377dd1aaac libverto-0.3.2-8.fc40.i686.rpm -fadf7dd93c5eee57ba78e0628bf041dbd2ea037ace52f0a5cbac55b363234d27 libverto-0.3.2-8.fc40.x86_64.rpm -c87a32fd07fd5be227320177ddf61b89c9f14f06d0895e9ecb9a9977b8f6495b libxcrypt-4.4.38-7.fc40.i686.rpm -27f89188ec9f4b1e1e96275f7d2760c342b4cf0a28e7cbccd893d98418d1d060 libxcrypt-4.4.38-7.fc40.x86_64.rpm -a17f9a8894a00ee97a42219b3b21d64bfb850d74059d89ae299210bc477e8967 libxkbcommon-1.6.0-2.fc40.i686.rpm -1f1d0c1e1132016735acc6fc3390102b35f9eb257244547c7b61c32a9c2314cc libxkbcommon-1.6.0-2.fc40.x86_64.rpm -12fa7bdef4a5d95b78a38152a0c90b42c0cfc1a1b7c80fa25f4ccdb7c13cf849 libxml2-2.12.10-1.fc40.i686.rpm -a8ee5e5e972ac86d383bf2798db45f41a22b23d76a0fdef698ddd92076589ff5 libxml2-2.12.10-1.fc40.x86_64.rpm -9007aa6bc776262992172e3f24ea2528cd4f65fb82f2c2d01e27f53f79c5c6f7 libzstd-1.5.7-1.fc40.i686.rpm -ec5650e3822d102bfe6bbebc7468a711128ef695f4bd06748ce242b8378d8b7b libzstd-1.5.7-1.fc40.x86_64.rpm -81409455da42a5ffdcf5b8cc711632ce037fec25d5ae00cbfda5010c9db04157 lua-libs-5.4.6-5.fc40.x86_64.rpm -2d1da8faf26c647a7299f840cfa199f20415ceb99a4f694ac3cd07f645f02cf5 lz4-libs-1.9.4-6.fc40.i686.rpm -f5f022440c4340b5e7fb1c1dbc382e6b0fd57030b3ff056940f2bb3d254408ec lz4-libs-1.9.4-6.fc40.x86_64.rpm -55d5c4384bdc13290a7824aa566d1d20bbbc99f5dde4e057bd12f4f47845e28b memstrack-0.2.5-4.fc40.x86_64.rpm -2030e8622b6f9ceb4b56f5c771ae7e4ccbff3d8bf563df5bf929725f4c4f18c9 mkpasswd-5.5.20-3.fc40.x86_64.rpm -03fbefea8c8d8465cf1caf66870fb935292ee18b4ca341853b5576ca9c7801eb mokutil-0.7.1-1.fc40.x86_64.rpm -0a3a3fc2471d2d64cbc85f4b23c93620df6eeee814851a2b69fc5ddf75406b56 mpdecimal-2.5.1-9.fc40.x86_64.rpm -bc873693a8b8423d7f82e329abe207c9160a4c746fea9a32ef2a6ae8c912f227 mpfr-4.2.1-4.fc40.x86_64.rpm -8a7312e49b3ddec619dee7d1067b72f9105f34d9ff988be0e8b8a76091a8b8fa mtools-4.0.48-1.fc40.x86_64.rpm -7dfae7d898dfc40f3fe1fc66104cf31e434e866fec4d4944b55952d7f2f16657 nano-7.2-7.fc40.x86_64.rpm -b404c27af03bb1e43fb0dc472d5a1fa152e0563fa2e4eefa29199c47578a829b 
nano-default-editor-7.2-7.fc40.noarch.rpm -8a93376ce7423bd1a649a13f4b5105f270b4603f5cf3b3e230bdbda7f25dd788 ncurses-base-6.4-12.20240127.fc40.noarch.rpm -39bba59320e6276a3b7b07bc94d319511bdd7d32ba098fd49723f4d542794d41 ncurses-libs-6.4-12.20240127.fc40.i686.rpm -a18edf32e89aefd453998d5d0ec3aa1ea193dac43f80b99db195abd7e8cf1a04 ncurses-libs-6.4-12.20240127.fc40.x86_64.rpm -60d1e0058d38ab2ea6b08f59341e7db34c8bec37a387ad5c0565bbc38d5170fd netavark-1.14.1-1.fc40.x86_64.rpm -16172412cfd45453292e18f84fc57e42a3ce92aca72b47ef7e15b44554049cfe nettle-3.9.1-6.fc40.x86_64.rpm -188ce5004e6ed764b4a619b64a4a0f36f1cc4fa919fe0a300599ff1171844144 nftables-1.0.9-3.fc40.x86_64.rpm -784e0fbc9ccb7087c10f4c41edbed13904f94244ff658f308614abe48cdf0d42 npth-1.7-1.fc40.x86_64.rpm -f814bc09b50daaab468715088ec056373dbc209a5075306e4ce76f5c55eb2b42 nvme-cli-2.8-1.fc40.x86_64.rpm -a0eecb082db491d57bfab6047b2611a10150d47a8f50fd05f98ad2f01b0dee54 openldap-2.6.9-1.fc40.x86_64.rpm -49e3e1c7d82ab28b1ab79bb2655a95cadeae2295f5543db8a07d74e090bcb90d openssh-9.6p1-2.fc40.x86_64.rpm -369b4d6e159a53afe9d2803d927f0523ceeae0822353aa8f0d81d0e3211b0788 openssh-server-9.6p1-2.fc40.x86_64.rpm -bffa85f8feadf0bf5f7a8cea9ff9f5e49266959df6ae4d61cf929054c09ec2f8 openssl-libs-3.2.4-1.fc40.i686.rpm -a1b67803e7afb5e16d977e49b8e63c50537bbaa6b261ab10348d55a54b1562dd openssl-libs-3.2.4-1.fc40.x86_64.rpm -9f0336deb6f1b1524ec48d837622e7e2291995369b0356d7ad1e1d427f3b659a os-prober-1.81-6.fc40.x86_64.rpm -70fba929aab38a9d69a457cef1b01962161a1df2b78dc5a4e86ff4b994b51079 p11-kit-0.25.5-1.fc40.x86_64.rpm -c728dbd90872b7597a8ace70a70555bff576231bb6dbde14b75626d601706af8 p11-kit-trust-0.25.5-1.fc40.x86_64.rpm -b3b261e448a25c6550f050ca1813509dd6edbb10f22c02a535548332435b6bc4 pam-1.6.1-5.fc40.x86_64.rpm -753d7b5a6531eec7689414dc1a4ce76ba4d327b8ad0363a9298ee67b565c1d95 pam-libs-1.6.1-5.fc40.i686.rpm -6ca8efd0b2a26cc51917c1c81260d919ef7760f0e0770dc872a78b1b829299cd pam-libs-1.6.1-5.fc40.x86_64.rpm -9bbce784622e02af0371ced8e9a7d26adba7eabd66ecfcb8bbe2d24cf616e3c1 parted-3.6-4.fc40.x86_64.rpm -fa10fa559403d57df8c8dabd9cfd765f020216ca03d2116c861aa7cf7a97b27a passt-0^20250415.g2340bbf-1.fc40.x86_64.rpm -b8892365092573b21fc84ebd084b20f6f62e848ac19720ea8a9e0c2fd64176c4 passt-selinux-0^20250415.g2340bbf-1.fc40.noarch.rpm -a0fb808d6b7ff8cd9cfdc1a60f213851cecdcace334d6e5aa1e0e54b81d79a25 pcre2-10.44-1.fc40.i686.rpm -73e50df09266fcffda9c24a3738f579dd365c2c187c294da054ef9915edc3851 pcre2-10.44-1.fc40.x86_64.rpm -dbec699e88d42fc6fb1df0a8c0b9023941ed1b1b7625694253a612eaf9f2691d pcre2-syntax-10.44-1.fc40.noarch.rpm -d207e7cdb8602403c8aab36c1342f55bcb4503bf4e296d11dae013e6fd9ac920 pcsc-lite-2.0.3-1.fc40.x86_64.rpm -cbd3f6cdbb19126dc703f140fafcac84d0d2ef63b54dfa08332d4bce2def076f pcsc-lite-ccid-1.5.5-3.fc40.x86_64.rpm -f796a31cad58f4ebea8787020868581d9a721297ee0ef6a7c63a7f8444f60c17 pcsc-lite-libs-2.0.3-1.fc40.x86_64.rpm -5443db8875acc0c1c436dbe1ed62b776543e049b8d9c7e33198379d367814093 pigz-2.8-4.fc40.x86_64.rpm -cb7c5036f1d25c696de23a6670cb64caec9945116fb0c9a93555414746ecf253 pinentry-1.3.0-2.fc40.x86_64.rpm -bbb4abafa9f7664e21350b56d49af2c928288e6d4dd68c304c4ab5d45b2c8ad7 pkcs11-provider-0.3-2.fc40.x86_64.rpm -ecdf5f33e98a3c94426efc2176d382899d08d632e05dafc2e7fd18549337ef75 podman-5.4.2-1.fc40.x86_64.rpm -f6291fc1fd3ececcd23c9e693ae0d309d66d57cc2de5d3d389235604804c1c2a policycoreutils-3.7-7.fc40.x86_64.rpm -30a4f9d3631aaa1280c93ce4305847a9773973aa312e1802d1cd676cb2421689 polkit-124-2.fc40.x86_64.rpm -f47bc65177a8b160916c00df9c84442afa1dd353880b3c0503d5a0b052d4956c 
polkit-libs-124-2.fc40.x86_64.rpm -b7decdd8a6fcb175fea2bb39bb1dbecad1ba820c365bab5a273a7b3982e55157 polkit-pkla-compat-0.1-28.fc40.x86_64.rpm -c03ba1c46e0e2dda36e654941f307aaa0d6574ee5143d6fec6e9af2bdf3252a2 popt-1.19-6.fc40.x86_64.rpm -8a414572157d7e450eddcdc909521e09373289cc7a48ebc15f7b0c9922c17262 procps-ng-4.0.4-3.fc40.x86_64.rpm -af85755cda79959a19161ebc26a45e507003298bd97b472b9ab0d512afa5e46a protobuf-c-1.5.0-3.fc40.x86_64.rpm -45ff2e9814aa059f323b23710c73309d41d36306667a3004f5fbb86b0cab4484 psmisc-23.6-6.fc40.x86_64.rpm -c000cbb0a7df2c0c61559ab3f3732eacd163b171673298f4ec043cb6d223f364 publicsuffix-list-dafsa-20250116-1.fc40.noarch.rpm -7c703b431508f44c5184b5c1df052ed0f49b7439d68aa3597a9a57a5b26bd648 python-pip-wheel-23.3.2-2.fc40.noarch.rpm -bc9b15b36777510ccc9ddc3da363a3100990a235e998f3a50743cdfa50e92f5e python-unversioned-command-3.12.10-2.fc40.noarch.rpm -b0eced6eca5856ed3ddd031e8010e91975149cecf4b337fd3a8c82759a2344c9 python3-3.12.10-2.fc40.x86_64.rpm -c59ab2ff672f5e2d1be59dd22ebbe3fc86c88aca95247b1f84c9bf3d5b6c7026 python3-libs-3.12.10-2.fc40.x86_64.rpm -b593f10e736995a0a898d37a1e595ad04bf162e7f0e7c9994e3032a9d9bc8799 qemu-user-static-8.2.9-1.fc40.x86_64.rpm -72b6185e59a7b359df273e9fe27dc014c0856341abcd1c907c7810a77cc6980e qemu-user-static-aarch64-8.2.9-1.fc40.x86_64.rpm -7eace5bc3e601266452b8356207604012542a8cd019b576e6d2ab9306c6a8e4c qemu-user-static-alpha-8.2.9-1.fc40.x86_64.rpm -6baadf76d6111fabe0c40d9ef8956396ffd23175dadceff940356e1d447c9f0b qemu-user-static-arm-8.2.9-1.fc40.x86_64.rpm -284850c3af8faa4b7ae51b5d2c1c597e783945a20b950b7bc0a925d02538ced3 qemu-user-static-cris-8.2.9-1.fc40.x86_64.rpm -8b5d75d2839682691d130409ebf7142f3fafdb970348f3b5336876d38d4ab0b7 qemu-user-static-hexagon-8.2.9-1.fc40.x86_64.rpm -fef5e7fef2df6f85092383de73373e5d9c9eb241232281bb94c068564c7306de qemu-user-static-hppa-8.2.9-1.fc40.x86_64.rpm -742c694b0e8cbe8bafe2b0602bb38f17c31b80c04f7eb6ee7f104e97d60a1e48 qemu-user-static-loongarch64-8.2.9-1.fc40.x86_64.rpm -872255f64d77ccc3a5388ace420e1434c504e79e31442f58a1516ff752697641 qemu-user-static-m68k-8.2.9-1.fc40.x86_64.rpm -314453265e4fe55fd7b02e01d17c14c4f68ff23bf652ca251915d65ff9006cc4 qemu-user-static-microblaze-8.2.9-1.fc40.x86_64.rpm -decbbd27625256fe9930c908eaf18e6c852f15e47913b7ca071bea091737d5c8 qemu-user-static-mips-8.2.9-1.fc40.x86_64.rpm -f7d088b6b21089c66c24317de994bc65c5ed0f7be484518e72aefe0f74aaf959 qemu-user-static-nios2-8.2.9-1.fc40.x86_64.rpm -58129a9a88a231446ec2fba60c378438d0020e3c7d72186f40d65d06be12eb93 qemu-user-static-or1k-8.2.9-1.fc40.x86_64.rpm -e1c1dc5e5b1d39b52b57c2ebfbbbaa4ece91d986ee8065547bcfb2f6b4707b92 qemu-user-static-ppc-8.2.9-1.fc40.x86_64.rpm -8863c1d75d185ed71bcba9496d353467d064669a3825ab5fda7b4f964bd1a76f qemu-user-static-riscv-8.2.9-1.fc40.x86_64.rpm -d8e5cbc0354aef6bca8600aeeb5f64b4833bb3f90466e64c37695fd7822c5f4b qemu-user-static-s390x-8.2.9-1.fc40.x86_64.rpm -71663f59440aae47599d857f89fb2149952ec4f1bb1afbd83f7d6ea00ccc393f qemu-user-static-sh4-8.2.9-1.fc40.x86_64.rpm -bfef80139dc556e1834b424a346a69fbdc2c706e0158402d8ea6c42e04123bb3 qemu-user-static-sparc-8.2.9-1.fc40.x86_64.rpm -9a0f90c77539963e1657148b3fda7b91285abfbfba52893735b36971df338b6c qemu-user-static-x86-8.2.9-1.fc40.x86_64.rpm -ccf7dd55fc0101233b3384077b544b8fcf658b3febd0e062a687a7537dd9618c qemu-user-static-xtensa-8.2.9-1.fc40.x86_64.rpm -8d50fba416f81e4091b144748fff22665ee88699fdc4a372b905d999d05fd3e8 qrencode-libs-4.1.1-7.fc40.i686.rpm -93781052576cc40a2c203bbc1bf865189a11b2c82436e614da9811baedc082fc qrencode-libs-4.1.1-7.fc40.x86_64.rpm 
-3527582fddcb54892228658b3929ffbb89766941a9794e726216e0800ac05721 readline-8.2-8.fc40.i686.rpm -dacd59edbe4744fd9f6823d672e01eff89f871e88537554f16c0a275a17d04e9 readline-8.2-8.fc40.x86_64.rpm -2fbe0a8f9925ba12b4307fbed8c5c148bab91835f1a3e8797ee08d94d2a0bf83 rpm-4.19.1.1-1.fc40.x86_64.rpm -c48c149f4aebfe44d649eea6f7a8eaa229dc8db71ff70b66c7403aa9bd072820 rpm-libs-4.19.1.1-1.fc40.x86_64.rpm -7bebda41ea91faf8cf8911a403c051eb59d444e60f8091d14d10987b713f39ff rpm-plugin-audit-4.19.1.1-1.fc40.x86_64.rpm -d400a4e4440bea56566fb1e9582d86d1ac2e07745d37fa6e71f43a8fea05217c rpm-plugin-selinux-4.19.1.1-1.fc40.x86_64.rpm -ce3b3148bb617e132c2ae9a28cc9f1990f806bc45722489f4c09f4d90821b6cd rpm-sequoia-1.7.0-5.fc40.x86_64.rpm -216aedc28a4144469041eade68f57149e2a7ab91c5f4f46eba18b6fc6effcb73 runc-1.3.0-1.fc40.x86_64.rpm -5dbd069183076ed8048c839c31f713c0f6080fb9ebfdda92ac550030688e811b sbsigntools-0.9.5-6.fc40.x86_64.rpm -6a21b2c132a54fd6d9acb846d0a96289ab739b745cdc4c2b31bdbf6b2434a1a7 sed-4.9-1.fc40.x86_64.rpm -4ea87bc61621f2465a6db2bf14144acd37230132fc84a2d3af485067646e37e1 selinux-policy-40.30-1.fc40.noarch.rpm -428f1d37fc81965af5962d65b7232b6425db2ff3b3778475756e690acb04a51d selinux-policy-targeted-40.30-1.fc40.noarch.rpm -89862f646cd64e81497f01a8b69ab30ac8968c47afef92a2c333608fdb90ccc1 setup-2.14.5-2.fc40.noarch.rpm -cfde0d25ecac7e689ee083b330b78df51d346c2b7557c83a189d5df95c4e2c8d shadow-utils-4.15.1-4.fc40.x86_64.rpm -6e9b6b6196f1782419e447ac806c762d002c6930fe39b18999d9b32c24a0ecfc shadow-utils-subid-4.15.1-4.fc40.x86_64.rpm -67eede27af5b4773eb2f7ac794df694be030310d40bce462864c05b8f65c87c3 socat-1.8.0.0-2.fc40.x86_64.rpm -9fe46c08d942a5eaa66d997368f372557a81383fe9831ddeb801bccdde64f28b sqlite-libs-3.45.1-3.fc40.x86_64.rpm -08c40fb895e75add08d2c239d7bd24a0633ffaafed430f7ad5b464d1eea2a8b6 systemd-255.18-1.fc40.i686.rpm -51cd1eaa48613b981940d81ae76ea610c28265104e289a434a845ebf9e8f85a4 systemd-255.18-1.fc40.x86_64.rpm -d3c0e2fcfce8b412667f58d63f927076b01f51e0f36f5a7d4b4038efbebc95a0 systemd-boot-unsigned-255.18-1.fc40.x86_64.rpm -1fa7a7e4fcfa4f78c9bb0b5f95e100d3c9f36841368189339449612e7edb2a3c systemd-libs-255.18-1.fc40.i686.rpm -aceabc4c1848f8154afba961b3dfac1c95ec6c72cdf9238ceca384b699af3cef systemd-libs-255.18-1.fc40.x86_64.rpm -c8f5b5c50202965f86dc9139de3082a972e2a0222a1a426ce76a30d47cba5f32 systemd-networkd-255.18-1.fc40.x86_64.rpm -746e0db539b7c9a8bba425957304ca311878b198a2f6d63e60c42c266688c1dc systemd-pam-255.18-1.fc40.i686.rpm -71e42c637702d75b5a66058cb087c4ef7d0475da635ccfd0f40f71372d0e7de7 systemd-pam-255.18-1.fc40.x86_64.rpm -07082ff756a9db4851f996eb03e75f2d499a26bd42211a984788af5a1eeed300 systemd-resolved-255.18-1.fc40.x86_64.rpm -5dc7bdcbb590edf79d8ae90bf99a94d535b055816864ffdb2546acafc97d2736 systemd-udev-255.18-1.fc40.x86_64.rpm -65819c502727dc293a71a74b9a5f6b0ba781f12a99c5d5535085f168e5eac56e tar-1.35-3.fc40.x86_64.rpm -0478e12152cc3432a31dfca5ddbc80966800af437c6d7c0b26be307d5e1272e7 tpm2-tools-5.7-1.fc40.x86_64.rpm -c3be8a6d0ea23b1d0bf466b19857b97f7ffde811ad7adec0599161059d84cc74 tpm2-tss-4.1.3-1.fc40.x86_64.rpm -5df98756883badf7743cdd75f5689b62606bff0b74494b20241cb9d78335c251 tpm2-tss-fapi-4.1.3-1.fc40.x86_64.rpm -d35ca6852dfea66d10046dd8b38a77e89443ce2006bc31782abfead826dba029 tzdata-2025b-1.fc40.noarch.rpm -e1d443f7dcaec55eedc34bb66dd798ba9901dba69a169cff46f6c45671a3b3fa unbound-anchor-1.21.1-11.fc40.x86_64.rpm -8eb278cecd9f28fa4131dc402a31c74c427626aae53b2231bb452e745a9e9346 unbound-libs-1.21.1-11.fc40.x86_64.rpm -36ffa617a0dfe523424a28290241a81cd51f7d82e776e58131f16d092d49797b 
util-linux-2.40-0.9.rc1.fc40.i686.rpm -945aa536bc30050abc1870cef167cb944cf78d6628923476db43201a0054574b util-linux-2.40.2-1.fc40.x86_64.rpm -7ec1b5df780c5a30f8e901179480125a6ea87f1f7bad3b69da7f4b351b88c3dd util-linux-core-2.40-0.9.rc1.fc40.x86_64.rpm -b1aa4e816c01c08c18924865640f214f717cdfc66837e53a24b8edfb80a86f9d util-linux-core-2.40.2-1.fc40.x86_64.rpm -673532a506dff0ca46cd4bb5fbf772d2039e4c11e648eafa221f207139ebb798 vim-common-9.1.1275-1.fc40.x86_64.rpm -d32e2c404e54d75fb7c7c4cd3dece12123418f1798a964047fe5ca70221db002 vim-data-9.1.1275-1.fc40.noarch.rpm -968283f6290df2cbf2dd699411b1cca450769b30f28b8b8a9a9f4a4916d5ae4f vim-enhanced-9.1.1275-1.fc40.x86_64.rpm -80194d554770c211bd7c3fa368b3fbb94f7021504d4c87ddae4544a6eb183342 vim-filesystem-9.1.1275-1.fc40.noarch.rpm -69fd53fe41a8811e904b5429b1934a413d88978ec54d5c9e64370be32cbfc2ef wget2-2.2.0-2.fc40.x86_64.rpm -a00cc0a87c60ffbf5495a9796ac7074e6a47e0bebbb8c137d902014cd7ff5a30 wget2-libs-2.2.0-2.fc40.x86_64.rpm -09822d8d386dc81619639415a211b34592b0c5d43f7be288691cc4d933a0542c wget2-wget-2.2.0-2.fc40.x86_64.rpm -cf0306ceed1c6b3be39060d85f16b1953b464d3a625488b170d3b7aadf600645 which-2.21-41.fc40.x86_64.rpm -4ede95a2fa3bc0ae617c8bf3a375b800163d58733b4829b15d9f038505d79fee whois-nls-5.5.20-3.fc40.noarch.rpm -e2195010e857f56b19246f8b821f9391922880b7691b3728a413f540edc890a6 xkeyboard-config-2.41-1.fc40.noarch.rpm -69b64249d3c26a5efbd0ffa5802aa08033822c2d4378a4f44261618de6d38e4d xxd-9.1.1275-1.fc40.x86_64.rpm -9a0f6eb8d2784d7e3ee062c5deb3b9af41e7e6c6d115b6da8420bde453e41744 xz-5.8.1-2.fc40.x86_64.rpm -c6d64a788bfbbedc6eeab2347274864cade272c4ad69d77cb30d8b602e25e7f0 xz-libs-5.8.1-2.fc40.i686.rpm -cbad4c25b1acbf152273c2fae29e42ddffa03414938a7c755b5afa5f2ba45a26 xz-libs-5.8.1-2.fc40.x86_64.rpm -9e263e0a9b656178519de20733f3e0950fef494aa056daaa2004b522ba50b952 yajl-2.1.0-23.fc40.x86_64.rpm -ffab1c8720480b498f65d0d480825ccd890e4f797c3850712879eb04a4739690 zlib-ng-compat-2.1.7-2.fc40.i686.rpm -e50b69054de16d757f5667e3acf2e7439302c91a9c418243467f288dfb79f6ea zlib-ng-compat-2.1.7-2.fc40.x86_64.rpm +49750ebd8f565bdb9c94250faa419863d533e826661f19bdbeab40d14461a80c WALinuxAgent-udev-2.9.0.4-1.fc38.noarch.rpm +3abc25be1ac0f25e823b1202cc496dcb643c1a623f49e2268993d2a5ed2d7597 aardvark-dns-1.10.0-1.fc38.x86_64.rpm +8a1b07de91587e5290ec0be9206741ca3bde0d1fdb9459e89900d3ad5aeed068 alternatives-1.26-1.fc38.x86_64.rpm +858f6ff773a94528451050c17beb16e3bb427ebfab9e1488e99671ff404c5c63 audit-libs-3.1.2-8.fc38.i686.rpm +19416b019f048f97266f7146f7c626a99ba62dc4e9c920c2d08c520d07f28af7 audit-libs-3.1.2-8.fc38.x86_64.rpm +17f200a45179c59193ab7c72a4641b502ab5c524f0e5a0d59fd95aa6f15bffc8 authselect-1.4.3-1.fc38.x86_64.rpm +d2f324c915eb5e14542a55636c8e49c1cd75b17db1a8c7b11693a989629a250b authselect-libs-1.4.3-1.fc38.x86_64.rpm +718d95c40b41c2f0ecc8dc2290ebb91b529ba3be7accbad9c30c88e9ce408349 basesystem-11-15.fc38.noarch.rpm +64d2f098c9196f27e22d96270a79e42b547f4b3c24add0b2e8d3829a2f20556f bash-5.2.26-1.fc38.x86_64.rpm +0551362b7c7efc44188147b482179f477240cb8146129d68e9f8a584af0cee68 bzip2-libs-1.0.8-13.fc38.i686.rpm +95273426afa05a81e6cf77f941e1156f6a0a3305a7768a02c04a4164280cf876 bzip2-libs-1.0.8-13.fc38.x86_64.rpm +43df24cf3974e8f8b5472a0389d519282ee55c7a720460351d0c75c4866ccf19 ca-certificates-2023.2.60_v7.0.306-1.0.fc38.noarch.rpm +152f433af3f709a3f5060505f5b3b009e3da8ea455d6d1dab5c3ed2173f9e016 catatonit-0.1.7-14.fc38.x86_64.rpm +eec292696b1d3db55380f3fc6f4a04eac76cc8a09fa8c795783e9af3c6385627 conmon-2.1.8-2.fc38.x86_64.rpm 
+17dfa6a009e8e80119911c2fbde44b43425f2bfe9abf677f68657f0d96603d2a conntrack-tools-1.4.7-1.fc38.x86_64.rpm +9c2a069283d2249b88a99c956ed9c6e4b85eb2b9fef79b5588865ac332b6bc31 container-selinux-2.228.1-1.fc38.noarch.rpm +d9fb9be5604e68ea59a87c14c09f89cdbaddba7ec5761ecded9211ad976db910 containerd-1.6.19-1.fc38.x86_64.rpm +db4691e52e7d8b9614445428ef8f43a91b0d7069ffd07000c606e7d562df9804 containernetworking-plugins-1.3.0-2.fc38.x86_64.rpm +cd9583500892b5f50f51167f5c4b16855e15808a79e5137e0c6674e19f305930 containers-common-1-89.fc38.noarch.rpm +66f5951f5ae9c8ec9cddf1ff87b0c8aea29298c763712bfec0ed507f57b6d3e6 containers-common-extra-1-89.fc38.noarch.rpm +1100feca307bb7159a00f336fa5303fd52ded761fc25c77fdc5f095e2add29b3 coreutils-single-9.1-12.fc38.x86_64.rpm +75ecf8c60bea53432b973d9391e3bdb1b6cdc1f1cad0a7b56aabfb8b70252832 cpio-2.13-14.fc38.x86_64.rpm +65ed3fa87ea138a51a5280848aac565e996c3217fa9cd9eafa8a4a2c05f7b37d cracklib-2.9.11-1.fc38.i686.rpm +9e13cb10c94f5de3573a2cacd153050d6dad05fe13c50c8fa69406e8f881d3d9 cracklib-2.9.11-1.fc38.x86_64.rpm +c09679c55fdeef598c6cd9118e8a2141da369dcf27ed9e9ede8e42ffc44743f2 cracklib-dicts-2.9.11-1.fc38.x86_64.rpm +3c6985fe1a0388482131411b5a0811658411056ac7b96687225bf77143645ca4 criu-3.18-1.fc38.x86_64.rpm +979323d3e3e891195ebb48edbf1e741f14937ecd3e3639f8c86eabdce0c30b94 criu-libs-3.18-1.fc38.x86_64.rpm +916505d10d67844ea4e03b42ed276312437896663978acd6e2085f7355998389 crun-1.14.3-1.fc38.x86_64.rpm +6809fe060dc93dfed723bd8faca0f5d65e4f6c62aebd2f9f39f679413b260539 crypto-policies-20230301-1.gita12f7b2.fc38.noarch.rpm +d1880b8f3e8a5fa943ba033c96dc964fac3e07c34cbd0bb8ac2b0fdcb57abcbc crypto-policies-scripts-20230301-1.gita12f7b2.fc38.noarch.rpm +418287cd51f9ae9e5b74a18112a062a49ee51d22656dea86d08daba311c93e44 cryptsetup-2.6.1-1.fc38.x86_64.rpm +364cfba8a49773110ea200a533037afd99307b80a28333fc5f900f528a66333e cryptsetup-libs-2.6.1-1.fc38.i686.rpm +070d86aca4548e9148901881a4ef64d98c5dfd4ea158e9208c650c7f4b225c47 cryptsetup-libs-2.6.1-1.fc38.x86_64.rpm +61e1f069ced21f62d486b0dacc14aed2a7fdbbcff84ad40ef9eedd6b450f4afb curl-8.0.1-6.fc38.x86_64.rpm +9400386fc0a427faa3d07c1d43d99f2e49bfcfda5c16d0df035329cd1e6c69d6 curl-minimal-8.0.1-6.fc38.x86_64.rpm +b570b4857289cf32ed57d0d84cb861677a649cef7c5cc2498a249d6593eb3614 cyrus-sasl-lib-2.1.28-9.fc38.x86_64.rpm +faa01592782cad3faec0c0a07a3fa17bd0a793dc906fcb7ffedb975f1d077be4 dbus-1.14.10-1.fc38.x86_64.rpm +6652f5d40acfaeda20555bdd63e964f123e8ab4a52f8c8890e749e06b6bae3e0 dbus-broker-33-1.fc38.x86_64.rpm +a97ebbbaf97348a0bcf967eb8c3c822fc30154cf9679f5a1cb21f7c507077bbd dbus-common-1.14.10-1.fc38.noarch.rpm +66d83dc8eb183afdd674113c82334dd59c67085bff294057533c9d86260064c7 device-mapper-1.02.197-1.fc38.x86_64.rpm +713ad6733908fd52de3cef5d613d08309caa663fe53f972f47c39318ce3edf3e device-mapper-libs-1.02.197-1.fc38.i686.rpm +518af206ca813e823d01ac0819ca29b5804446b24f5765037ff69d809a93cd49 device-mapper-libs-1.02.197-1.fc38.x86_64.rpm +f20b884c9452d4cf839707d4ff38191d34dd2035c87f832c697f6f328fa1b36b diffutils-3.10-1.fc38.x86_64.rpm +9b82b697e3f3f7cb1c6c411a142b09fe71562e7ac34778b1f1c5e6b4d232a8cc dracut-059-5.fc38.x86_64.rpm +8bcdb9b5ce9b5e19c4f772f52c2c40712491d2a953496e415c380a21a792b050 duktape-2.7.0-2.fc38.x86_64.rpm +b25a042f52c8cdc1ddab56fc726d98789ff902d5264d9835f5b910db3e565213 e2fsprogs-1.46.5-4.fc38.x86_64.rpm +1ca049bff926a8ec9b6e0e69f23662934eab96c35d8eda1cf429a2a31f186045 e2fsprogs-libs-1.46.5-4.fc38.x86_64.rpm +3e0ec4d7b4b95d10f58c5269688e03da7abb4d73169c76761f4fc7e7f7797a47 ec2-utils-1.2-47.amzn2.noarch.rpm 
+5b5195bb6dacdd7c78522675ce83784e2befd1bdbadb14c609d9e1488754ca79 efitools-1.9.2-9.fc38.x86_64.rpm +ad7eb0fc4f0c561dd25324219c74eb6fdfb41e58ec3c419c1f17ea841469021e efivar-libs-39-1.fc38.x86_64.rpm +4a554b06daad6d5afae8b49a2615f1d5330c9cb4f0a78287a41fa9d0946d4827 elfutils-debuginfod-client-0.190-2.fc38.i686.rpm +8e77dbab6df03188bad35c578a3054fa9a547b5049f34000f8a3cde722b24c75 elfutils-debuginfod-client-0.190-2.fc38.x86_64.rpm +05841b0b924df5959f3820e0fe3926c1aedb833d20e5c72828560cb23b81a585 elfutils-default-yama-scope-0.190-2.fc38.noarch.rpm +d03a7939acff945b2e9fe35ccfca1d908f75d9c99b1ed52e88c97214076a7ae8 elfutils-libelf-0.190-2.fc38.i686.rpm +b6e80be0b89f98498ae91cd10e22ac837118f55ed3e1d19c0bbc450dc2795d27 elfutils-libelf-0.190-2.fc38.x86_64.rpm +3b76bc2b1546cc433e327767796a84287e0439a89df1d77d05eb5182c087f9d5 elfutils-libs-0.190-2.fc38.i686.rpm +373913149a0086f4b2bb1d73cab7270ec5e8432aa451fa5aa46c7705aec4e105 elfutils-libs-0.190-2.fc38.x86_64.rpm +869676690a68d3741c7cff1bffcab632ed2b05e78a2b8983cf09fad88f16eb14 ethtool-6.7-1.fc38.x86_64.rpm +4925dabd5b56209fe7c5536f12239c9cdba33ff7aae477d8001c245ed8c2e2ca expat-2.6.0-1.fc38.x86_64.rpm +7f7c78f598f7ff131bbe77913b9fc6b7b49d1fce30f2d8505b2d8a85458f519a fedora-gpg-keys-38-1.noarch.rpm +40f7d64e38ae31dcb3e7273c7e089705e816dc131ae87c176e2c1aad02d5b510 fedora-release-38-36.noarch.rpm +ac9ede79357b33f0d0c9087333b0dd3e3fd1cf5ccab5c36310b0ec446390e0c7 fedora-release-common-38-36.noarch.rpm +bacf386d747343cb10a2c3847c426d3e044b7de1742b4918e3b1b78cc1b54bc4 fedora-release-identity-basic-38-36.noarch.rpm +916b75b58e9a2afe5d53cb73fdabea4d0cd8b1eba9f1213754384d0ccd531e57 fedora-repos-38-1.noarch.rpm +196b3612e50069c68d67ffaf3081d25923e2b4ca7e1bad8f124515b3fa472d4a file-5.44-3.fc38.x86_64.rpm +83bb8576a018ae028ff1434c32e1f3dfab66a781e1af9057862a2d7f3d725f58 file-libs-5.44-3.fc38.x86_64.rpm +b0fc6c55f5989aebf6e71279541206070b32b3b28b708a249bd3bdeaa6c088a4 filesystem-3.18-3.fc38.x86_64.rpm +79986f917ef1bae7ca2378b16515ba44c19160f5a5eae4f6b697eda160bc26c1 findutils-4.9.0-3.fc38.x86_64.rpm +d5ae6a7d99826a17d163d9846c2705442b5792a7ccacc5169e4986cdf4b6bae2 fuse-common-3.14.1-1.fc38.x86_64.rpm +56df47937646df892dad25c6b9ae63d111328febfe86eb93096b8b0a11700b60 fuse-libs-2.9.9-16.fc38.x86_64.rpm +088ebe20ac0854c1f216883aa1f6ed8dfc7844807455f4acfef05b7a4b8509db fuse-overlayfs-1.13-1.fc38.x86_64.rpm +55ca555fe815bd360b08500889b652f50fe4c56dfafbed0cc459f2362641f1a0 fuse3-3.14.1-1.fc38.x86_64.rpm +f54340fec047cc359a6a164a1ce88d0d7ffcd8f7d6334b50dc5b3d234e3a19ac fuse3-libs-3.14.1-1.fc38.x86_64.rpm +e607df61803999da46a199d23d4acadb45b290f29b5b644e583c5526d8081178 gawk-5.1.1-5.fc38.x86_64.rpm +254c6789b4a98b96a53172e1b3fb866f71ea29a5b5fa7b39f072f33c27d897bc gawk-all-langpacks-5.1.1-5.fc38.x86_64.rpm +eb376264750aae673aa2a3218c291756023dea980640b30a3efe0f2199ff3889 gdbm-libs-1.23-3.fc38.x86_64.rpm +276c7af4b13262c0307cf2528c6d79c859ed348db68c0d2780869ea4b179dd02 gettext-envsubst-0.21.1-2.fc38.x86_64.rpm +4fb6fcf7eef64a48666ff9fe5a46344087979d9e8fd87be4d58a17cf9c3ef108 gettext-libs-0.21.1-2.fc38.x86_64.rpm +3837cbe450ceb59e1f9e7469aeb6ec98e08150773b83463725acfb2ebb77a98a gettext-runtime-0.21.1-2.fc38.x86_64.rpm +99d5c1b62e51a0cbba487122e28fb795ebfcacbba35cccd1260a29b55aff221f glib2-2.76.6-1.fc38.x86_64.rpm +3722de2434481e86312770cdd617b1c2bcdbd0cd1e1bb26c02983a1c9e8f8726 glibc-2.37-18.fc38.i686.rpm +b889f34cd5cba81ec30290756278c51955f3280daaa9c443a617f2eca2e61496 glibc-2.37-18.fc38.x86_64.rpm +8dfc9cc943ea05802e99d1eadb1100cbc936a96be5074cde8dc18e504cd285d1 
glibc-common-2.37-18.fc38.x86_64.rpm +665dcc42ce82732bc471e8cde915d3f21dfd4beb9eb09090c9db1b379768580b glibc-gconv-extra-2.37-18.fc38.i686.rpm +86086e9a77b047c2966fc69963592e4870cfcbe94fcf536fcd00d51df6eb478d glibc-gconv-extra-2.37-18.fc38.x86_64.rpm +93cefd1623009d2f40a086bdecd5d3151b9b7f74ae481225c48c281940f2f52f glibc-minimal-langpack-2.37-18.fc38.x86_64.rpm +69e48c73d962e3798fbc84150dfd79258b32a4f9250ee801d8cdb9540edfc21a gmp-6.2.1-4.fc38.x86_64.rpm +940192d99005896a29de3c682259b3505652778d58ec74a003639f3ba3193a1a gnupg2-2.4.0-3.fc38.x86_64.rpm +cd41c94b8c668602f7fb5eae595e5d5c34bd1b91690b5cc06f4c8c199794dfa8 gnupg2-smime-2.4.0-3.fc38.x86_64.rpm +89faa1719b2fed71b63eba5db61783fe42a31a86a5da677fd6cb0a5879d6751f gnutls-3.8.3-1.fc38.x86_64.rpm +86475210e5d0994dbc614bdc2ad1bd85d37f1e6c2aeee08f41d803195691ee89 google-compute-engine-guest-configs-udev-20230929.00-1.fc38.noarch.rpm +e0481a0fd263907193fe9f3f080a17e89de1ef1d8a490078a6225062b4eec761 gpgme-1.17.1-5.fc38.x86_64.rpm +ad16ec814c4423d007d218a3f45d2e39d3dab00fc8c0d75eef176041594e3970 gpm-libs-1.20.7-42.fc38.x86_64.rpm +60ed241ec381a23d03fac733a72132dbdc4ba04c412add78bfc67f1b9f1b4daa grep-3.8-3.fc38.x86_64.rpm +28294f40575cdc5573f5f6e8fdefb1d625569907b08d5016689321f03bb84326 grub2-common-2.06-114.fc38.noarch.rpm +e78f25a41c3b72e0890226983370ef811b6cd008a81cfb8a95cd5e3de747feb8 grub2-tools-2.06-114.fc38.x86_64.rpm +89adaa3f5e4ceca836a89f525f6ed4a46a33e7f3c5de8b2a688a48a61d14368e grub2-tools-minimal-2.06-114.fc38.x86_64.rpm +5e95f1f40c3242809a7a047543a57046d16e5df811aa816c4aa2b0cc8b883b8e grubby-8.40-70.fc38.x86_64.rpm +cd17ffd09699224216affbbc765dfda04e1b5ccebb8e95af45a56c54ff257e2b gvisor-tap-vsock-0.7.3-1.fc38.x86_64.rpm +8ec6f2f11b854734c53b5d43638d08740b3b36f981c495d0ca17bf044b370248 gvisor-tap-vsock-gvforwarder-0.7.3-1.fc38.x86_64.rpm +166e842798813a72e4d075dcd9b6e814d7ad3fc8d1b5860281cffe7a68784b25 gzip-1.12-3.fc38.x86_64.rpm +95c1a55fd14757d33b9c8e2a85dbcda5601bc20c166f983dacbb410849fad4da iproute-6.4.0-1.fc38.x86_64.rpm +74e4a1b67ba1772df7bedbb2510a45feb110c58902c805cd105e9693c6875ddd iproute-tc-6.4.0-1.fc38.x86_64.rpm +86906f04a9f2e96289ae3dd71e3069af4fd42df7de87d7853c790f50f5baeeb6 iptables-legacy-1.8.9-4.fc38.1.x86_64.rpm +abcb71db54b690f8100f34f201fba60c21eae13e052f6b221eda7e1ee8bcc04c iptables-legacy-libs-1.8.9-4.fc38.1.x86_64.rpm +767c6eccb8dda9f9617d245233822f91c0ed478afc421c1a1ff09584b6484ade iptables-libs-1.8.9-4.fc38.1.x86_64.rpm +d5f329b2d0ec7ed50b047701f7c54b37f5ffabc27e57c2fb5462b32bc9b41583 iptables-nft-1.8.9-4.fc38.1.x86_64.rpm +f74d1e8ba95ff5df02477ba6430d09476e8a1af6e8476453d08d99b276924b25 jansson-2.13.1-6.fc38.x86_64.rpm +7fa521efbcc6a27d679a2ad6ab7d07a8a742d9de49458d4b4352f528c8c78e1d json-c-0.17-1.fc38.i686.rpm +d111f87bed4f918bbd720d509fd97072b345f3a5998910d0a0413104b6fe98c2 json-c-0.17-1.fc38.x86_64.rpm +8a3a4007d921c6a9b372e4573520866b445c18476f907b023ea19b436436ec33 kbd-2.5.1-5.fc38.x86_64.rpm +97fcab8f93c5213714e0a9d15e48440c567d2ac39e4ed12743198268082f0583 kbd-legacy-2.5.1-5.fc38.noarch.rpm +2ea5dfd9d8c8fe7f61022b41fd6f8a2653692acf30253f45e1dc67def1648f32 kbd-misc-2.5.1-5.fc38.noarch.rpm +578e492c3e144f3d0469420823d2d3733b2c0428e1ecc3b542f3d8b465567762 keyutils-libs-1.6.1-6.fc38.i686.rpm +b66376e78fe54024531d94036d10b22b5050d52a9e65682b36dc7fdf24405997 keyutils-libs-1.6.1-6.fc38.x86_64.rpm +7419671c64795b96be18231e2f5d3f95eca8e6a71771863ac035f961041c1d7c kmod-30-4.fc38.x86_64.rpm +b4021011438edc3e8a8ef6535f84e72829f6346716d079bd2cf032f72deff3f7 kmod-libs-30-4.fc38.i686.rpm 
+19f873b67f23362074c03d5825e709ad521278c02e44bdeb30eba6d3bb3a8e0f kmod-libs-30-4.fc38.x86_64.rpm +ac8a7628a2a4b1f742fc90719145f50cfad9ecf67e3309fcf623fb3d82c2a768 kpartx-0.9.4-2.fc38.x86_64.rpm +96c1846341e14589a7c66f4cf21bcc0788225b44ce652c5729299a701d31a2fa krb5-libs-1.21-3.fc38.i686.rpm +9ea8a9ddec5b16c157a9b3a6661957da3688969fa94c55ae9e0ba4fcb07539b9 krb5-libs-1.21-3.fc38.x86_64.rpm +cd85189eaf04f22d46b5d704d454298735bcc830ddeac035c92091335367ba46 libacl-2.3.1-7.fc38.i686.rpm +548bbee7926e3a5677c5c3495007c3494512e2ad70f38f6fd639e1e945168cdd libacl-2.3.1-7.fc38.x86_64.rpm +0d0890dba8274458d068b981164111f554b47632bf9c82d1ec41c14697f2b4af libarchive-3.6.1-4.fc38.x86_64.rpm +0ffba864821a315749a7ad1773eb43e506d26d4ef633cd0940188e76add8ebd4 libargon2-20190702-2.fc38.i686.rpm +dd044973c572e64f505f3d00482249b2b4d71369babb5395a22861fd55b21d79 libargon2-20190702-2.fc38.x86_64.rpm +03c7ea2565af3b61a6ff0d5d4e0e5c65747053fc43903b913a46337903c6fdb5 libassuan-2.5.6-1.fc38.x86_64.rpm +8b43d1f6b92bf174e95ec60e2d6f0e5b2ed76177515c4e13e2b202d9a594c139 libattr-2.5.1-6.fc38.i686.rpm +d78d7bc485f099bb08c9de55dd12ea6a984b948face1f947de6ec805663a96c5 libattr-2.5.1-6.fc38.x86_64.rpm +9e73a2b591ebf2915bfbe7f9479498a73c982a4c74e96cc555930022e3ef0aba libb2-0.98.1-8.fc38.x86_64.rpm +dca5cafabf192d1f5abe37fa06425877bf74bb6e8c5ce5cad577274b18169b94 libblkid-2.38.1-4.fc38.i686.rpm +21b5a1a024c2d1877d2b7271fd3f82424eb0bd6b95395ad3a3dae5776eec8714 libblkid-2.38.1-4.fc38.x86_64.rpm +8079443881e764cece2f8f6789b39ebbe43226cde61675bdfae5a5a18a439b5f libbpf-1.1.0-2.fc38.x86_64.rpm +faea0f6f20d52f95031352fb3f7a35d5fe2065b736694decee5728fa9a153907 libbsd-0.11.7-4.fc38.x86_64.rpm +04fdf1cee0fc12ff10757a07beb1dd014a0f23def582255ff0dbd8472868f08f libcap-2.48-8.fc38.i686.rpm +df1ecff1c2d83b5256a03aaf9bda20cfd86def263645ddd677aaa3facc525561 libcap-2.48-8.fc38.x86_64.rpm +5257031cba9a8791a277994e026b0f4c7a1cf2878505f5e1ed463fa670b67f05 libcap-ng-0.8.3-8.fc38.i686.rpm +c0b6770708d273bcbf83d78f87353ca03629150e7a2f0efcadc24c41ca140500 libcap-ng-0.8.3-8.fc38.x86_64.rpm +5922028bb5642faf00d781f34bf105ef30f1988932b4b80f6bb54e9f6eed0fd6 libcbor-0.7.0-9.fc38.x86_64.rpm +ba35e2297e78af6c55bbb2a3e46a1d0819ced548088023d32f8f5dec2a9dfb88 libcom_err-1.46.5-4.fc38.i686.rpm +4ed3e7b6b0727b86ae9af17bd4839c06762a417a263a5d22eb7fcb39714bb480 libcom_err-1.46.5-4.fc38.x86_64.rpm +8835069e84135c300459175ad85bc78f278263a392cfdf692e13c6e99c9d2c49 libcurl-minimal-8.0.1-6.fc38.i686.rpm +891b9b131d7b467a55864150a059d70c5dcc0e27696ab19e326decd3c95ab6d7 libcurl-minimal-8.0.1-6.fc38.x86_64.rpm +d7030af9e9e0dd9afc5b9ee02839c94757d6a4f8ebd3b21e6d81ba6141a76d46 libdb-5.3.28-55.fc38.x86_64.rpm +377c739918e9636e36218c7074316b7817e7cba9cff862198c478fb1c028112c libeconf-0.5.2-1.fc38.i686.rpm +56768f18f856353a2344f47f065b5f6dff164a5188d466df4bc64d675f455e42 libeconf-0.5.2-1.fc38.x86_64.rpm +974a64a10a3021de8a440ff4810a720f738951abd5bb944110cb9355d4ae8fa8 libedit-3.1-45.20221030cvs.fc38.x86_64.rpm +e9741c40e94cf45bdc699b950c238646c2d56b3ee7984e748b94d8e6f87ba3cd libevent-2.1.12-8.fc38.x86_64.rpm +768346084add1264110b08521694b0b25506b9c8b4bdbc53dc492b05cf70d052 libfdisk-2.38.1-4.fc38.i686.rpm +2fb7ee2d94f7ee34cff49ab28659c07b075ed67ac147f817e19d8ee8e0adbc9c libfdisk-2.38.1-4.fc38.x86_64.rpm +cd08ba5d43459f6e3bbb3cc86caee43b74b38d551d69eae3e86861c2622da1cd libffi-3.4.4-2.fc38.i686.rpm +098e8ba05482205c70c3510907da71819faf5d40b588f865ad2a77d6eaf4af09 libffi-3.4.4-2.fc38.x86_64.rpm +0a30628b39d9c5ca81c4e073dfbf64d543284f17d4ae1325e23e3eda55f92fd9 
libfido2-1.12.0-3.fc38.x86_64.rpm +10bef1a628b5691f7dc0c5e17922e98b914522c6f7c7cc02b496f63c79a57f18 libgcc-13.2.1-4.fc38.i686.rpm +18142b95b05275d198bb77cefbf0be8f2c29c60293e2e99b013f6f7e28595969 libgcc-13.2.1-4.fc38.x86_64.rpm +ef4b2686134e6be036755ee093aad43eb0ce4a4c84c93a2defb755cfeb398754 libgcrypt-1.10.2-1.fc38.x86_64.rpm +fc552c5c10bccff56930ef74422c1774dd6fd4900f61376f7602ae3f05de52d9 libgomp-13.2.1-4.fc38.x86_64.rpm +40b98bdd00b44fbf808df1d653ecf0f0ed8549643d52b68da2455c5a878f0021 libgpg-error-1.47-1.fc38.x86_64.rpm +9f2059c5d699f3dd2337f0872968123a06cf56b9f349d58bd64a5ef22a9815b4 libibverbs-44.0-3.fc38.x86_64.rpm +cb38773f9a2cd9376e860963b16db0e5125b7cc6f9e9b2779eff6162baf0070d libidn2-2.3.7-1.fc38.i686.rpm +e7e1f6b3af094a9ad5c27dc33b72cbf266640a0a50c412e2f54bec5ef10e5fdb libidn2-2.3.7-1.fc38.x86_64.rpm +75c0097330fa3c995e80b7791cbe7baf75d86f3523f67b3becaf37360fdb4b16 libkcapi-1.4.0-5.fc38.x86_64.rpm +e552fae193775507d8264f7a856fbdc51f7e594d7d8726f181312aeb9cf8b973 libkcapi-hmaccalc-1.4.0-5.fc38.x86_64.rpm +4c5af4c1d44e1720beb79fd4202d3754e2c47c4faeb4ff5bdbe2ee3d24c9a5e0 libksba-1.6.4-1.fc38.x86_64.rpm +96213462b3d77ef8e73d72510dd70210e9af4ae28f7466b88f2740743e6ca49d libmd-1.1.0-1.fc38.x86_64.rpm +f79da3b0edf002221250bce8352015009f8259793278ae59cb74d6c9e0c8395b libmetalink-0.1.3-30.fc38.x86_64.rpm +729b80bbf6ca427c21dd28d1f7029b736dc42f3f4f0c43d322ddfa168bc2ce9b libmnl-1.0.5-2.fc38.x86_64.rpm +34b0fbfe9493d0e0762bfe42137238f3eb9cee0832e1d816f5c9f0a48ac798e9 libmount-2.38.1-4.fc38.i686.rpm +14541cda2a4516ad6a17f46be6b7ad85ef5f6508d36f209f2ba7bd45bc1504e2 libmount-2.38.1-4.fc38.x86_64.rpm +db30396e0f1eb0ac81d353524bc2e022371a5e5a3bed363e101e461d8d248fca libnet-1.3-1.fc38.x86_64.rpm +cc6bca4b52d6aaca9b1cc1f4f02721301fba447a3d2f009a7b9e9c38da3eb10f libnetfilter_conntrack-1.0.9-1.fc38.x86_64.rpm +0054a179032b916330c202a4a5713d9403fedc0809ffc215e6345705f869633c libnetfilter_cthelper-1.0.0-23.fc38.x86_64.rpm +95a9876660af858339b38a2272c7307917f4cc15a6f4ffac4fbd4d6f6743473f libnetfilter_cttimeout-1.0.0-21.fc38.x86_64.rpm +e86f7341f80e9143a9cc1df8b7fcf864eccdea487fc34500b412678501b37146 libnetfilter_queue-1.0.5-4.fc38.x86_64.rpm +3c981697fe61f23ad41b615b4c3197d023ec70f713395fc3104b837c61b74294 libnfnetlink-1.0.1-23.fc38.x86_64.rpm +06f6aeaded732bcff2d7dd5c8b1c430bebc3834d0968f20e3c2918ae15502ace libnftnl-1.2.4-2.fc38.x86_64.rpm +13161773e6afa5439ff8ef19faaa3baec35930a07df21901b01726eeda4e2644 libnghttp2-1.52.0-2.fc38.i686.rpm +9daf3a24341811a5feded45fe3e111a1d01f6a91854c231ff56fa8dffb8d40f9 libnghttp2-1.52.0-2.fc38.x86_64.rpm +a9d80e55bd59e26338a7778de28caf9eb3874f8d90574c879bae1302beaa862b libnl3-3.7.0-3.fc38.x86_64.rpm +28697cf1b5cb4d62c3bd154fc24a23d91a84a5bda2f974fb64bdd04e91b6cec5 libnsl2-2.0.0-5.fc38.x86_64.rpm +bf1e07244e3c9aacfe96b2b7f21e7bb678d1c52042885d3f0518301de38dd759 libnvme-1.4-2.fc38.x86_64.rpm +f4d87eb23450cd3888af3d984ad623d229e5bea482188d25f996e61a939486bf libpcap-1.10.4-1.fc38.x86_64.rpm +e0bccc94a740acf317caa4fa1fae6b0d57442ef4be36341472b7db93d588ec13 libpsl-0.21.2-2.fc38.x86_64.rpm +4625cab157ff1760c2f5053a3f75cec21b366f80c3b1ff53bf0db1a033a28439 libpwquality-1.4.5-3.fc38.i686.rpm +aefb7d2d96af03f4d7ac5a132138d383faf858011b1740c48fcd152000f3c617 libpwquality-1.4.5-3.fc38.x86_64.rpm +e3ef79196bc8cb77c35bd20f265f5a551ec3e8482a82ad92af3e802f9a302ad7 libseccomp-2.5.3-4.fc38.i686.rpm +dec378b594b79258dd8b44836c5371f316bcf5e4596d53dd84badcb6d00090df libseccomp-2.5.3-4.fc38.x86_64.rpm +46ed6b8fee11c16bb8b58f698dfba9874a8f393c1e72eb7f9a7b6802ac68dd1a 
libsecret-0.20.5-3.fc38.x86_64.rpm +9c938bd9917a9f977ab0572e1cea573f2a886a0a0a48587463faa5ed1d6b22e0 libselinux-3.5-1.fc38.i686.rpm +790c6d821ff575ad51242ec6832ed61c8a3c4e0ece245c3dee3292d19acb23b7 libselinux-3.5-1.fc38.x86_64.rpm +78a15621e7e3dfb5a65b8b8aa482cf5b07f08bcef217ad29435e299d6c8aec74 libselinux-utils-3.5-1.fc38.x86_64.rpm +1b6b7ad33391919a3315e398d737a764121e2fc9581f75318a999e02bfc0c7c4 libsemanage-3.5-2.fc38.x86_64.rpm +14292c07496f6db6ef27913d6d144e01ce7017c57ef990edff3d04a443e5507d libsepol-3.5-1.fc38.i686.rpm +15ec70665f200a5423589539c3253677eb3c15d7d620fd9bdfe2d1e429735198 libsepol-3.5-1.fc38.x86_64.rpm +ac0a6bf295151973d2e34392a134e246560b19b7351ced244abc1ed81dfe5b8e libsigsegv-2.14-4.fc38.x86_64.rpm +b35a0d6b1ecb151982b6a9342d00e8da9663e8a6da6b21b7c559634f7f29fd2d libslirp-4.7.0-3.fc38.x86_64.rpm +b71b1633a2b514d27dd9332d733f61ddd8c5a501360f9e8bafee313b793d6ad0 libsmartcols-2.38.1-4.fc38.i686.rpm +dbf5c73c71c798533cbecfa54ba28c42878c455df8cb382087d8a758c3ffe290 libsmartcols-2.38.1-4.fc38.x86_64.rpm +17da6760ce632b1726de4291f287f2928a70becc1ed414bb2e8b359dd1b7d815 libsodium-1.0.18-11.fc38.x86_64.rpm +faccff819eecffcee9dad49bda930a007e78b905b775b4ac0103121d7a8100db libss-1.46.5-4.fc38.x86_64.rpm +c70b6fa3ddd6765e2b1c9a8eddb4c0446214c55afa85fed7e79dfc98d91d10ee libstdc++-13.2.1-4.fc38.x86_64.rpm +261adde7d452da52848797f1ed22a219e86a8cf516d2028b0b5f892c63aedf6c libtasn1-4.19.0-2.fc38.i686.rpm +8b49dd88579f1c37e05780202e81022c9400422b830d9bdd9087161683628b22 libtasn1-4.19.0-2.fc38.x86_64.rpm +0428e7ed49ff02e96b73bf6789ee096283819931a6e62f391281622b3cf2cc8e libtirpc-1.3.4-0.rc2.fc38.x86_64.rpm +aa187ea45be32306620ad8ec6318d755075b2cad99fba7c01dc4763228a98190 libtool-ltdl-2.4.7-6.fc38.x86_64.rpm +981ba63cde3b9277cd01152a045f1269e586a53704f0a8f3409eb0bd7b4d3110 libunistring-1.1-3.fc38.i686.rpm +c4012952872a08b9662963b13e29f89388ce6e695e68fa8c37eb6e62bad62441 libunistring-1.1-3.fc38.x86_64.rpm +cd0e8eb5d983a985f7df99718fde6245997bdf088fb6086442a883ddb9ed03e3 libunistring1.0-1.0-1.fc38.x86_64.rpm +805da27b46f0d8cca2cf21a30e52401ae61ca472ae7c2d096de1cfb4b7a0d15c libusb1-1.0.26-2.fc38.x86_64.rpm +1a3ce5232d21b6f41c4bf290768684f2e665ca600d95eddfba4c4ada02854b86 libuser-0.64-2.fc38.x86_64.rpm +8ad1a4a44f1052c66318ca789042bedf43d7eea6282ab7872bfecd693b1393a0 libutempter-1.2.1-8.fc38.i686.rpm +c5c409a2d5f8890eeab48b27b9f4f02925a6bbabeb21ee5e45694c7c9009f037 libutempter-1.2.1-8.fc38.x86_64.rpm +b1a0a577a7d03111397b172c63ccf5f22bff39d1e97487d8f6962afc704020ed libuuid-2.38.1-4.fc38.i686.rpm +876ef0556ddeca2c8f56536c80a2f6e0f64357f40bacb92f483adb8a0ff29af2 libuuid-2.38.1-4.fc38.x86_64.rpm +79f80f95acb8fceb4beaa6c1343bc07e9cf6b691ec0a77b79c82a2e74c5845f6 libverto-0.3.2-5.fc38.i686.rpm +292791eb37bc312e845e777b2e0e3173e2d951c2bfbbda125bc619dced7f40bc libverto-0.3.2-5.fc38.x86_64.rpm +39851f80ad4890169e7979b248744acba5cadbc46c58e8e0316aaca1fcf386ac libxcrypt-4.4.36-1.fc38.i686.rpm +52d90d6fbd1ad93f81aad2c4aa38aa52b1c1c94b83560ede25c91b5542da97a4 libxcrypt-4.4.36-1.fc38.x86_64.rpm +e5befc91bfd39ab624cf40c8a1ed32caedc0f74f9ea4fb913e4d9915c1d708c6 libxcrypt-compat-4.4.36-1.fc38.x86_64.rpm +06745f933cdacd7ba3ce77a88016d2a16a1d1887a35b09fe97e574b5743cfa25 libxkbcommon-1.5.0-2.fc38.i686.rpm +507ffdb912296768699a70c30169077b531b1612e47041551bfe523a4b7b6c7d libxkbcommon-1.5.0-2.fc38.x86_64.rpm +be808a1034e6cbf1bf789c960c3e9932b256006d3c29cd18ac3e52fb9f636377 libxml2-2.10.4-1.fc38.i686.rpm +13f2ec62e10333000a13123a4cae5ebbda270c32ece03247e45bd2b244e7bba5 libxml2-2.10.4-1.fc38.x86_64.rpm 
+741fcbe3e5bdbcf59d90c2f89520dc4001ac4816b57c5c77f6e8a06b53d119ab libzstd-1.5.5-1.fc38.i686.rpm +7d9a98372505c9c1dff7dfea558b20a44820fda416a609467790577a848de110 libzstd-1.5.5-1.fc38.x86_64.rpm +27958b2623e06faf37e427fd4c9750a7b9df35ce38365a93caae068d24ebc95b linux-atm-libs-2.5.1-34.fc38.x86_64.rpm +f0a48ec36269d83120425b269e47ba5c86d5a9a44e0de2665c1d55c10732d25b lua-libs-5.4.4-9.fc38.x86_64.rpm +52898293cf358e998ef24ea977d6381b0a7229ac050f1c4599c5c25823256f7b lz4-libs-1.9.4-2.fc38.i686.rpm +96a8f495896c0ff7520c2cc5c9c173d134efc9ef6c6b0364bc7533aefb578d41 lz4-libs-1.9.4-2.fc38.x86_64.rpm +36a1f0412e495e618ccd8636de3dcac9ad37d5d4c287a1acf2c9af4baa7745e0 memstrack-0.2.5-1.fc38.x86_64.rpm +d5e2e8aed96b57db482e556f593efec98a3238c3a697904eb2eeaf2df7ac4d9e mkpasswd-5.5.18-1.fc38.x86_64.rpm +d9196608152ec34832cc82d3cefa90748f30c75986f2250d8cd0fabc3d0ceba2 mokutil-0.6.0-6.fc38.x86_64.rpm +22f217f91fc2d2a666304c0b360520b13adde47761baa6fed1663bb514b6faf5 mpdecimal-2.5.1-6.fc38.x86_64.rpm +e7c9b0c39f77c6fdf68ff04d8714c10532907a8a9c3e76fb377afe546247737f mpfr-4.1.1-3.fc38.x86_64.rpm +1c055813f64e964a2647da2c889fedb183d4ff27c8a4f4b0674bdbedaee9386a mtools-4.0.43-1.fc38.x86_64.rpm +8dce127ee00b28925e68e2790ff923b51df24441839f1551f30b60b3ea642a9e nano-7.2-2.fc38.x86_64.rpm +9a9dde9e280b31ac75e38051782f72f9dd6ea6077c04e0457b087658140895c5 nano-default-editor-7.2-2.fc38.noarch.rpm +743e4ac0256db67d146a25ef464b5a784d90bc1a47872c75b8e08e0263bea19e ncurses-base-6.4-7.20230520.fc38.noarch.rpm +f0c83b76a357501ac2c9aab9c11bfe4f58e9d24197569edb682ad50ef670e906 ncurses-libs-6.4-7.20230520.fc38.i686.rpm +93ba776a8bbb47b0fd5e4c3795f7ae3d8eca6d0285c21490d5a067be827bed92 ncurses-libs-6.4-7.20230520.fc38.x86_64.rpm +d1e081f22eade2aea028e152c6690089f44fa71dc536ecccf7f57dabedd0bebd netavark-1.9.0-1.fc38.x86_64.rpm +605d6710ba42104ce0434bb37b0ca9a922a8392c14175bc782f8acb70b94c3aa nettle-3.8-3.fc38.x86_64.rpm +1875738a010be9b1d593f02a95d14afba3742e19354641b851d00b6aaac78246 nftables-1.0.5-2.fc38.x86_64.rpm +c9e8b62c6af7a60a505f881d3cc35294d8b4f51c671c05401133b02ab229c2a7 npth-1.6-12.fc38.x86_64.rpm +db4c16743be18e806ac6e64577a981b227c686ba520161231e52c57831b0188c nvme-cli-2.4-3.fc38.x86_64.rpm +18139e4f2093499d384a1b8c80d51b661c252e4039c1235a6761a280d3260543 openldap-2.6.6-1.fc38.x86_64.rpm +be8a3e233f7a19d84391f2d42f71276cc2053702635e00089b2cbe97372cff18 openssl-libs-3.0.9-2.fc38.i686.rpm +dab96630b0d442164025469b4dce3eccccd482e76ad8ae6f6392045eae147f54 openssl-libs-3.0.9-2.fc38.x86_64.rpm +9cec86553d3cd2facd166d5699fad9624d9b43a39da6fece2d54146bffae5e4d openssl-pkcs11-0.4.12-3.fc38.i686.rpm +cfa3d6feba480abdeb425bc045b525c641c7a864625b1864c2f5721903e364d8 openssl-pkcs11-0.4.12-3.fc38.x86_64.rpm +06d2101874ea4d14b4c73131c5c359d1a2e0ebe0c36a501250026e7b867a0a86 os-prober-1.81-3.fc38.x86_64.rpm +bf2316f23c31d27eeb86855d7c85bc1a696605156a4104d68079f43f7dbe60be p11-kit-0.25.3-1.fc38.i686.rpm +d64a3c3aeac4056185551050ca5eaac2427fc161d93015c20f0012cb6bab53f5 p11-kit-0.25.3-1.fc38.x86_64.rpm +dea697370ede1848c1a54fdccebf792155d98cbdc5de89e85bbc75ec7c94de8f p11-kit-trust-0.25.3-1.fc38.x86_64.rpm +065b99f3541fd5f1281be2082b77e48b835a591776e92f2327bb0462c67baed0 pam-1.5.2-16.fc38.x86_64.rpm +21c59eeb1ad62c09aadca6a4168f927ff943f82e4f764d589f5acb2ab6efc993 pam-libs-1.5.2-16.fc38.i686.rpm +63e970f7b3f8c54e1dff90661c26519f32a4bf7486c40f2dd38d55e40660230e pam-libs-1.5.2-16.fc38.x86_64.rpm +8d846f866158409c775656b39e372d59cf224936d29972d3b6d14e40d3b832ca parted-3.5-11.fc38.x86_64.rpm 
+7a4cd426505349a948fbc5bcc24545fbdfb7807d525a9c5a41e75dd57b79dccf passt-0^20240220.g1e6f92b-1.fc38.x86_64.rpm +43603df046850c4cf067960d8e47998de5c33955b1f865df8d66f20c1b7f676a passwd-0.80-14.fc38.x86_64.rpm +f2737b94fa026a56c7a427f8f4221ff379ea4c4c32f2fff9d95a7a7836dcc6c7 pcre2-10.42-1.fc38.1.i686.rpm +cb1caf3e9a4ddc8343c0757c7a2730bf5de2b5f0b4c9ee7d928609566f64f010 pcre2-10.42-1.fc38.1.x86_64.rpm +756f64de1e4673f0f617a9f3f12f74cceef5fc093e309d1b1d5dffef287b7d67 pcre2-syntax-10.42-1.fc38.1.noarch.rpm +48efa34ce50ae936ab9fe437aa59396d4557ff39fa22cf36c5460d3a986e502f pcsc-lite-1.9.9-3.fc38.x86_64.rpm +aa02afed121e9f5fa9590d75c0b237b6c497ae58c91e0022844b38f594feaeb7 pcsc-lite-ccid-1.5.2-1.fc38.x86_64.rpm +07dc5536982278f38c89517465384ef9f376cd27f0b200806268723993da01ad pcsc-lite-libs-1.9.9-3.fc38.x86_64.rpm +e7509cf0ec99ce89e8e88e9352f733eb9ad14a9c77e0bbfd64826a3de0e4a150 pigz-2.7-3.fc38.x86_64.rpm +e521385a42b3350c0d17e3cbddc0b69c9cf4052d1b77cc8bea2179e05b7d374a pinentry-1.2.1-2.fc38.x86_64.rpm +4ebc0a04029600d8c289eed099eaf05a45e1213a6a68246a22bb7a8eedf49c2f podman-4.8.3-1.fc38.x86_64.rpm +440fc5c6e6a37c47f13d1fb53a03f5cb0155592a5bcf9312e2d083d4bed0ad40 policycoreutils-3.5-1.fc38.x86_64.rpm +716096df1b34d768c3e6a5985de8e1ee58b2183ad9f987aa754e592bd2793c70 polkit-122-3.fc38.1.x86_64.rpm +56705b6a1526960d534b0d3e4247deb4eef2b5fa64ceb03544281b8e9bdc4597 polkit-libs-122-3.fc38.1.x86_64.rpm +7ffa0438229228bf5ba18945936d52c3620c95f4a3ffc5c5f0f8774fececac0a polkit-pkla-compat-0.1-23.fc38.x86_64.rpm +fb3fabd657b8f8603c6e19858beb0d506cf957bbca2f3feb827b64c94563b31f popt-1.19-2.fc38.x86_64.rpm +3d5eeb8914b509cebcdf9eb67a70b701727b0b9a77078cd5f6379d751febb969 procps-ng-3.3.17-11.fc38.x86_64.rpm +8b3f681cd05e071d4c7b21eff4684a3ca7674599ee984cccd6a69a685eb8a41c protobuf-c-1.4.1-4.fc38.x86_64.rpm +6983318d6b2dfd4eea29448e9853b74b1d009ab37be7add3ff304ff0483714cb psmisc-23.6-2.fc38.x86_64.rpm +5d57133d4f5ace3ca45aaa59ae4b8f6e907a51df6503f3747ed0e5316de3b4dc publicsuffix-list-dafsa-20240107-1.fc38.noarch.rpm +e59d71a66652002e1bc6331db17a061bd3ceacf1a449be8af9f7cefc50af4ad7 python-pip-wheel-22.3.1-3.fc38.noarch.rpm +7417816bd96d7b49e5a98c85eba313afaa8b8802458d7cd9f5ba72ecc31933e3 python-setuptools-wheel-65.5.1-2.fc38.noarch.rpm +e7837a16faeebd935b1033f8c872cd05107d88319f7af258c80755ce4cd36260 python-unversioned-command-3.11.8-1.fc38.noarch.rpm +51eeb7d287b27728ee3d8eeac0ced9ba5e4a3bc49bf63f30b979a4aaefd95eea python3-3.11.8-1.fc38.x86_64.rpm +c5263f62cf069abecb9a32adbf9f80e4f61d2cb558fb4856a08a9202ac47a7d5 python3-libs-3.11.8-1.fc38.x86_64.rpm +c029ef244792ba632ef8ddc14f6d9e3c43f494b73e87e12782fab3a3d29c0304 qemu-user-static-7.2.8-1.fc38.x86_64.rpm +1e4573cebcbdeb383b76a2cef2315e55900d50f16fb49f994b83fc4892da9c9b qemu-user-static-aarch64-7.2.8-1.fc38.x86_64.rpm +5fd2e214b07c94f46934bb737c4d4976be5cb5b7b85e224681e90a644cb19a44 qemu-user-static-alpha-7.2.8-1.fc38.x86_64.rpm +efe35f24d61355f1e9d627b9c0a03e0c221f612d38bc791ae8e0eff6ca3b8040 qemu-user-static-arm-7.2.8-1.fc38.x86_64.rpm +f2484eff42fcb513af733504132fb86964e159df8520d4d235415f67b1f12287 qemu-user-static-cris-7.2.8-1.fc38.x86_64.rpm +14f8c18222d333ccce15d42653fe9b7ea26afbb860a4ede7df065c4a24a61c50 qemu-user-static-hexagon-7.2.8-1.fc38.x86_64.rpm +272b9b84827b04aad46b2e7d94ebc2e330eeefa65da5c6ae10bb482eef8a071f qemu-user-static-hppa-7.2.8-1.fc38.x86_64.rpm +d61ed9e2bc97f577b4c790b0e8acd222b626834846220d765e2abf182cdcaf3b qemu-user-static-loongarch64-7.2.8-1.fc38.x86_64.rpm +08b9990c3496e43a0ae38c1d52d3f64297e2d55d6bb46cc460470e92f1a8bf58 
qemu-user-static-m68k-7.2.8-1.fc38.x86_64.rpm +94a2d0a5c44ec1de3546f37a82926d786ad1f16707d7d8cd887e8567064ea448 qemu-user-static-microblaze-7.2.8-1.fc38.x86_64.rpm +5c8eec2ef6c134dc512e886ee4d465b22061168522700b8c63c6615857e0dc83 qemu-user-static-mips-7.2.8-1.fc38.x86_64.rpm +0e29d61f88b56a911fd55f40348ade12c623f4fd555fb60dae68cc43133103fc qemu-user-static-nios2-7.2.8-1.fc38.x86_64.rpm +46b010463816a9ebf50bc35a430a5055db6748656931b54e0fae5e2e57df54e2 qemu-user-static-or1k-7.2.8-1.fc38.x86_64.rpm +c2893f9bc10a31220ff1910c42b719f51d2d7a80a6d47d788a0ec8270275d645 qemu-user-static-ppc-7.2.8-1.fc38.x86_64.rpm +dcf6892031dcd9f4dc3119fd2280c6e5e129ef643a955568265715aabd1154dd qemu-user-static-riscv-7.2.8-1.fc38.x86_64.rpm +031fbd6850549063874b63e20e041005d4ff9cea5a5f536a794620e3497b4650 qemu-user-static-s390x-7.2.8-1.fc38.x86_64.rpm +717c96869477d0115a4ece7beb7966f2d71fb30845ba5f12110b780a65ae9e8c qemu-user-static-sh4-7.2.8-1.fc38.x86_64.rpm +97a99ab9e1ccecddeb370639c3486c4ea88840c2c892a2f6fc422f420feb7227 qemu-user-static-sparc-7.2.8-1.fc38.x86_64.rpm +e1ea327412cc2970e7d8b50e590855e4aeb5ee2882897a52194e5b662534e20f qemu-user-static-x86-7.2.8-1.fc38.x86_64.rpm +f09f00c34111f002a203a64f73824bc147fe5bcda2735ffb6ab83bd3d975e76a qemu-user-static-xtensa-7.2.8-1.fc38.x86_64.rpm +0de622e220594daf822dd0015e099b9940b5d92768703ead1b53e1daf06aecf0 qrencode-libs-4.1.1-4.fc38.i686.rpm +49ec489f168c1671a2babb690edfb020a5252f38e8d0b2d96465070abd2b0d70 qrencode-libs-4.1.1-4.fc38.x86_64.rpm +86394991888b35c06a424235d49750c02319f768d5e0da3554b57a4fba6fe513 readline-8.2-4.fc38.i686.rpm +3c802e4ae00ad884944e7a3984e9c04b2a2929d408fa820622c4f5169263df30 readline-8.2-4.fc38.x86_64.rpm +ff3ac983386ce4bc4ce48dc67bb0b50b8b078a35ccc6897597b82852a648bdf1 rpm-4.18.2-1.fc38.x86_64.rpm +a30278c06ac5aa081864793a3960f7d2701f0d7f0b594724d9aec7b127f5b758 rpm-libs-4.18.2-1.fc38.x86_64.rpm +1e8e041e2fe2bccbed82917515c8605c2ea1947dcfed95377f7f7b3038e0921b rpm-plugin-selinux-4.18.2-1.fc38.x86_64.rpm +9a56ebe2e1e807bd0c81ccb096a02ab1ffc34ffcd199da7f79ab352edb9aa0b3 rpm-sequoia-1.6.0-1.fc38.x86_64.rpm +105ae7c0507c75babc124ae653dc0310988a0ae26a443c850c06b2cdd5709486 runc-1.1.12-1.fc38.x86_64.rpm +61985efd54550e1fa9f31575eaa57232034d72d4eb169e88e70183d43727962c sbsigntools-0.9.5-1.fc38.x86_64.rpm +a6e01b89e814ec42d1c2c6be79240a97a9bd151c857f82a11e129547e069e27f sed-4.8-12.fc38.x86_64.rpm +9f494bcebfe2b7398f2a42c57a67b7cf35d580ce3159e071d70b3c013ccf365d selinux-policy-38.31-1.fc38.noarch.rpm +253702f1c3a5d72fe1f62da668a488e64c753102d1adc9d41de6b3f52bde5054 selinux-policy-targeted-38.31-1.fc38.noarch.rpm +c7efb8634b62cdab9e8894174a8c70d20eb431482277231bc54fa8ca8c3681e3 setup-2.14.3-2.fc38.noarch.rpm +8be96e09e2e44491b287be44b2b6be0c9f8aeab75fe061d3e8a28b9be19144ef shadow-utils-4.13-6.fc38.x86_64.rpm +46eaa576b2c5fcd167d9d88f823c309da0067fa8c4d35b7c535792fe862f05cd shadow-utils-subid-4.13-6.fc38.x86_64.rpm +8fc0ae5f47a2d868bbd44c5e65d68cec44272e6e7bbdc506217fa5c66499f872 slirp4netns-1.2.2-1.fc38.x86_64.rpm +868aa887826ef8c81162b97ed4440949741ecb0d78bba12291b03bdccb917877 socat-1.7.4.4-2.fc38.x86_64.rpm +be6d9e9b98733494ee5901d45b849e2dc012a6b41c3ff09d0d212002dbe15dce sqlite-libs-3.40.1-2.fc38.x86_64.rpm +5fd7f60e8a5c734506db5f0ab7e4242c38a0b696f54f9352f5e5a08085c5472c systemd-253.15-2.fc38.i686.rpm +59a9e6d63ce3024d93f4080c2a546339fa9350c8e7e2f1f32daaecebcb3036a5 systemd-253.15-2.fc38.x86_64.rpm +a7bd6a268c14438501ef55679f08366a48fcfb48f2bbbe8d7537066f31e5240d systemd-boot-unsigned-253.15-2.fc38.x86_64.rpm 
+e2159ca1b075e0d2cb04264de8ecc619f6aac57d2377b4745ecb3d1b6acc0632 systemd-libs-253.15-2.fc38.i686.rpm +845e8f6c1aa62ae8d944f3db87894def531d30e50efd5158d7797b94e0bd8064 systemd-libs-253.15-2.fc38.x86_64.rpm +d65bc511f98b20cf475298ccd031251ba2bcaebdda35ab42dd03628c6e1a0db0 systemd-networkd-253.15-2.fc38.x86_64.rpm +92e12260564855327e12c512c200f5136651a88037a41e2bd0d3d5613a312b05 systemd-pam-253.15-2.fc38.i686.rpm +c5663bf40c0a1cae11fd03778922b96bc661b337446307c073e05aa7cd5184ff systemd-pam-253.15-2.fc38.x86_64.rpm +6e60058b82f09d697687ba0fb63a542940b684cac9d6451046b57e277e5d7b0c systemd-resolved-253.15-2.fc38.x86_64.rpm +d382a78827900f956cac626a0f1d29913e885f342957d753bc6c866682e021ff systemd-udev-253.15-2.fc38.x86_64.rpm +1d6caa060ef12ab32bf7220b49bc0d9c858c68a8f50b060f5117a2aca63a4dc5 tar-1.34-8.fc38.x86_64.rpm +5cc364cad8cb613a0ec68076f7f8e10b72ef2e266a10d2463bf548b2b0abd13e tpm2-tools-5.5-3.fc38.x86_64.rpm +8adf29af85920514902bc4332ceb896a54f9cf89e08993c9345b62c4140f91d9 tpm2-tss-4.0.1-3.fc38.x86_64.rpm +341f8e68b79c2bcac16e01f2fa9e1afc607c7fca3ef627be2abdf2147764016c tzdata-2024a-1.fc38.noarch.rpm +232da16c546617adde46ecaa1d5367acd05f75d04570fb367123b8dd01abdea4 util-linux-2.38.1-4.fc38.i686.rpm +f0f8e33332df97afd911093f28c487bc84cbe4dcc7bb468eac5551d235acee62 util-linux-2.38.1-4.fc38.x86_64.rpm +b57dbbbee14301e89df618b398ef39b7fc841eaba6be1b6346cf37ed7695c26a util-linux-core-2.38.1-4.fc38.x86_64.rpm +cce5fcc8b6b0312caeca04a19494358888b00c125747f5c2d2bd8f006665c730 vim-common-9.1.113-1.fc38.x86_64.rpm +5fa001dbcd0752e75421b2e96aabb73265a48cdd646b02dc947da768147f2be8 vim-data-9.1.113-1.fc38.noarch.rpm +545d77bb579a8fb3e87ecd1d5acf616b4b837612f189206171edad73fd4864ab vim-enhanced-9.1.113-1.fc38.x86_64.rpm +8743bcb074aed6aa20914b7d0258cd6938e3642fe3550279bb1c66c6300d936a vim-filesystem-9.1.113-1.fc38.noarch.rpm +a4c8b2a90705fed491f6f7f258904637c18773d323d39e97bf9036260b79a0f6 wget-1.21.4-1.fc38.x86_64.rpm +2c8b143f3cb83efa5a31c85bea1da3164ca2dde5e2d75d25115f3e21ef98b4e0 which-2.21-39.fc38.x86_64.rpm +84f87df3afabe3de8748f172220107e5a5cbb0f0ef954386ecff6b914604aada whois-nls-5.5.18-1.fc38.noarch.rpm +59a7a5a775c196961cdc51fb89440a055295c767a632bfa684760e73650aa9a0 xkeyboard-config-2.38-1.fc38.noarch.rpm +e7a479b7795b51bac55a183f47a1df103ec69456d716b5e87274cabbbe9db041 xxd-9.1.113-1.fc38.x86_64.rpm +e911703ffceee37ec1066344820ab0cf9ba8e43d7957395981ba68c4d411a0a4 xz-5.4.1-1.fc38.x86_64.rpm +2b3a57c5ccfd4c99ec78d8420394387782a4ac57946d63800a406a4050c3d214 xz-libs-5.4.1-1.fc38.i686.rpm +bfce8ac2a2a78a23fb931531fb3d8f530a78f4d5b17f6199bf99b93ca21858c0 xz-libs-5.4.1-1.fc38.x86_64.rpm +e6971389d89ab454bbb372859b5aee67469a0b12e57d8657c0818bca78df22f2 yajl-2.1.0-21.fc38.x86_64.rpm +c83464d6c93835b24b2c09d4c851f4a9bdacc70489a6c21447ed33ffd98c0d63 zlib-1.2.13-3.fc38.i686.rpm +c26d4d161f8eddd7cb794075e383d0f4d3a77aa88e453a2db51e53346981f04c zlib-1.2.13-3.fc38.x86_64.rpm diff --git a/image/mirror/dnf.conf b/image/mirror/dnf.conf index 32618492e..c298b8587 100644 --- a/image/mirror/dnf.conf +++ b/image/mirror/dnf.conf @@ -8,4 +8,4 @@ best=False skip_if_unavailable=True tsflags=nodocs basearch=x86_64 -releasever=40 +releasever=38 diff --git a/image/mirror/packages.txt b/image/mirror/packages.txt index 9d6240a2e..10768f830 100644 --- a/image/mirror/packages.txt +++ b/image/mirror/packages.txt @@ -19,8 +19,6 @@ mokutil nano nano-default-editor nvme-cli -openssh-server -passt-selinux passwd podman sbsigntools diff --git a/image/mirror/update_packages.sh b/image/mirror/update_packages.sh index 
b177f6b4a..f2245d262 100755 --- a/image/mirror/update_packages.sh +++ b/image/mirror/update_packages.sh @@ -39,7 +39,7 @@ download() { "${DNF5}" \ "--config=${DNF_CONF}" \ "--setopt=reposdir=${REPOSDIR}" \ - "--releasever=40" \ + "--releasever=38" \ download \ "--destdir=${OUTDIR}" \ --resolve --alldeps \ diff --git a/image/sysroot-tree/etc/ssh/sshd_config b/image/sysroot-tree/etc/ssh/sshd_config deleted file mode 100644 index 39016f323..000000000 --- a/image/sysroot-tree/etc/ssh/sshd_config +++ /dev/null @@ -1,5 +0,0 @@ -HostKey /var/run/state/ssh/ssh_host_ed25519_key -HostCertificate /var/run/state/ssh/ssh_host_cert.pub -TrustedUserCAKeys /var/run/state/ssh/ssh_ca.pub -PasswordAuthentication no -ChallengeResponseAuthentication no diff --git a/image/sysroot-tree/etc/systemd/system/sshd-keygen@.service.d/override.conf b/image/sysroot-tree/etc/systemd/system/sshd-keygen@.service.d/override.conf deleted file mode 100644 index 1e956c08b..000000000 --- a/image/sysroot-tree/etc/systemd/system/sshd-keygen@.service.d/override.conf +++ /dev/null @@ -1,3 +0,0 @@ -[Unit] -ConditionFileNotEmpty=|!/var/run/state/ssh/ssh_host_%i_key -Before=constellation-bootstrapper.service diff --git a/image/sysroot-tree/usr/lib/systemd/system/getty@tty1.service.d/autologin.conf b/image/sysroot-tree/usr/lib/systemd/system/getty@tty1.service.d/autologin.conf deleted file mode 100644 index ec52d1369..000000000 --- a/image/sysroot-tree/usr/lib/systemd/system/getty@tty1.service.d/autologin.conf +++ /dev/null @@ -1,11 +0,0 @@ -[Unit] -Description=autologin -ConditionPathExists=/proc/cmdline -ConditionKernelCommandLine=|constel.console -ConditionKernelCommandLine=|constel.debug - -[Service] -ExecStart= -ExecStart=-/sbin/agetty -o '-p -f -- \\u' --noclear --autologin root %I $TERM -[Install] -WantedBy=multi-user.target diff --git a/image/sysroot-tree/usr/lib/systemd/system/serial-getty@ttyS0.service.d/autologin.conf b/image/sysroot-tree/usr/lib/systemd/system/serial-getty@ttyS0.service.d/autologin.conf deleted file mode 100644 index 24fe28a99..000000000 --- a/image/sysroot-tree/usr/lib/systemd/system/serial-getty@ttyS0.service.d/autologin.conf +++ /dev/null @@ -1,12 +0,0 @@ -[Unit] -Description=autologin -ConditionPathExists=/proc/cmdline -ConditionKernelCommandLine=|constel.console -ConditionKernelCommandLine=|constel.debug - -[Service] -ExecStart= -ExecStart=-/sbin/agetty -o '-p -f -- \\u' --keep-baud --autologin root 115200,57600,38400,9600 - $TERM - -[Install] -WantedBy=multi-user.target diff --git a/image/sysroot-tree/usr/lib/systemd/system/sshd-keygen.target b/image/sysroot-tree/usr/lib/systemd/system/sshd-keygen.target deleted file mode 100644 index 3c4dd2b1c..000000000 --- a/image/sysroot-tree/usr/lib/systemd/system/sshd-keygen.target +++ /dev/null @@ -1,3 +0,0 @@ -[Unit] -Wants=sshd-keygen@ed25519.service -PartOf=sshd.service diff --git a/image/sysroot-tree/usr/libexec/openssh/sshd-keygen b/image/sysroot-tree/usr/libexec/openssh/sshd-keygen deleted file mode 100644 index c366b0d0a..000000000 --- a/image/sysroot-tree/usr/libexec/openssh/sshd-keygen +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/bash -# Taken from the original openssh-server package and slightly modified - -set -x - -# Create the host keys for the OpenSSH server. 
-KEYTYPE=$1 -case $KEYTYPE in -"dsa") ;& # disabled in FIPS -"ed25519") - FIPS=/proc/sys/crypto/fips_enabled - if [[ -r $FIPS && $(cat $FIPS) == "1" ]]; then - exit 0 - fi - ;; -"rsa") ;; # always ok -"ecdsa") ;; -*) # wrong argument - exit 12 ;; -esac -mkdir -p /var/run/state/ssh -KEY=/var/run/state/ssh/ssh_host_${KEYTYPE}_key - -KEYGEN=/usr/bin/ssh-keygen -if [[ ! -x $KEYGEN ]]; then - exit 13 -fi - -# remove old keys -rm -f "$KEY"{,.pub} - -# create new keys -if ! $KEYGEN -q -t "$KEYTYPE" -f "$KEY" -C '' -N '' >&/dev/null; then - exit 1 -fi - -# sanitize permissions -/usr/bin/chmod 600 "$KEY" -/usr/bin/chmod 644 "$KEY".pub -if [[ -x /usr/sbin/restorecon ]]; then - /usr/sbin/restorecon "$KEY"{,.pub} -fi - -exit 0 diff --git a/image/system/BUILD.bazel b/image/system/BUILD.bazel index 3a51c92a4..3765667b8 100644 --- a/image/system/BUILD.bazel +++ b/image/system/BUILD.bazel @@ -1,6 +1,6 @@ load("//bazel/mkosi:mkosi_image.bzl", "mkosi_image") load("//bazel/osimage:upload_os_images.bzl", "upload_os_images") -load(":variants.bzl", "CSPS", "STREAMS", "VARIANTS", "base_image", "constellation_packages", "images_for_csp", "images_for_csp_and_stream", "images_for_stream", "kernel_command_line", "kernel_command_line_dict") +load(":variants.bzl", "CSPS", "STREAMS", "VARIANTS", "autologin", "base_image", "constellation_packages", "images_for_csp", "images_for_csp_and_stream", "images_for_stream", "kernel_command_line", "kernel_command_line_dict") [ mkosi_image( @@ -10,6 +10,11 @@ load(":variants.bzl", "CSPS", "STREAMS", "VARIANTS", "base_image", "constellatio ] + glob([ "mkosi.repart/**", ]), + autologin = autologin( + variant["csp"], + variant["attestation_variant"], + stream, + ), base_trees = [ base_image( variant["csp"], diff --git a/image/system/mkosi.conf b/image/system/mkosi.conf index d97bbc1bb..b23cf00a3 100644 --- a/image/system/mkosi.conf +++ b/image/system/mkosi.conf @@ -1,7 +1,6 @@ [Distribution] Distribution=fedora -Release=40 -RepositoryKeyFetch=yes +Release=38 [Output] Format=disk diff --git a/image/system/variants.bzl b/image/system/variants.bzl index b9b1c6bd8..3cca05c95 100644 --- a/image/system/variants.bzl +++ b/image/system/variants.bzl @@ -50,29 +50,33 @@ CSPS = [ "qemu", ] -base_cmdline = "selinux=1 enforcing=0 audit=0 console=tty1 console=ttyS0" +base_cmdline = "selinux=1 enforcing=0 audit=0" csp_settings = { "aws": { "kernel_command_line_dict": { "console": "ttyS0", "constel.csp": "aws", - "mitigations": "auto,nosmt", + "idle": "poll", + "mitigations": "auto", }, }, "azure": { "kernel_command_line_dict": { + "console": "ttyS0", "constel.csp": "azure", "mitigations": "auto,nosmt", }, }, "gcp": { "kernel_command_line_dict": { + "console": "ttyS0", "constel.csp": "gcp", "mitigations": "auto,nosmt", }, }, "openstack": { + "kernel_command_line": "console=tty0 console=ttyS0 console=ttyS1", "kernel_command_line_dict": { "constel.csp": "openstack", "kvm_amd.sev": "1", @@ -82,8 +86,9 @@ csp_settings = { }, }, "qemu": { - "kernel_command_line": "constel.console", # All qemu images have console enabled independent of stream + "autologin": True, "kernel_command_line_dict": { + "console": "ttyS0", "constel.csp": "qemu", "mitigations": "auto,nosmt", }, @@ -131,10 +136,11 @@ attestation_variant_settings = { stream_settings = { "console": { - "kernel_command_line": "constel.console", + "autologin": True, }, "debug": { - "kernel_command_line": "constel.debug", + "autologin": True, + "kernel_command_line": "constellation.debug", }, "nightly": {}, "stable": {}, @@ -175,6 +181,26 @@ def 
constellation_packages(stream): "//bootstrapper/cmd/bootstrapper:bootstrapper-package", ] + base_packages +def autologin(csp, attestation_variant, stream): + """Generates a boolean indicating whether autologin should be enabled for the given csp, attestation_variant and stream. + + Args: + csp: The cloud service provider to use. + attestation_variant: The attestation variant to use. + stream: The stream to use. + + Returns: + A boolean indicating whether autologin should be enabled. + """ + out = None + for settings in from_settings(csp, attestation_variant, stream): + if not "autologin" in settings: + continue + if out != None and out != settings["autologin"]: + fail("Inconsistent autologin settings") + out = settings["autologin"] + return out + def kernel_command_line(csp, attestation_variant, stream): cmdline = base_cmdline for settings in from_settings(csp, attestation_variant, stream, default = {}): diff --git a/image/upload/internal/cmd/api.go b/image/upload/internal/cmd/api.go index 5cf4e0b95..5f6865998 100644 --- a/image/upload/internal/cmd/api.go +++ b/image/upload/internal/cmd/api.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/image/upload/internal/cmd/flags.go b/image/upload/internal/cmd/flags.go index de38c2701..26b159b44 100644 --- a/image/upload/internal/cmd/flags.go +++ b/image/upload/internal/cmd/flags.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/image/upload/internal/cmd/info.go b/image/upload/internal/cmd/info.go index d104d1dc2..cd629600e 100644 --- a/image/upload/internal/cmd/info.go +++ b/image/upload/internal/cmd/info.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd @@ -50,7 +50,7 @@ func runInfo(cmd *cobra.Command, args []string) error { } log := logger.NewTextLogger(flags.logLevel) - log.Debug("Using flags", "region", flags.region, "bucket", flags.bucket, "distributionID", flags.distributionID) + log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) info, err := readInfoArgs(args) if err != nil { return err diff --git a/image/upload/internal/cmd/measurements.go b/image/upload/internal/cmd/measurements.go index a61c14c0d..e117b88d9 100644 --- a/image/upload/internal/cmd/measurements.go +++ b/image/upload/internal/cmd/measurements.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/image/upload/internal/cmd/measurementsenvelope.go b/image/upload/internal/cmd/measurementsenvelope.go index 082e82ea7..878dcaf71 100644 --- a/image/upload/internal/cmd/measurementsenvelope.go +++ b/image/upload/internal/cmd/measurementsenvelope.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd @@ -54,7 +54,7 @@ func runEnvelopeMeasurements(cmd *cobra.Command, _ []string) error { } log := logger.NewTextLogger(flags.logLevel) - log.Debug("Using flags", "version", flags.version.Version(), "csp", flags.csp, "attestationVariant", flags.attestationVariant, "in", flags.in) + log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) f, err := os.Open(flags.in) if err != nil { diff --git a/image/upload/internal/cmd/measurementsmerge.go b/image/upload/internal/cmd/measurementsmerge.go 
index 4a74e20b2..53ec2de2c 100644 --- a/image/upload/internal/cmd/measurementsmerge.go +++ b/image/upload/internal/cmd/measurementsmerge.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd @@ -45,7 +45,7 @@ func runMergeMeasurements(cmd *cobra.Command, args []string) error { } log := logger.NewTextLogger(flags.logLevel) - log.Debug("Using flags", "out", flags.out, "logLevel", flags.logLevel) + log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) mergedMeasurements, err := readMeasurementsArgs(args) if err != nil { diff --git a/image/upload/internal/cmd/measurementsupload.go b/image/upload/internal/cmd/measurementsupload.go index bca15e121..850883c63 100644 --- a/image/upload/internal/cmd/measurementsupload.go +++ b/image/upload/internal/cmd/measurementsupload.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd @@ -53,7 +53,7 @@ func runMeasurementsUpload(cmd *cobra.Command, _ []string) error { } log := logger.NewTextLogger(flags.logLevel) - log.Debug("Using flags", "measurementsPath", flags.measurementsPath, "signaturePath", flags.signaturePath, "region", flags.region, "bucket", flags.bucket, "distributionID", flags.distributionID) + log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) uploadC, uploadCClose, err := measurementsuploader.New(cmd.Context(), flags.region, flags.bucket, flags.distributionID, log) if err != nil { @@ -61,7 +61,7 @@ func runMeasurementsUpload(cmd *cobra.Command, _ []string) error { } defer func() { if err := uploadCClose(cmd.Context()); err != nil { - log.Error("closing upload client", "error", err) + log.Error("closing upload client: %v", err) } }() diff --git a/image/upload/internal/cmd/must.go b/image/upload/internal/cmd/must.go index 2f3d8d896..fb26f2df0 100644 --- a/image/upload/internal/cmd/must.go +++ b/image/upload/internal/cmd/must.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/image/upload/internal/cmd/upload.go b/image/upload/internal/cmd/upload.go index 680b26586..22c8fa98e 100644 --- a/image/upload/internal/cmd/upload.go +++ b/image/upload/internal/cmd/upload.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd diff --git a/image/upload/internal/cmd/uplosi.go b/image/upload/internal/cmd/uplosi.go index 7e6214837..13a854683 100644 --- a/image/upload/internal/cmd/uplosi.go +++ b/image/upload/internal/cmd/uplosi.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cmd @@ -59,8 +59,8 @@ func runUplosi(cmd *cobra.Command, _ []string) error { return err } log := logger.NewTextLogger(flags.logLevel) - log.Debug("Using flags", "raw-image", flags.rawImage, "attestation-variant", flags.attestationVariant, "csp", flags.provider, "ref", flags.version.Ref(), "stream", flags.version.Stream(), - "version", flags.version.Version(), "region", flags.region, "bucket", flags.bucket, "distribution-id", flags.distributionID, "out", flags.out, "uplosi-path", flags.uplosiPath) + log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) + archiveC, archiveCClose, err := archive.New(cmd.Context(), flags.region, flags.bucket, flags.distributionID, log) if err != nil { return err diff --git 
a/image/upload/upload.go b/image/upload/upload.go index 2a2ab1423..7f4886a2d 100644 --- a/image/upload/upload.go +++ b/image/upload/upload.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // upload uploads os images. diff --git a/internal/api/attestationconfigapi/BUILD.bazel b/internal/api/attestationconfigapi/BUILD.bazel index 80397adb3..5db942eb3 100644 --- a/internal/api/attestationconfigapi/BUILD.bazel +++ b/internal/api/attestationconfigapi/BUILD.bazel @@ -5,24 +5,32 @@ go_library( name = "attestationconfigapi", srcs = [ "attestationconfigapi.go", + "client.go", "fetcher.go", - "version.go", + "reporter.go", + "snp.go", ], importpath = "github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi", visibility = ["//:__subpackages__"], deps = [ + "//internal/api/client", "//internal/api/fetcher", "//internal/attestation/variant", "//internal/constants", "//internal/sigstore", + "//internal/staticupload", + "@com_github_aws_aws_sdk_go//aws", + "@com_github_aws_aws_sdk_go_v2_service_s3//:s3", ], ) go_test( name = "attestationconfigapi_test", srcs = [ + "client_test.go", "fetcher_test.go", - "version_test.go", + "reporter_test.go", + "snp_test.go", ], embed = [":attestationconfigapi"], deps = [ diff --git a/internal/api/attestationconfigapi/attestationconfigapi.go b/internal/api/attestationconfigapi/attestationconfigapi.go index d0ff50872..e170da869 100644 --- a/internal/api/attestationconfigapi/attestationconfigapi.go +++ b/internal/api/attestationconfigapi/attestationconfigapi.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* @@ -15,7 +15,7 @@ information contained in the objects. Especially the paths used for the API are in these helper methods. Regarding the decision to implement new types over using the existing types from internal/config: -AttestationCfg objects for AttestationCfg API need to hold some version information (for sorting, recognizing latest). +AttesationCfg objects for AttestationCfg API need to hold some version information (for sorting, recognizing latest). Thus, existing config types (AWSNitroTPM, AzureSEVSNP, ...) can not be extended to implement apiObject interface. Instead, we need a separate type that wraps _all_ attestation types. In the codebase this is done using the AttestationCfg interface. The new type AttestationCfgGet needs to be located inside internal/config in order to implement UnmarshalJSON. 
diff --git a/internal/api/attestationconfigapi/cli/BUILD.bazel b/internal/api/attestationconfigapi/cli/BUILD.bazel index df2856aeb..982541829 100644 --- a/internal/api/attestationconfigapi/cli/BUILD.bazel +++ b/internal/api/attestationconfigapi/cli/BUILD.bazel @@ -10,7 +10,8 @@ go_binary( go_library( name = "cli_lib", srcs = [ - "compare.go", + "aws.go", + "azure.go", "delete.go", "main.go", "upload.go", @@ -20,18 +21,16 @@ go_library( visibility = ["//visibility:private"], deps = [ "//internal/api/attestationconfigapi", - "//internal/api/attestationconfigapi/cli/client", - "//internal/api/fetcher", "//internal/attestation/variant", + "//internal/cloud/cloudprovider", "//internal/constants", "//internal/file", "//internal/logger", "//internal/staticupload", "//internal/verify", - "@com_github_aws_aws_sdk_go_v2//aws", + "@com_github_aws_aws_sdk_go//aws", "@com_github_aws_aws_sdk_go_v2_service_s3//:s3", "@com_github_aws_aws_sdk_go_v2_service_s3//types", - "@com_github_google_go_tdx_guest//proto/tdx", "@com_github_spf13_afero//:afero", "@com_github_spf13_cobra//:cobra", ], diff --git a/internal/api/attestationconfigapi/cli/aws.go b/internal/api/attestationconfigapi/cli/aws.go new file mode 100644 index 000000000..578caadc4 --- /dev/null +++ b/internal/api/attestationconfigapi/cli/aws.go @@ -0,0 +1,24 @@ +/* +Copyright (c) Edgeless Systems GmbH + +SPDX-License-Identifier: AGPL-3.0-only +*/ + +package main + +import ( + "context" + "fmt" + + "github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi" + "github.com/edgelesssys/constellation/v2/internal/attestation/variant" + "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider" +) + +func deleteAWS(ctx context.Context, client *attestationconfigapi.Client, cfg deleteConfig) error { + if cfg.provider != cloudprovider.AWS || cfg.kind != snpReport { + return fmt.Errorf("provider %s and kind %s not supported", cfg.provider, cfg.kind) + } + + return client.DeleteSEVSNPVersion(ctx, variant.AWSSEVSNP{}, cfg.version) +} diff --git a/internal/api/attestationconfigapi/cli/azure.go b/internal/api/attestationconfigapi/cli/azure.go new file mode 100644 index 000000000..a10fb4e13 --- /dev/null +++ b/internal/api/attestationconfigapi/cli/azure.go @@ -0,0 +1,61 @@ +/* +Copyright (c) Edgeless Systems GmbH + +SPDX-License-Identifier: AGPL-3.0-only +*/ + +package main + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/service/s3" + s3types "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/aws-sdk-go/aws" + "github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi" + "github.com/edgelesssys/constellation/v2/internal/attestation/variant" + "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider" + "github.com/edgelesssys/constellation/v2/internal/staticupload" +) + +func deleteAzure(ctx context.Context, client *attestationconfigapi.Client, cfg deleteConfig) error { + if cfg.provider != cloudprovider.Azure && cfg.kind != snpReport { + return fmt.Errorf("provider %s and kind %s not supported", cfg.provider, cfg.kind) + } + + return client.DeleteSEVSNPVersion(ctx, variant.AzureSEVSNP{}, cfg.version) +} + +func deleteRecursive(ctx context.Context, path string, client *staticupload.Client, cfg deleteConfig) error { + resp, err := client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{ + Bucket: aws.String(cfg.bucket), + Prefix: aws.String(path), + }) + if err != nil { + return err + } + + // Delete all objects in the path. 
+ objIDs := make([]s3types.ObjectIdentifier, len(resp.Contents)) + for i, obj := range resp.Contents { + objIDs[i] = s3types.ObjectIdentifier{Key: obj.Key} + } + if len(objIDs) > 0 { + _, err = client.DeleteObjects(ctx, &s3.DeleteObjectsInput{ + Bucket: aws.String(cfg.bucket), + Delete: &s3types.Delete{ + Objects: objIDs, + Quiet: toPtr(true), + }, + }) + if err != nil { + return err + } + } + return nil +} + +func toPtr[T any](v T) *T { + return &v +} diff --git a/internal/api/attestationconfigapi/cli/client/BUILD.bazel b/internal/api/attestationconfigapi/cli/client/BUILD.bazel deleted file mode 100644 index c90cb34b9..000000000 --- a/internal/api/attestationconfigapi/cli/client/BUILD.bazel +++ /dev/null @@ -1,34 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") -load("//bazel/go:go_test.bzl", "go_test") - -go_library( - name = "client", - srcs = [ - "client.go", - "reporter.go", - ], - importpath = "github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi/cli/client", - visibility = ["//:__subpackages__"], - deps = [ - "//internal/api/attestationconfigapi", - "//internal/api/client", - "//internal/attestation/variant", - "//internal/sigstore", - "//internal/staticupload", - "@com_github_aws_aws_sdk_go//aws", - "@com_github_aws_aws_sdk_go_v2_service_s3//:s3", - ], -) - -go_test( - name = "client_test", - srcs = [ - "client_test.go", - "reporter_test.go", - ], - embed = [":client"], - deps = [ - "//internal/api/attestationconfigapi", - "@com_github_stretchr_testify//assert", - ], -) diff --git a/internal/api/attestationconfigapi/cli/client/client.go b/internal/api/attestationconfigapi/cli/client/client.go deleted file mode 100644 index ea867dd1c..000000000 --- a/internal/api/attestationconfigapi/cli/client/client.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright (c) Edgeless Systems GmbH - -SPDX-License-Identifier: BUSL-1.1 -*/ - -/* -package client contains code to manage CVM versions in Constellation's CDN API. -It is used to upload and delete "latest" versions for AMD SEV-SNP and Intel TDX. -*/ -package client - -import ( - "context" - "errors" - "fmt" - "log/slog" - "path" - "strings" - - "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/aws/aws-sdk-go/aws" - "github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi" - apiclient "github.com/edgelesssys/constellation/v2/internal/api/client" - "github.com/edgelesssys/constellation/v2/internal/attestation/variant" - "github.com/edgelesssys/constellation/v2/internal/sigstore" - - "github.com/edgelesssys/constellation/v2/internal/staticupload" -) - -// VersionFormat is the format of the version name in the S3 bucket. -const VersionFormat = "2006-01-02-15-04" - -// Client manages (modifies) the version information for the attestation variants. -type Client struct { - s3Client *apiclient.Client - s3ClientClose func(ctx context.Context) error - bucketID string - signer sigstore.Signer - cacheWindowSize int - - log *slog.Logger -} - -// New returns a new Client. 
-func New(ctx context.Context, cfg staticupload.Config, cosignPwd, privateKey []byte, dryRun bool, versionWindowSize int, log *slog.Logger) (*Client, apiclient.CloseFunc, error) { - s3Client, clientClose, err := apiclient.NewClient(ctx, cfg.Region, cfg.Bucket, cfg.DistributionID, dryRun, log) - if err != nil { - return nil, nil, fmt.Errorf("failed to create s3 storage: %w", err) - } - - repo := &Client{ - s3Client: s3Client, - s3ClientClose: clientClose, - signer: sigstore.NewSigner(cosignPwd, privateKey), - bucketID: cfg.Bucket, - cacheWindowSize: versionWindowSize, - log: log, - } - return repo, clientClose, nil -} - -// DeleteVersion deletes the given version (without .json suffix) from the API. -func (c Client) DeleteVersion(ctx context.Context, attestation variant.Variant, versionStr string) error { - versions, err := c.List(ctx, attestation) - if err != nil { - return fmt.Errorf("fetch version list: %w", err) - } - - ops, err := c.deleteVersion(versions, versionStr) - if err != nil { - return err - } - return executeAllCmds(ctx, c.s3Client, ops) -} - -// List returns the list of versions for the given attestation variant. -func (c Client) List(ctx context.Context, attestation variant.Variant) (attestationconfigapi.List, error) { - versions, err := apiclient.Fetch(ctx, c.s3Client, attestationconfigapi.List{Variant: attestation}) - if err != nil { - var notFoundErr *apiclient.NotFoundError - if errors.As(err, ¬FoundErr) { - return attestationconfigapi.List{Variant: attestation}, nil - } - return attestationconfigapi.List{}, err - } - - versions.Variant = attestation - - return versions, nil -} - -func (c Client) deleteVersion(versions attestationconfigapi.List, versionStr string) (ops []crudCmd, err error) { - versionStr = versionStr + ".json" - ops = append(ops, deleteCmd{ - apiObject: attestationconfigapi.Entry{ - Variant: versions.Variant, - Version: versionStr, - }, - }) - - removedVersions, err := removeVersion(versions, versionStr) - if err != nil { - return nil, err - } - ops = append(ops, putCmd{ - apiObject: removedVersions, - signer: c.signer, - }) - return ops, nil -} - -func (c Client) listCachedVersions(ctx context.Context, attestation variant.Variant) ([]string, error) { - list, err := c.s3Client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{ - Bucket: aws.String(c.bucketID), - Prefix: aws.String(reportVersionDir(attestation)), - }) - if err != nil { - return nil, fmt.Errorf("list objects: %w", err) - } - - var dates []string - for _, obj := range list.Contents { - fileName := path.Base(*obj.Key) - - // The cache contains signature and json files - // We only want the json files - if date, ok := strings.CutSuffix(fileName, ".json"); ok { - dates = append(dates, date) - } - } - return dates, nil -} - -func removeVersion(list attestationconfigapi.List, versionStr string) (removedVersions attestationconfigapi.List, err error) { - versions := list.List - for i, v := range versions { - if v == versionStr { - if i == len(versions)-1 { - removedVersions = attestationconfigapi.List{List: versions[:i], Variant: list.Variant} - } else { - removedVersions = attestationconfigapi.List{List: append(versions[:i], versions[i+1:]...), Variant: list.Variant} - } - return removedVersions, nil - } - } - return attestationconfigapi.List{}, fmt.Errorf("version %s not found in list %v", versionStr, versions) -} - -type crudCmd interface { - Execute(ctx context.Context, c *apiclient.Client) error -} - -type deleteCmd struct { - apiObject apiclient.APIObject -} - -func (d deleteCmd) Execute(ctx 
context.Context, c *apiclient.Client) error { - return apiclient.DeleteWithSignature(ctx, c, d.apiObject) -} - -type putCmd struct { - apiObject apiclient.APIObject - signer sigstore.Signer -} - -func (p putCmd) Execute(ctx context.Context, c *apiclient.Client) error { - return apiclient.SignAndUpdate(ctx, c, p.apiObject, p.signer) -} - -func executeAllCmds(ctx context.Context, client *apiclient.Client, cmds []crudCmd) error { - for _, cmd := range cmds { - if err := cmd.Execute(ctx, client); err != nil { - return fmt.Errorf("execute operation %+v: %w", cmd, err) - } - } - return nil -} diff --git a/internal/api/attestationconfigapi/cli/client/client_test.go b/internal/api/attestationconfigapi/cli/client/client_test.go deleted file mode 100644 index f46c872d8..000000000 --- a/internal/api/attestationconfigapi/cli/client/client_test.go +++ /dev/null @@ -1,34 +0,0 @@ -/* -Copyright (c) Edgeless Systems GmbH - -SPDX-License-Identifier: BUSL-1.1 -*/ -package client - -import ( - "testing" - - "github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi" - "github.com/stretchr/testify/assert" -) - -func TestDeleteAzureSEVSNPVersions(t *testing.T) { - sut := Client{ - bucketID: "bucket", - } - versions := attestationconfigapi.List{List: []string{"2023-01-01.json", "2021-01-01.json", "2019-01-01.json"}} - - ops, err := sut.deleteVersion(versions, "2021-01-01") - - assert := assert.New(t) - assert.NoError(err) - assert.Contains(ops, deleteCmd{ - apiObject: attestationconfigapi.Entry{ - Version: "2021-01-01.json", - }, - }) - - assert.Contains(ops, putCmd{ - apiObject: attestationconfigapi.List{List: []string{"2023-01-01.json", "2019-01-01.json"}}, - }) -} diff --git a/internal/api/attestationconfigapi/cli/client/reporter.go b/internal/api/attestationconfigapi/cli/client/reporter.go deleted file mode 100644 index ff215ca55..000000000 --- a/internal/api/attestationconfigapi/cli/client/reporter.go +++ /dev/null @@ -1,367 +0,0 @@ -/* -Copyright (c) Edgeless Systems GmbH - -SPDX-License-Identifier: BUSL-1.1 -*/ - -package client - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "path" - "sort" - "strings" - "time" - - "github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi" - "github.com/edgelesssys/constellation/v2/internal/api/client" - "github.com/edgelesssys/constellation/v2/internal/attestation/variant" -) - -// cachedVersionsSubDir is the subdirectory in the bucket where the cached versions are stored. -const cachedVersionsSubDir = "cached-versions" - -// ErrNoNewerVersion is returned if the input version is not newer than the latest API version. -var ErrNoNewerVersion = errors.New("input version is not newer than latest API version") - -func reportVersionDir(attestation variant.Variant) string { - return path.Join(attestationconfigapi.AttestationURLPath, attestation.String(), cachedVersionsSubDir) -} - -// IsInputNewerThanOtherVersion compares the input version with the other version and returns true if the input version is newer. -// This function panics if the input versions are not TDX or SEV-SNP versions. 
-func IsInputNewerThanOtherVersion(variant variant.Variant, inputVersion, otherVersion any) bool { - var result bool - actionForVariant(variant, - func() { - input := inputVersion.(attestationconfigapi.TDXVersion) - other := otherVersion.(attestationconfigapi.TDXVersion) - result = isInputNewerThanOtherTDXVersion(input, other) - }, - func() { - input := inputVersion.(attestationconfigapi.SEVSNPVersion) - other := otherVersion.(attestationconfigapi.SEVSNPVersion) - result = isInputNewerThanOtherSEVSNPVersion(input, other) - }, - ) - return result -} - -// UploadLatestVersion saves the given version to the cache, determines the smallest -// TCB version in the cache among the last cacheWindowSize versions and updates -// the latest version in the API if there is an update. -// force can be used to bypass the validation logic against the cached versions. -func (c Client) UploadLatestVersion( - ctx context.Context, attestationVariant variant.Variant, - inputVersion, latestVersionInAPI any, - now time.Time, force bool, -) error { - // Validate input versions against configured attestation variant - // This allows us to skip these checks in the individual variant implementations - var err error - actionForVariant(attestationVariant, - func() { - if _, ok := inputVersion.(attestationconfigapi.TDXVersion); !ok { - err = fmt.Errorf("input version %q is not a TDX version", inputVersion) - } - if _, ok := latestVersionInAPI.(attestationconfigapi.TDXVersion); !ok { - err = fmt.Errorf("latest API version %q is not a TDX version", latestVersionInAPI) - } - }, - func() { - if _, ok := inputVersion.(attestationconfigapi.SEVSNPVersion); !ok { - err = fmt.Errorf("input version %q is not a SNP version", inputVersion) - } - if _, ok := latestVersionInAPI.(attestationconfigapi.SEVSNPVersion); !ok { - err = fmt.Errorf("latest API version %q is not a SNP version", latestVersionInAPI) - } - }, - ) - if err != nil { - return err - } - - if err := c.addVersionToCache(ctx, attestationVariant, inputVersion, now); err != nil { - return fmt.Errorf("adding version to cache: %w", err) - } - - // If force is set, immediately update the latest version to the new version in the API. - if force { - return c.uploadAsLatestVersion(ctx, attestationVariant, inputVersion, now) - } - - // Otherwise, check the cached versions and update the latest version in the API if necessary. - versionDates, err := c.listCachedVersions(ctx, attestationVariant) - if err != nil { - return fmt.Errorf("listing existing cached versions: %w", err) - } - if len(versionDates) < c.cacheWindowSize { - c.log.Warn(fmt.Sprintf("Skipping version update, found %d, expected %d reported versions.", len(versionDates), c.cacheWindowSize)) - return nil - } - - minVersion, minDate, err := c.findMinVersion(ctx, attestationVariant, versionDates) - if err != nil { - return fmt.Errorf("determining minimal version in cache: %w", err) - } - c.log.Info(fmt.Sprintf("Found minimal version: %+v with date: %s", minVersion, minDate)) - - if !IsInputNewerThanOtherVersion(attestationVariant, minVersion, latestVersionInAPI) { - c.log.Info(fmt.Sprintf("Input version: %+v is not newer than latest API version: %+v. 
Skipping list update", minVersion, latestVersionInAPI)) - return ErrNoNewerVersion - } - - c.log.Info(fmt.Sprintf("Input version: %+v is newer than latest API version: %+v", minVersion, latestVersionInAPI)) - t, err := time.Parse(VersionFormat, minDate) - if err != nil { - return fmt.Errorf("parsing date: %w", err) - } - - if err := c.uploadAsLatestVersion(ctx, attestationVariant, minVersion, t); err != nil { - return fmt.Errorf("uploading as latest version: %w", err) - } - - c.log.Info(fmt.Sprintf("Successfully uploaded new %s version: %+v", attestationVariant, minVersion)) - return nil -} - -// uploadAsLatestVersion uploads the given version and updates the list to set it as the "latest" version. -// The version's name is the UTC timestamp of the date. -// The /list entry stores the version name + .json suffix. -func (c Client) uploadAsLatestVersion(ctx context.Context, variant variant.Variant, inputVersion any, date time.Time) error { - versions, err := c.List(ctx, variant) - if err != nil { - return fmt.Errorf("fetch version list: %w", err) - } - if !variant.Equal(versions.Variant) { - return nil - } - - dateStr := date.Format(VersionFormat) + ".json" - var ops []crudCmd - - obj := apiVersionObject{version: dateStr, variant: variant, cached: false} - obj.setVersion(inputVersion) - ops = append(ops, putCmd{ - apiObject: obj, - signer: c.signer, - }) - - versions.AddVersion(dateStr) - - ops = append(ops, putCmd{ - apiObject: versions, - signer: c.signer, - }) - - return executeAllCmds(ctx, c.s3Client, ops) -} - -// addVersionToCache adds the given version to the cache. -func (c Client) addVersionToCache(ctx context.Context, variant variant.Variant, inputVersion any, date time.Time) error { - dateStr := date.Format(VersionFormat) + ".json" - obj := apiVersionObject{version: dateStr, variant: variant, cached: true} - obj.setVersion(inputVersion) - cmd := putCmd{ - apiObject: obj, - signer: c.signer, - } - return cmd.Execute(ctx, c.s3Client) -} - -// findMinVersion returns the minimal version in the cache among the last cacheWindowSize versions. 
-func (c Client) findMinVersion( - ctx context.Context, attestationVariant variant.Variant, versionDates []string, -) (any, string, error) { - var getMinimalVersion func() (any, string, error) - - actionForVariant(attestationVariant, - func() { - getMinimalVersion = func() (any, string, error) { - return findMinimalVersion[attestationconfigapi.TDXVersion](ctx, attestationVariant, versionDates, c.s3Client, c.cacheWindowSize) - } - }, - func() { - getMinimalVersion = func() (any, string, error) { - return findMinimalVersion[attestationconfigapi.SEVSNPVersion](ctx, attestationVariant, versionDates, c.s3Client, c.cacheWindowSize) - } - }, - ) - return getMinimalVersion() -} - -func findMinimalVersion[T attestationconfigapi.TDXVersion | attestationconfigapi.SEVSNPVersion]( - ctx context.Context, variant variant.Variant, versionDates []string, - s3Client *client.Client, cacheWindowSize int, -) (T, string, error) { - var minimalVersion *T - var minimalDate string - sort.Sort(sort.Reverse(sort.StringSlice(versionDates))) // sort in reverse order to slice the latest versions - versionDates = versionDates[:cacheWindowSize] - sort.Strings(versionDates) // sort with oldest first to to take the minimal version with the oldest date - - for _, date := range versionDates { - obj, err := client.Fetch(ctx, s3Client, apiVersionObject{version: date + ".json", variant: variant, cached: true}) - if err != nil { - return *new(T), "", fmt.Errorf("get object: %w", err) - } - obj.variant = variant // variant is not set by Fetch, set it manually - - if minimalVersion == nil { - v := obj.getVersion().(T) - minimalVersion = &v - minimalDate = date - continue - } - - // If the current minimal version has newer versions than the one we just fetched, - // update the minimal version to the older version. - if IsInputNewerThanOtherVersion(variant, *minimalVersion, obj.getVersion()) { - v := obj.getVersion().(T) - minimalVersion = &v - minimalDate = date - } - } - - return *minimalVersion, minimalDate, nil -} - -type apiVersionObject struct { - version string `json:"-"` - variant variant.Variant `json:"-"` - cached bool `json:"-"` - snp attestationconfigapi.SEVSNPVersion - tdx attestationconfigapi.TDXVersion -} - -func (a apiVersionObject) MarshalJSON() ([]byte, error) { - var res []byte - var err error - actionForVariant(a.variant, - func() { - res, err = json.Marshal(a.tdx) - }, - func() { - res, err = json.Marshal(a.snp) - }, - ) - return res, err -} - -func (a *apiVersionObject) UnmarshalJSON(data []byte) error { - errTDX := json.Unmarshal(data, &a.tdx) - errSNP := json.Unmarshal(data, &a.snp) - if errTDX == nil || errSNP == nil { - return nil - } - return fmt.Errorf("trying to unmarshal data into both TDX and SNP versions: %w", errors.Join(errTDX, errSNP)) -} - -// JSONPath returns the path to the JSON file for the request to the config api. -// This is the path to the cached version in the S3 bucket. -func (a apiVersionObject) JSONPath() string { - if a.cached { - return path.Join(reportVersionDir(a.variant), a.version) - } - return path.Join(attestationconfigapi.AttestationURLPath, a.variant.String(), a.version) -} - -// ValidateRequest validates the request. -func (a apiVersionObject) ValidateRequest() error { - if !strings.HasSuffix(a.version, ".json") { - return fmt.Errorf("version has no .json suffix") - } - return nil -} - -// Validate is a No-Op. -func (a apiVersionObject) Validate() error { - return nil -} - -// getVersion returns the version. 
-func (a apiVersionObject) getVersion() any { - var res any - actionForVariant(a.variant, - func() { - res = a.tdx - }, - func() { - res = a.snp - }, - ) - return res -} - -// setVersion sets the version. -func (a *apiVersionObject) setVersion(version any) { - actionForVariant(a.variant, - func() { - a.tdx = version.(attestationconfigapi.TDXVersion) - }, - func() { - a.snp = version.(attestationconfigapi.SEVSNPVersion) - }, - ) -} - -// actionForVariant performs the given action based on the whether variant is a TDX or SEV-SNP variant. -func actionForVariant( - attestationVariant variant.Variant, - tdxAction func(), snpAction func(), -) { - switch attestationVariant { - case variant.AWSSEVSNP{}, variant.AzureSEVSNP{}, variant.GCPSEVSNP{}: - snpAction() - case variant.AzureTDX{}: - tdxAction() - default: - panic(fmt.Sprintf("unsupported attestation variant: %s", attestationVariant)) - } -} - -// isInputNewerThanOtherSEVSNPVersion compares all version fields and returns false if any input field is older, or the versions are equal. -func isInputNewerThanOtherSEVSNPVersion(input, other attestationconfigapi.SEVSNPVersion) bool { - if input == other { - return false - } - if input.TEE < other.TEE { - return false - } - if input.SNP < other.SNP { - return false - } - if input.Microcode < other.Microcode { - return false - } - if input.Bootloader < other.Bootloader { - return false - } - return true -} - -// isInputNewerThanOtherSEVSNPVersion compares all version fields and returns false if any input field is older, or the versions are equal. -func isInputNewerThanOtherTDXVersion(input, other attestationconfigapi.TDXVersion) bool { - if input == other { - return false - } - - if input.PCESVN < other.PCESVN { - return false - } - if input.QESVN < other.QESVN { - return false - } - - // Validate component-wise security version numbers - for idx, inputVersion := range input.TEETCBSVN { - if inputVersion < other.TEETCBSVN[idx] { - return false - } - } - - return true -} diff --git a/internal/api/attestationconfigapi/cli/client/reporter_test.go b/internal/api/attestationconfigapi/cli/client/reporter_test.go deleted file mode 100644 index ded25b020..000000000 --- a/internal/api/attestationconfigapi/cli/client/reporter_test.go +++ /dev/null @@ -1,138 +0,0 @@ -/* -Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 -*/ -package client - -import ( - "testing" - - "github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi" - "github.com/stretchr/testify/assert" -) - -func TestIsInputNewerThanOtherSEVSNPVersion(t *testing.T) { - newTestCfg := func() attestationconfigapi.SEVSNPVersion { - return attestationconfigapi.SEVSNPVersion{ - Microcode: 93, - TEE: 0, - SNP: 6, - Bootloader: 2, - } - } - - testCases := map[string]struct { - latest attestationconfigapi.SEVSNPVersion - input attestationconfigapi.SEVSNPVersion - expect bool - }{ - "input is older than latest": { - input: func(c attestationconfigapi.SEVSNPVersion) attestationconfigapi.SEVSNPVersion { - c.Microcode-- - return c - }(newTestCfg()), - latest: newTestCfg(), - expect: false, - }, - "input has greater and smaller version field than latest": { - input: func(c attestationconfigapi.SEVSNPVersion) attestationconfigapi.SEVSNPVersion { - c.Microcode++ - c.Bootloader-- - return c - }(newTestCfg()), - latest: newTestCfg(), - expect: false, - }, - "input is newer than latest": { - input: func(c attestationconfigapi.SEVSNPVersion) attestationconfigapi.SEVSNPVersion { - c.TEE++ - return c - }(newTestCfg()), - latest: 
newTestCfg(), - expect: true, - }, - "input is equal to latest": { - input: newTestCfg(), - latest: newTestCfg(), - expect: false, - }, - } - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - isNewer := isInputNewerThanOtherSEVSNPVersion(tc.input, tc.latest) - assert.Equal(t, tc.expect, isNewer) - }) - } -} - -func TestIsInputNewerThanOtherTDXVersion(t *testing.T) { - newTestVersion := func() attestationconfigapi.TDXVersion { - return attestationconfigapi.TDXVersion{ - QESVN: 1, - PCESVN: 2, - TEETCBSVN: [16]byte{2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2}, - QEVendorID: [16]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, - XFAM: [8]byte{0, 1, 2, 3, 4, 5, 6, 7}, - } - } - - testCases := map[string]struct { - latest attestationconfigapi.TDXVersion - input attestationconfigapi.TDXVersion - expect bool - }{ - "input is older than latest": { - input: func(c attestationconfigapi.TDXVersion) attestationconfigapi.TDXVersion { - c.QESVN-- - return c - }(newTestVersion()), - latest: newTestVersion(), - expect: false, - }, - "input has greater and smaller version field than latest": { - input: func(c attestationconfigapi.TDXVersion) attestationconfigapi.TDXVersion { - c.QESVN++ - c.PCESVN-- - return c - }(newTestVersion()), - latest: newTestVersion(), - expect: false, - }, - "input is newer than latest": { - input: func(c attestationconfigapi.TDXVersion) attestationconfigapi.TDXVersion { - c.QESVN++ - return c - }(newTestVersion()), - latest: newTestVersion(), - expect: true, - }, - "input is equal to latest": { - input: newTestVersion(), - latest: newTestVersion(), - expect: false, - }, - "tee tcb svn is newer": { - input: func(c attestationconfigapi.TDXVersion) attestationconfigapi.TDXVersion { - c.TEETCBSVN[4]++ - return c - }(newTestVersion()), - latest: newTestVersion(), - expect: true, - }, - "xfam is different": { - input: func(c attestationconfigapi.TDXVersion) attestationconfigapi.TDXVersion { - c.XFAM[3]++ - return c - }(newTestVersion()), - latest: newTestVersion(), - expect: true, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - isNewer := isInputNewerThanOtherTDXVersion(tc.input, tc.latest) - assert.Equal(t, tc.expect, isNewer) - }) - } -} diff --git a/internal/api/attestationconfigapi/cli/compare.go b/internal/api/attestationconfigapi/cli/compare.go deleted file mode 100644 index 36113c689..000000000 --- a/internal/api/attestationconfigapi/cli/compare.go +++ /dev/null @@ -1,101 +0,0 @@ -/* -Copyright (c) Edgeless Systems GmbH - -SPDX-License-Identifier: BUSL-1.1 -*/ -package main - -import ( - "fmt" - "os" - "slices" - - "github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi/cli/client" - "github.com/edgelesssys/constellation/v2/internal/attestation/variant" - "github.com/edgelesssys/constellation/v2/internal/file" - "github.com/edgelesssys/constellation/v2/internal/verify" - "github.com/google/go-tdx-guest/proto/tdx" - "github.com/spf13/afero" - "github.com/spf13/cobra" -) - -func newCompareCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "compare VARIANT FILE [FILE...]", - Short: "Returns the minimum version of all given attestation reports.", - Long: "Compare a list of attestation reports and return the report with the minimum version.", - Example: "cli compare azure-sev-snp report1.json report2.json", - Args: cobra.MatchAll(cobra.MinimumNArgs(2), arg0isAttestationVariant()), - RunE: runCompare, - } - - return cmd -} - -func runCompare(cmd *cobra.Command, args []string) error { - 
cmd.SetOut(os.Stdout) - - variant, err := variant.FromString(args[0]) - if err != nil { - return fmt.Errorf("parsing variant: %w", err) - } - - return compare(cmd, variant, args[1:], file.NewHandler(afero.NewOsFs())) -} - -func compare(cmd *cobra.Command, attestationVariant variant.Variant, files []string, fs file.Handler) (retErr error) { - if !slices.Contains([]variant.Variant{variant.AWSSEVSNP{}, variant.AzureSEVSNP{}, variant.GCPSEVSNP{}, variant.AzureTDX{}}, attestationVariant) { - return fmt.Errorf("variant %s not supported", attestationVariant) - } - - lowestVersion, err := compareVersions(attestationVariant, files, fs) - if err != nil { - return fmt.Errorf("comparing versions: %w", err) - } - - cmd.Println(lowestVersion) - return nil -} - -func compareVersions(attestationVariant variant.Variant, files []string, fs file.Handler) (string, error) { - readReport := readSNPReport - if attestationVariant.Equal(variant.AzureTDX{}) { - readReport = readTDXReport - } - - lowestVersion := files[0] - lowestReport, err := readReport(files[0], fs) - if err != nil { - return "", fmt.Errorf("reading report: %w", err) - } - - for _, file := range files[1:] { - report, err := readReport(file, fs) - if err != nil { - return "", fmt.Errorf("reading report: %w", err) - } - - if client.IsInputNewerThanOtherVersion(attestationVariant, lowestReport, report) { - lowestVersion = file - lowestReport = report - } - } - - return lowestVersion, nil -} - -func readSNPReport(file string, fs file.Handler) (any, error) { - var report verify.Report - if err := fs.ReadJSON(file, &report); err != nil { - return nil, fmt.Errorf("reading snp report: %w", err) - } - return convertTCBVersionToSNPVersion(report.SNPReport.LaunchTCB), nil -} - -func readTDXReport(file string, fs file.Handler) (any, error) { - var report *tdx.QuoteV4 - if err := fs.ReadJSON(file, &report); err != nil { - return nil, fmt.Errorf("reading tdx report: %w", err) - } - return convertQuoteToTDXVersion(report), nil -} diff --git a/internal/api/attestationconfigapi/cli/delete.go b/internal/api/attestationconfigapi/cli/delete.go index 6c4b6ca81..d0b0f447f 100644 --- a/internal/api/attestationconfigapi/cli/delete.go +++ b/internal/api/attestationconfigapi/cli/delete.go @@ -1,23 +1,19 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package main import ( - "context" "errors" "fmt" "log/slog" "path" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/s3" - s3types "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi" - "github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi/cli/client" "github.com/edgelesssys/constellation/v2/internal/attestation/variant" + "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider" "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/internal/staticupload" "github.com/spf13/cobra" @@ -26,21 +22,21 @@ import ( // newDeleteCmd creates the delete command. func newDeleteCmd() *cobra.Command { cmd := &cobra.Command{ - Use: "delete VARIANT KIND <version>", + Use: "delete {azure|aws} {snp-report|guest-firmware} <version>", Short: "Delete an object from the attestationconfig API", Long: "Delete a specific object version from the config api. <version>
is the name of the object to delete (without .json suffix)", - Example: "COSIGN_PASSWORD=$CPW COSIGN_PRIVATE_KEY=$CKEY cli delete azure-sev-snp attestation-report 1.0.0", - Args: cobra.MatchAll(cobra.ExactArgs(3), arg0isAttestationVariant(), isValidKind(1)), + Example: "COSIGN_PASSWORD=$CPW COSIGN_PRIVATE_KEY=$CKEY cli delete azure snp-report 1.0.0", + Args: cobra.MatchAll(cobra.ExactArgs(3), isCloudProvider(0), isValidKind(1)), PreRunE: envCheck, RunE: runDelete, } recursivelyCmd := &cobra.Command{ - Use: "recursive {aws-sev-snp|azure-sev-snp|azure-tdx|gcp-sev-snp}", + Use: "recursive {azure|aws}", Short: "delete all objects from the API path constellation/v1/attestation/", Long: "Delete all objects from the API path constellation/v1/attestation/", - Example: "COSIGN_PASSWORD=$CPW COSIGN_PRIVATE_KEY=$CKEY cli delete recursive azure-sev-snp", - Args: cobra.MatchAll(cobra.ExactArgs(1), arg0isAttestationVariant()), + Example: "COSIGN_PASSWORD=$CPW COSIGN_PRIVATE_KEY=$CKEY cli delete recursive azure", + Args: cobra.MatchAll(cobra.ExactArgs(1), isCloudProvider(0)), RunE: runRecursiveDelete, } @@ -62,7 +58,7 @@ func runDelete(cmd *cobra.Command, args []string) (retErr error) { Region: deleteCfg.region, DistributionID: deleteCfg.distribution, } - client, clientClose, err := client.New(cmd.Context(), cfg, + client, clientClose, err := attestationconfigapi.NewClient(cmd.Context(), cfg, []byte(cosignPwd), []byte(privateKey), false, 1, log) if err != nil { return fmt.Errorf("create attestation client: %w", err) @@ -74,7 +70,14 @@ func runDelete(cmd *cobra.Command, args []string) (retErr error) { } }() - return deleteEntry(cmd.Context(), client, deleteCfg) + switch deleteCfg.provider { + case cloudprovider.AWS: + return deleteAWS(cmd.Context(), client, deleteCfg) + case cloudprovider.Azure: + return deleteAzure(cmd.Context(), client, deleteCfg) + default: + return fmt.Errorf("unsupported cloud provider: %s", deleteCfg.provider) + } } func runRecursiveDelete(cmd *cobra.Command, args []string) (retErr error) { @@ -102,13 +105,21 @@ func runRecursiveDelete(cmd *cobra.Command, args []string) (retErr error) { } }() - deletePath := path.Join(attestationconfigapi.AttestationURLPath, deleteCfg.variant.String()) + var deletePath string + switch deleteCfg.provider { + case cloudprovider.AWS: + deletePath = path.Join(attestationconfigapi.AttestationURLPath, variant.AWSSEVSNP{}.String()) + case cloudprovider.Azure: + deletePath = path.Join(attestationconfigapi.AttestationURLPath, variant.AzureSEVSNP{}.String()) + default: + return fmt.Errorf("unsupported cloud provider: %s", deleteCfg.provider) + } - return deleteEntryRecursive(cmd.Context(), deletePath, client, deleteCfg) + return deleteRecursive(cmd.Context(), deletePath, client, deleteCfg) } type deleteConfig struct { - variant variant.Variant + provider cloudprovider.Provider kind objectKind version string region string @@ -135,15 +146,12 @@ func newDeleteConfig(cmd *cobra.Command, args [3]string) (deleteConfig, error) { } apiCfg := getAPIEnvironment(testing) - variant, err := variant.FromString(args[0]) - if err != nil { - return deleteConfig{}, fmt.Errorf("invalid attestation variant: %q: %w", args[0], err) - } + provider := cloudprovider.FromString(args[0]) kind := kindFromString(args[1]) version := args[2] return deleteConfig{ - variant: variant, + provider: provider, kind: kind, version: version, region: region, @@ -153,44 +161,3 @@ func newDeleteConfig(cmd *cobra.Command, args [3]string) (deleteConfig, error) { cosignPublicKey: apiCfg.cosignPublicKey, 
}, nil } - -func deleteEntry(ctx context.Context, client *client.Client, cfg deleteConfig) error { - if cfg.kind != attestationReport { - return fmt.Errorf("kind %s not supported", cfg.kind) - } - - return client.DeleteVersion(ctx, cfg.variant, cfg.version) -} - -func deleteEntryRecursive(ctx context.Context, path string, client *staticupload.Client, cfg deleteConfig) error { - resp, err := client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{ - Bucket: aws.String(cfg.bucket), - Prefix: aws.String(path), - }) - if err != nil { - return err - } - - // Delete all objects in the path. - objIDs := make([]s3types.ObjectIdentifier, len(resp.Contents)) - for i, obj := range resp.Contents { - objIDs[i] = s3types.ObjectIdentifier{Key: obj.Key} - } - if len(objIDs) > 0 { - _, err = client.DeleteObjects(ctx, &s3.DeleteObjectsInput{ - Bucket: aws.String(cfg.bucket), - Delete: &s3types.Delete{ - Objects: objIDs, - Quiet: toPtr(true), - }, - }) - if err != nil { - return err - } - } - return nil -} - -func toPtr[T any](v T) *T { - return &v -} diff --git a/internal/api/attestationconfigapi/cli/e2e/test.sh.in b/internal/api/attestationconfigapi/cli/e2e/test.sh.in index 647fd6e08..773443df4 100755 --- a/internal/api/attestationconfigapi/cli/e2e/test.sh.in +++ b/internal/api/attestationconfigapi/cli/e2e/test.sh.in @@ -19,67 +19,36 @@ configapi_cli=$(realpath @@CONFIGAPI_CLI@@) stat "${configapi_cli}" >> /dev/null configapi_cli="${configapi_cli} --testing" ###### script body ###### -attestationVariant=$1 -readonly attestationVariant +function variant() { + if [[ $1 == "aws" ]]; then + echo "aws-sev-snp" + return 0 + elif [[ $1 == "azure" ]]; then + echo "azure-sev-snp" + return 0 + else + echo "Unknown CSP: $1" + exit 1 + fi +} + +csp=$1 +readonly csp +attestationType=$(variant "$csp") readonly region="eu-west-1" readonly bucket="resource-api-testing" tmpdir=$(mktemp -d) readonly tmpdir -registerExitHandler "rm -rf ${tmpdir}" +registerExitHandler "rm -rf $tmpdir" # empty the bucket version state -${configapi_cli} delete recursive "${attestationVariant}" --region "${region}" --bucket "${bucket}" +${configapi_cli} delete recursive "$csp" --region "$region" --bucket "$bucket" -readonly current_report_path="${tmpdir}/attestationReportCurrent.json" -readonly report_path="${tmpdir}/attestationReport.json" -readonly older_report_path="${tmpdir}/attestationReportOld.json" - -if [[ ${attestationVariant} == *-tdx ]]; then - cat << EOF > "${current_report_path}" -{ - "header": { - "qe_svn": "AAA=", - "pce_svn": "AAA=", - "qe_vendor_id": "KioqKioqKioqKioqKioqKg==" - }, - "td_quote_body": { - "tee_tcb_svn": "AAAAAAAAAAAAAAAAAAAAAA==", - "xfam": "AAAAAAAAAAA=" - } -} -EOF - # the high version numbers ensure that it's newer than the current latest value - cat << EOF > "${report_path}" -{ - "header": { - "qe_svn": "//8=", - "pce_svn": "//8=", - "qe_vendor_id": "KioqKioqKioqKioqKioqKg==" - }, - "td_quote_body": { - "tee_tcb_svn": "/////////////////////w==", - "xfam": "AQIDBAUGBwg=" - } -} -EOF - # has an older version - cat << EOF > "${older_report_path}" -{ - "header": { - "qe_svn": "//8=", - "pce_svn": "/v8=", - "qe_vendor_id": "KioqKioqKioqKioqKioqKg==" - }, - "td_quote_body": { - "tee_tcb_svn": "/////////////////////g==", - "xfam": "AQIDBAUGBwg=" - } -} -EOF -elif [[ ${attestationVariant} == *-sev-snp ]]; then - cat << EOF > "${current_report_path}" +# the high version numbers ensure that it's newer than the current latest value +readonly current_report_path="$tmpdir/currentSnpReport.json" +cat << EOF > 
"$current_report_path" { "snp_report": { "reported_tcb": { @@ -103,105 +72,90 @@ elif [[ ${attestationVariant} == *-sev-snp ]]; then } } EOF - # the high version numbers ensure that it's newer than the current latest value - cat << EOF > "${report_path}" -{ - "snp_report": { - "reported_tcb": { - "bootloader": 255, - "tee": 255, - "snp": 255, - "microcode": 255 - }, - "committed_tcb": { - "bootloader": 255, - "tee": 255, - "snp": 255, - "microcode": 255 - }, - "launch_tcb": { - "bootloader": 255, - "tee": 255, - "snp": 255, - "microcode": 255 - } - } -} -EOF - # has an older version - cat << EOF > "${older_report_path}" -{ - "snp_report": { - "reported_tcb": { - "bootloader": 255, - "tee": 255, - "snp": 255, - "microcode": 254 - }, - "committed_tcb": { - "bootloader": 255, - "tee": 255, - "snp": 255, - "microcode": 254 - }, - "launch_tcb": { - "bootloader": 255, - "tee": 255, - "snp": 255, - "microcode": 254 - } - } -} -EOF -else - echo "Unknown attestation variant: ${attestationVariant}" - exit 1 -fi - # upload a fake latest version for the fetcher -${configapi_cli} upload "${attestationVariant}" attestation-report "${current_report_path}" --force --upload-date "2000-01-01-01-01" --region "${region}" --bucket "${bucket}" +${configapi_cli} upload "$csp" snp-report "$current_report_path" --force --upload-date "2000-01-01-01-01" --region "$region" --bucket "$bucket" + +# the high version numbers ensure that it's newer than the current latest value +readonly report_path="$tmpdir/snpReport.json" +cat << EOF > "$report_path" +{ + "snp_report": { + "reported_tcb": { + "bootloader": 255, + "tee": 255, + "snp": 255, + "microcode": 255 + }, + "committed_tcb": { + "bootloader": 255, + "tee": 255, + "snp": 255, + "microcode": 255 + }, + "launch_tcb": { + "bootloader": 255, + "tee": 255, + "snp": 255, + "microcode": 255 + } + } +} +EOF + +# has an older version +readonly older_report_path="$tmpdir/snpReportOld.json" +cat << EOF > "$older_report_path" +{ + "snp_report": { + "reported_tcb": { + "bootloader": 255, + "tee": 255, + "snp": 255, + "microcode": 254 + }, + "committed_tcb": { + "bootloader": 255, + "tee": 255, + "snp": 255, + "microcode": 254 + }, + "launch_tcb": { + "bootloader": 255, + "tee": 255, + "snp": 255, + "microcode": 254 + } + } +} +EOF # report 3 versions with different dates to fill the reporter cache readonly date_oldest="2023-02-01-03-04" -${configapi_cli} upload "${attestationVariant}" attestation-report "${older_report_path}" --upload-date "${date_oldest}" --region "${region}" --bucket "${bucket}" --cache-window-size 3 +${configapi_cli} upload "$csp" snp-report "$older_report_path" --upload-date "$date_oldest" --region "$region" --bucket "$bucket" --cache-window-size 3 readonly date_older="2023-02-02-03-04" -${configapi_cli} upload "${attestationVariant}" attestation-report "${older_report_path}" --upload-date "${date_older}" --region "${region}" --bucket "${bucket}" --cache-window-size 3 +${configapi_cli} upload "$csp" snp-report "$older_report_path" --upload-date "$date_older" --region "$region" --bucket "$bucket" --cache-window-size 3 readonly date="2023-02-03-03-04" -${configapi_cli} upload "${attestationVariant}" attestation-report "${report_path}" --upload-date "${date}" --region "${region}" --bucket "${bucket}" --cache-window-size 3 +${configapi_cli} upload "$csp" snp-report "$report_path" --upload-date "$date" --region "$region" --bucket "$bucket" --cache-window-size 3 # expect that $date_oldest is served as latest version 
-basepath="constellation/v1/attestation/${attestationVariant}" +basepath="constellation/v1/attestation/${attestationType}" baseurl="https://d33dzgxuwsgbpw.cloudfront.net/${basepath}" -if ! curl -fsSL "${baseurl}/${date_oldest}.json" > version.json; then +if ! curl -fsSL "${baseurl}"/${date_oldest}.json > version.json; then echo "Checking for uploaded version file ${basepath}/${date_oldest}.json: request returned ${?}" exit 1 fi - -if [[ ${attestationVariant} == *-tdx ]]; then - # check that version values are equal to expected - if ! cmp -s <(echo -n '{"qeSVN":65535,"pceSVN":65534,"teeTCBSVN":[255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,254],"qeVendorID":[42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42],"xfam":[1,2,3,4,5,6,7,8]}') version.json; then - echo "The version content:" - cat version.json - echo " is not equal to the expected version content:" - echo '{"qeSVN":65535,"pceSVN":65534,"teeTCBSVN":[255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,254],"qeVendorID":[42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42],"xfam":[1,2,3,4,5,6,7,8]}' - exit 1 - fi -elif [[ ${attestationVariant} == *-sev-snp ]]; then - # check that version values are equal to expected - if ! cmp -s <(echo -n '{"bootloader":255,"tee":255,"snp":255,"microcode":254}') version.json; then - echo "The version content:" - cat version.json - echo " is not equal to the expected version content:" - echo '{"bootloader":255,"tee":255,"snp":255,"microcode":254}' - exit 1 - fi +# check that version values are equal to expected +if ! cmp -s <(echo -n '{"bootloader":255,"tee":255,"snp":255,"microcode":254}') version.json; then + echo "The version content:" + cat version.json + echo " is not equal to the expected version content:" + echo '{"bootloader":255,"tee":255,"snp":255,"microcode":254}' + exit 1 fi - -if ! curl -fsSL "${baseurl}/${date_oldest}.json.sig" > /dev/null; then +if ! curl -fsSL "${baseurl}"/${date_oldest}.json.sig > /dev/null; then echo "Checking for uploaded version signature file ${basepath}/${date_oldest}.json.sig: request returned ${?}" exit 1 fi - # check list endpoint if ! curl -fsSL "${baseurl}"/list > list.json; then echo "Checking for uploaded list file ${basepath}/list: request returned ${?}" @@ -217,28 +171,28 @@ if ! cmp -s <(echo -n '["2023-02-01-03-04.json","2000-01-01-01-01.json"]') list. fi # check that the other versions are not uploaded -http_code=$(curl -sSL -w '%{http_code}\n' -o /dev/null "${baseurl}/${date_older}.json") -if [[ ${http_code} -ne 404 ]]; then +http_code=$(curl -sSL -w '%{http_code}\n' -o /dev/null "${baseurl}"/${date_older}.json) +if [[ $http_code -ne 404 ]]; then echo "Expected HTTP code 404 for: ${basepath}/${date_older}.json, but got ${http_code}" exit 1 fi -http_code=$(curl -sSL -w '%{http_code}\n' -o /dev/null "${baseurl}/${date}.json.sig") -if [[ ${http_code} -ne 404 ]]; then +http_code=$(curl -sSL -w '%{http_code}\n' -o /dev/null "${baseurl}"/${date}.json.sig) +if [[ $http_code -ne 404 ]]; then echo "Expected HTTP code 404 for: ${basepath}/${date}.json, but got ${http_code}" exit 1 fi -${configapi_cli} delete "${attestationVariant}" attestation-report "${date_oldest}" --region "${region}" --bucket "${bucket}" +${configapi_cli} delete "$csp" snp-report "$date_oldest" --region "$region" --bucket "$bucket" # Omit -f to check for 404. We want to check that a file was deleted, therefore we expect the query to fail. 
-http_code=$(curl -sSL -w '%{http_code}\n' -o /dev/null "${baseurl}/${date_oldest}.json") -if [[ ${http_code} -ne 404 ]]; then +http_code=$(curl -sSL -w '%{http_code}\n' -o /dev/null "${baseurl}"/${date_oldest}.json) +if [[ $http_code -ne 404 ]]; then echo "Expected HTTP code 404 for: ${basepath}/${date_oldest}.json, but got ${http_code}" exit 1 fi # Omit -f to check for 404. We want to check that a file was deleted, therefore we expect the query to fail. -http_code=$(curl -sSL -w '%{http_code}\n' -o /dev/null "${baseurl}/${date_oldest}.json.sig") -if [[ ${http_code} -ne 404 ]]; then +http_code=$(curl -sSL -w '%{http_code}\n' -o /dev/null "${baseurl}"/${date_oldest}.json.sig) +if [[ $http_code -ne 404 ]]; then echo "Expected HTTP code 404 for: ${basepath}/${date_oldest}.json, but got ${http_code}" exit 1 fi diff --git a/internal/api/attestationconfigapi/cli/main.go b/internal/api/attestationconfigapi/cli/main.go index 76a0e6aef..e6e951f1b 100644 --- a/internal/api/attestationconfigapi/cli/main.go +++ b/internal/api/attestationconfigapi/cli/main.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* @@ -27,11 +27,8 @@ const ( distributionID = constants.CDNDefaultDistributionID envCosignPwd = "COSIGN_PASSWORD" envCosignPrivateKey = "COSIGN_PRIVATE_KEY" - // versionWindowSize defines the number of versions to be considered for the latest version. - // Through our weekly e2e tests, each week 2 versions are uploaded: - // One from a stable release, and one from a debug image. - // A window size of 6 ensures we update only after a version has been "stable" for 3 weeks. - versionWindowSize = 6 + // versionWindowSize defines the number of versions to be considered for the latest version. Each week 5 versions are uploaded for each node of the verify cluster. 
+ versionWindowSize = 15 ) var ( @@ -59,7 +56,6 @@ func newRootCmd() *cobra.Command { rootCmd.AddCommand(newUploadCmd()) rootCmd.AddCommand(newDeleteCmd()) - rootCmd.AddCommand(newCompareCmd()) return rootCmd } diff --git a/internal/api/attestationconfigapi/cli/upload.go b/internal/api/attestationconfigapi/cli/upload.go index dbfba690c..54036009a 100644 --- a/internal/api/attestationconfigapi/cli/upload.go +++ b/internal/api/attestationconfigapi/cli/upload.go @@ -1,13 +1,12 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package main import ( "context" - "encoding/binary" "errors" "fmt" "log/slog" @@ -15,33 +14,31 @@ import ( "time" "github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi" - "github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi/cli/client" - "github.com/edgelesssys/constellation/v2/internal/api/fetcher" "github.com/edgelesssys/constellation/v2/internal/attestation/variant" + "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider" "github.com/edgelesssys/constellation/v2/internal/file" "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/internal/staticupload" "github.com/edgelesssys/constellation/v2/internal/verify" - "github.com/google/go-tdx-guest/proto/tdx" "github.com/spf13/afero" "github.com/spf13/cobra" ) func newUploadCmd() *cobra.Command { uploadCmd := &cobra.Command{ - Use: "upload VARIANT KIND FILE", + Use: "upload {azure|aws} {snp-report|guest-firmware} ", Short: "Upload an object to the attestationconfig API", - Long: fmt.Sprintf("Upload a new object to the attestationconfig API. For snp-reports the new object is added to a cache folder first.\n"+ + Long: fmt.Sprintf("Upload a new object to the attestationconfig API. For snp-reports the new object is added to a cache folder first."+ "The CLI then determines the lowest version within the cache-window present in the cache and writes that value to the config api if necessary. "+ - "For guest-firmware objects the object is added to the API directly.\n"+ - "Please authenticate with AWS through your preferred method (e.g. environment variables, CLI) "+ + "For guest-firmware objects the object is added to the API directly. "+ + "Please authenticate with AWS through your preferred method (e.g. environment variables, CLI)"+ "to be able to upload to S3. 
Set the %s and %s environment variables to authenticate with cosign.", envCosignPrivateKey, envCosignPwd, ), - Example: "COSIGN_PASSWORD=$CPW COSIGN_PRIVATE_KEY=$CKEY cli upload azure-sev-snp attestation-report /some/path/report.json", + Example: "COSIGN_PASSWORD=$CPW COSIGN_PRIVATE_KEY=$CKEY cli upload azure snp-report /some/path/report.json", - Args: cobra.MatchAll(cobra.ExactArgs(3), arg0isAttestationVariant(), isValidKind(1)), + Args: cobra.MatchAll(cobra.ExactArgs(3), isCloudProvider(0), isValidKind(1)), PreRunE: envCheck, RunE: runUpload, } @@ -71,15 +68,18 @@ func runUpload(cmd *cobra.Command, args []string) (retErr error) { return fmt.Errorf("parsing cli flags: %w", err) } - client, clientClose, err := client.New(ctx, + client, clientClose, err := attestationconfigapi.NewClient( + ctx, staticupload.Config{ Bucket: uploadCfg.bucket, Region: uploadCfg.region, DistributionID: uploadCfg.distribution, }, - []byte(cosignPwd), []byte(privateKey), - false, uploadCfg.cacheWindowSize, log, - ) + []byte(cosignPwd), + []byte(privateKey), + false, + uploadCfg.cacheWindowSize, + log) defer func() { err := clientClose(cmd.Context()) @@ -92,57 +92,54 @@ func runUpload(cmd *cobra.Command, args []string) (retErr error) { return fmt.Errorf("creating client: %w", err) } - return uploadReport(ctx, client, uploadCfg, file.NewHandler(afero.NewOsFs()), log) + var attesation variant.Variant + switch uploadCfg.provider { + case cloudprovider.AWS: + attesation = variant.AWSSEVSNP{} + case cloudprovider.Azure: + attesation = variant.AzureSEVSNP{} + default: + return fmt.Errorf("unsupported cloud provider: %s", uploadCfg.provider) + } + + return uploadReport(ctx, attesation, client, uploadCfg, file.NewHandler(afero.NewOsFs()), log) } -func uploadReport( - ctx context.Context, apiClient *client.Client, - cfg uploadConfig, fs file.Handler, log *slog.Logger, +func uploadReport(ctx context.Context, + attestation variant.Variant, + client *attestationconfigapi.Client, + cfg uploadConfig, + fs file.Handler, + log *slog.Logger, ) error { - if cfg.kind != attestationReport { + if cfg.kind != snpReport { return fmt.Errorf("kind %s not supported", cfg.kind) } - apiFetcher := attestationconfigapi.NewFetcherWithCustomCDNAndCosignKey(cfg.url, cfg.cosignPublicKey) - latestVersionInAPI, err := apiFetcher.FetchLatestVersion(ctx, cfg.variant) + log.Info(fmt.Sprintf("Reading SNP report from file: %s", cfg.path)) + var report verify.Report + if err := fs.ReadJSON(cfg.path, &report); err != nil { + return fmt.Errorf("reading snp report: %w", err) + } + + inputVersion := convertTCBVersionToSNPVersion(report.SNPReport.LaunchTCB) + log.Info(fmt.Sprintf("Input report: %+v", inputVersion)) + + latestAPIVersionAPI, err := attestationconfigapi.NewFetcherWithCustomCDNAndCosignKey(cfg.url, cfg.cosignPublicKey).FetchSEVSNPVersionLatest(ctx, attestation) if err != nil { - var notFoundErr *fetcher.NotFoundError - if errors.As(err, ¬FoundErr) { + if errors.Is(err, attestationconfigapi.ErrNoVersionsFound) { log.Info("No versions found in API, but assuming that we are uploading the first version.") } else { return fmt.Errorf("fetching latest version: %w", err) } } - var newVersion, latestVersion any - switch cfg.variant { - case variant.AWSSEVSNP{}, variant.AzureSEVSNP{}, variant.GCPSEVSNP{}: - latestVersion = latestVersionInAPI.SEVSNPVersion - - log.Info(fmt.Sprintf("Reading SNP report from file: %s", cfg.path)) - newVersion, err = readSNPReport(cfg.path, fs) - if err != nil { - return err + latestAPIVersion := 
latestAPIVersionAPI.SEVSNPVersion + if err := client.UploadSEVSNPVersionLatest(ctx, attestation, inputVersion, latestAPIVersion, cfg.uploadDate, cfg.force); err != nil { + if errors.Is(err, attestationconfigapi.ErrNoNewerVersion) { + log.Info(fmt.Sprintf("Input version: %+v is not newer than latest API version: %+v", inputVersion, latestAPIVersion)) + return nil } - log.Info(fmt.Sprintf("Input SNP report: %+v", newVersion)) - - case variant.AzureTDX{}: - latestVersion = latestVersionInAPI.TDXVersion - - log.Info(fmt.Sprintf("Reading TDX report from file: %s", cfg.path)) - newVersion, err = readTDXReport(cfg.path, fs) - if err != nil { - return err - } - log.Info(fmt.Sprintf("Input TDX report: %+v", newVersion)) - - default: - return fmt.Errorf("variant %s not supported", cfg.variant) - } - - if err := apiClient.UploadLatestVersion( - ctx, cfg.variant, newVersion, latestVersion, cfg.uploadDate, cfg.force, - ); err != nil && !errors.Is(err, client.ErrNoNewerVersion) { return fmt.Errorf("updating latest version: %w", err) } @@ -158,18 +155,8 @@ func convertTCBVersionToSNPVersion(tcb verify.TCBVersion) attestationconfigapi.S } } -func convertQuoteToTDXVersion(quote *tdx.QuoteV4) attestationconfigapi.TDXVersion { - return attestationconfigapi.TDXVersion{ - QESVN: binary.LittleEndian.Uint16(quote.Header.QeSvn), - PCESVN: binary.LittleEndian.Uint16(quote.Header.PceSvn), - QEVendorID: [16]byte(quote.Header.QeVendorId), - XFAM: [8]byte(quote.TdQuoteBody.Xfam), - TEETCBSVN: [16]byte(quote.TdQuoteBody.TeeTcbSvn), - } -} - type uploadConfig struct { - variant variant.Variant + provider cloudprovider.Provider kind objectKind path string uploadDate time.Time @@ -189,7 +176,7 @@ func newConfig(cmd *cobra.Command, args [3]string) (uploadConfig, error) { } uploadDate := time.Now() if dateStr != "" { - uploadDate, err = time.Parse(client.VersionFormat, dateStr) + uploadDate, err = time.Parse(attestationconfigapi.VersionFormat, dateStr) if err != nil { return uploadConfig{}, fmt.Errorf("parsing date: %w", err) } @@ -221,16 +208,12 @@ func newConfig(cmd *cobra.Command, args [3]string) (uploadConfig, error) { return uploadConfig{}, fmt.Errorf("getting cache window size: %w", err) } - variant, err := variant.FromString(args[0]) - if err != nil { - return uploadConfig{}, fmt.Errorf("invalid attestation variant: %q: %w", args[0], err) - } - + provider := cloudprovider.FromString(args[0]) kind := kindFromString(args[1]) path := args[2] return uploadConfig{ - variant: variant, + provider: provider, kind: kind, path: path, uploadDate: uploadDate, diff --git a/internal/api/attestationconfigapi/cli/validargs.go b/internal/api/attestationconfigapi/cli/validargs.go index 85a7f72dd..033aaa0a3 100644 --- a/internal/api/attestationconfigapi/cli/validargs.go +++ b/internal/api/attestationconfigapi/cli/validargs.go @@ -1,39 +1,32 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package main import ( - "errors" "fmt" "strings" - "github.com/edgelesssys/constellation/v2/internal/attestation/variant" + "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider" "github.com/spf13/cobra" ) -func arg0isAttestationVariant() cobra.PositionalArgs { +func isCloudProvider(arg int) cobra.PositionalArgs { return func(_ *cobra.Command, args []string) error { - attestationVariant, err := variant.FromString(args[0]) - if err != nil { - return errors.New("argument 0 isn't a valid attestation variant") - } - switch attestationVariant { - case 
variant.AWSSEVSNP{}, variant.AzureSEVSNP{}, variant.AzureTDX{}, variant.GCPSEVSNP{}: - return nil - default: - return errors.New("argument 0 isn't a supported attestation variant") + if provider := cloudprovider.FromString(args[arg]); provider == cloudprovider.Unknown { + return fmt.Errorf("argument %s isn't a valid cloud provider", args[arg]) } + return nil } } func isValidKind(arg int) cobra.PositionalArgs { return func(_ *cobra.Command, args []string) error { if kind := kindFromString(args[arg]); kind == unknown { - return fmt.Errorf("argument %s isn't a valid kind: must be one of [%q, %q]", args[arg], attestationReport, guestFirmware) + return fmt.Errorf("argument %s isn't a valid kind", args[arg]) } return nil } @@ -44,15 +37,15 @@ type objectKind string const ( // unknown is the default objectKind and does nothing. - unknown objectKind = "unknown-kind" - attestationReport objectKind = "attestation-report" - guestFirmware objectKind = "guest-firmware" + unknown objectKind = "unknown-kind" + snpReport objectKind = "snp-report" + guestFirmware objectKind = "guest-firmware" ) func kindFromString(s string) objectKind { lower := strings.ToLower(s) switch objectKind(lower) { - case attestationReport, guestFirmware: + case snpReport, guestFirmware: return objectKind(lower) default: return unknown diff --git a/internal/api/attestationconfigapi/client.go b/internal/api/attestationconfigapi/client.go new file mode 100644 index 000000000..583e3bba4 --- /dev/null +++ b/internal/api/attestationconfigapi/client.go @@ -0,0 +1,182 @@ +/* +Copyright (c) Edgeless Systems GmbH + +SPDX-License-Identifier: AGPL-3.0-only +*/ +package attestationconfigapi + +import ( + "context" + "errors" + "fmt" + "log/slog" + "time" + + apiclient "github.com/edgelesssys/constellation/v2/internal/api/client" + "github.com/edgelesssys/constellation/v2/internal/attestation/variant" + "github.com/edgelesssys/constellation/v2/internal/sigstore" + + "github.com/edgelesssys/constellation/v2/internal/staticupload" +) + +// VersionFormat is the format of the version name in the S3 bucket. +const VersionFormat = "2006-01-02-15-04" + +// Client manages (modifies) the version information for the attestation variants. +type Client struct { + s3Client *apiclient.Client + s3ClientClose func(ctx context.Context) error + bucketID string + signer sigstore.Signer + cacheWindowSize int +} + +// NewClient returns a new Client. +func NewClient(ctx context.Context, cfg staticupload.Config, cosignPwd, privateKey []byte, dryRun bool, versionWindowSize int, log *slog.Logger) (*Client, apiclient.CloseFunc, error) { + s3Client, clientClose, err := apiclient.NewClient(ctx, cfg.Region, cfg.Bucket, cfg.DistributionID, dryRun, log) + if err != nil { + return nil, nil, fmt.Errorf("failed to create s3 storage: %w", err) + } + + repo := &Client{ + s3Client: s3Client, + s3ClientClose: clientClose, + signer: sigstore.NewSigner(cosignPwd, privateKey), + bucketID: cfg.Bucket, + cacheWindowSize: versionWindowSize, + } + return repo, clientClose, nil +} + +// uploadSEVSNPVersion uploads the latest version numbers of the Azure SEVSNP. Then version name is the UTC timestamp of the date. The /list entry stores the version name + .json suffix. 
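// Illustrative sketch of the naming scheme described in the comment above: a
// published version object is named after its upload date in VersionFormat and
// lives under the variant's attestation path, next to the /list index. The
// constants below mirror VersionFormat and AttestationURLPath from this change;
// the variant string "azure-sev-snp" is an assumption matching the value the
// e2e test above uses (variant.AzureSEVSNP{}.String() is not shown here).
package main

import (
	"fmt"
	"path"
	"time"
)

const (
	versionFormat      = "2006-01-02-15-04"             // mirrors VersionFormat
	attestationURLPath = "constellation/v1/attestation" // mirrors AttestationURLPath
)

func main() {
	date := time.Date(2023, 2, 3, 3, 4, 0, 0, time.UTC)
	name := date.Format(versionFormat) + ".json" // "2023-02-03-03-04.json"

	variantStr := "azure-sev-snp" // assumed variant string
	fmt.Println(path.Join(attestationURLPath, variantStr, name))   // the version object
	fmt.Println(path.Join(attestationURLPath, variantStr, "list")) // the list index
}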
+func (a Client) uploadSEVSNPVersion(ctx context.Context, attestation variant.Variant, version SEVSNPVersion, date time.Time) error { + versions, err := a.List(ctx, attestation) + if err != nil { + return fmt.Errorf("fetch version list: %w", err) + } + ops := a.constructUploadCmd(attestation, version, versions, date) + + return executeAllCmds(ctx, a.s3Client, ops) +} + +// DeleteSEVSNPVersion deletes the given version (without .json suffix) from the API. +func (a Client) DeleteSEVSNPVersion(ctx context.Context, attestation variant.Variant, versionStr string) error { + versions, err := a.List(ctx, attestation) + if err != nil { + return fmt.Errorf("fetch version list: %w", err) + } + + ops, err := a.deleteSEVSNPVersion(versions, versionStr) + if err != nil { + return err + } + return executeAllCmds(ctx, a.s3Client, ops) +} + +// List returns the list of versions for the given attestation variant. +func (a Client) List(ctx context.Context, attestation variant.Variant) (SEVSNPVersionList, error) { + if !attestation.Equal(variant.AzureSEVSNP{}) && !attestation.Equal(variant.AWSSEVSNP{}) { + return SEVSNPVersionList{}, fmt.Errorf("unsupported attestation variant: %s", attestation) + } + + versions, err := apiclient.Fetch(ctx, a.s3Client, SEVSNPVersionList{variant: attestation}) + if err != nil { + var notFoundErr *apiclient.NotFoundError + if errors.As(err, ¬FoundErr) { + return SEVSNPVersionList{variant: attestation}, nil + } + return SEVSNPVersionList{}, err + } + + versions.variant = attestation + + return versions, nil +} + +func (a Client) deleteSEVSNPVersion(versions SEVSNPVersionList, versionStr string) (ops []crudCmd, err error) { + versionStr = versionStr + ".json" + ops = append(ops, deleteCmd{ + apiObject: SEVSNPVersionAPI{ + Variant: versions.variant, + Version: versionStr, + }, + }) + + removedVersions, err := removeVersion(versions, versionStr) + if err != nil { + return nil, err + } + ops = append(ops, putCmd{ + apiObject: removedVersions, + signer: a.signer, + }) + return ops, nil +} + +func (a Client) constructUploadCmd(attestation variant.Variant, version SEVSNPVersion, versionNames SEVSNPVersionList, date time.Time) []crudCmd { + if !attestation.Equal(versionNames.variant) { + return nil + } + + dateStr := date.Format(VersionFormat) + ".json" + var res []crudCmd + + res = append(res, putCmd{ + apiObject: SEVSNPVersionAPI{Version: dateStr, Variant: attestation, SEVSNPVersion: version}, + signer: a.signer, + }) + + versionNames.addVersion(dateStr) + + res = append(res, putCmd{ + apiObject: versionNames, + signer: a.signer, + }) + + return res +} + +func removeVersion(list SEVSNPVersionList, versionStr string) (removedVersions SEVSNPVersionList, err error) { + versions := list.List() + for i, v := range versions { + if v == versionStr { + if i == len(versions)-1 { + removedVersions = SEVSNPVersionList{list: versions[:i], variant: list.variant} + } else { + removedVersions = SEVSNPVersionList{list: append(versions[:i], versions[i+1:]...), variant: list.variant} + } + return removedVersions, nil + } + } + return SEVSNPVersionList{}, fmt.Errorf("version %s not found in list %v", versionStr, versions) +} + +type crudCmd interface { + Execute(ctx context.Context, c *apiclient.Client) error +} + +type deleteCmd struct { + apiObject apiclient.APIObject +} + +func (d deleteCmd) Execute(ctx context.Context, c *apiclient.Client) error { + return apiclient.DeleteWithSignature(ctx, c, d.apiObject) +} + +type putCmd struct { + apiObject apiclient.APIObject + signer sigstore.Signer +} + +func 
(p putCmd) Execute(ctx context.Context, c *apiclient.Client) error { + return apiclient.SignAndUpdate(ctx, c, p.apiObject, p.signer) +} + +func executeAllCmds(ctx context.Context, client *apiclient.Client, cmds []crudCmd) error { + for _, cmd := range cmds { + if err := cmd.Execute(ctx, client); err != nil { + return fmt.Errorf("execute operation %+v: %w", cmd, err) + } + } + return nil +} diff --git a/internal/api/attestationconfigapi/client_test.go b/internal/api/attestationconfigapi/client_test.go new file mode 100644 index 000000000..9cae1bc5a --- /dev/null +++ b/internal/api/attestationconfigapi/client_test.go @@ -0,0 +1,65 @@ +/* +Copyright (c) Edgeless Systems GmbH + +SPDX-License-Identifier: AGPL-3.0-only +*/ +package attestationconfigapi + +import ( + "testing" + "time" + + "github.com/edgelesssys/constellation/v2/internal/attestation/variant" + "github.com/stretchr/testify/assert" +) + +func TestUploadAzureSEVSNP(t *testing.T) { + sut := Client{ + bucketID: "bucket", + signer: fakeSigner{}, + } + version := SEVSNPVersion{} + date := time.Date(2023, 1, 1, 1, 1, 1, 1, time.UTC) + ops := sut.constructUploadCmd(variant.AzureSEVSNP{}, version, SEVSNPVersionList{list: []string{"2021-01-01-01-01.json", "2019-01-01-01-01.json"}, variant: variant.AzureSEVSNP{}}, date) + dateStr := "2023-01-01-01-01.json" + assert := assert.New(t) + assert.Contains(ops, putCmd{ + apiObject: SEVSNPVersionAPI{ + Variant: variant.AzureSEVSNP{}, + Version: dateStr, + SEVSNPVersion: version, + }, + signer: fakeSigner{}, + }) + assert.Contains(ops, putCmd{ + apiObject: SEVSNPVersionList{variant: variant.AzureSEVSNP{}, list: []string{"2023-01-01-01-01.json", "2021-01-01-01-01.json", "2019-01-01-01-01.json"}}, + signer: fakeSigner{}, + }) +} + +func TestDeleteAzureSEVSNPVersions(t *testing.T) { + sut := Client{ + bucketID: "bucket", + } + versions := SEVSNPVersionList{list: []string{"2023-01-01.json", "2021-01-01.json", "2019-01-01.json"}} + + ops, err := sut.deleteSEVSNPVersion(versions, "2021-01-01") + + assert := assert.New(t) + assert.NoError(err) + assert.Contains(ops, deleteCmd{ + apiObject: SEVSNPVersionAPI{ + Version: "2021-01-01.json", + }, + }) + + assert.Contains(ops, putCmd{ + apiObject: SEVSNPVersionList{list: []string{"2023-01-01.json", "2019-01-01.json"}}, + }) +} + +type fakeSigner struct{} + +func (fakeSigner) Sign(_ []byte) ([]byte, error) { + return []byte("signature"), nil +} diff --git a/internal/api/attestationconfigapi/fetcher.go b/internal/api/attestationconfigapi/fetcher.go index e7476f1e5..a54e3ebc7 100644 --- a/internal/api/attestationconfigapi/fetcher.go +++ b/internal/api/attestationconfigapi/fetcher.go @@ -1,13 +1,14 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package attestationconfigapi import ( "context" + "errors" "fmt" apifetcher "github.com/edgelesssys/constellation/v2/internal/api/fetcher" @@ -18,9 +19,14 @@ import ( const cosignPublicKey = constants.CosignPublicKeyReleases +// ErrNoVersionsFound is returned if no versions are found. +var ErrNoVersionsFound = errors.New("no versions found") + // Fetcher fetches config API resources without authentication. 
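// A rough usage sketch for the read-only side of this API: fetch the latest
// SEV-SNP version for a variant without authentication and treat
// ErrNoVersionsFound as "nothing published yet", mirroring the handling in
// cli/upload.go above. The attestationconfigapi and variant import paths are
// taken from this change; the constants import path and the use of
// CDNRepositoryURL/CosignPublicKeyReleases are assumptions based on how
// fetcher.go and its test use them.
package main

import (
	"context"
	"errors"
	"fmt"
	"log"

	"github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi"
	"github.com/edgelesssys/constellation/v2/internal/attestation/variant"
	"github.com/edgelesssys/constellation/v2/internal/constants"
)

func main() {
	fetcher := attestationconfigapi.NewFetcherWithCustomCDNAndCosignKey(
		constants.CDNRepositoryURL, constants.CosignPublicKeyReleases)

	latest, err := fetcher.FetchSEVSNPVersionLatest(context.Background(), variant.AzureSEVSNP{})
	if errors.Is(err, attestationconfigapi.ErrNoVersionsFound) {
		log.Println("no versions published yet")
		return
	}
	if err != nil {
		log.Fatalf("fetching latest version: %v", err)
	}
	fmt.Printf("latest SEV-SNP TCB: %+v\n", latest.SEVSNPVersion)
}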
type Fetcher interface { - FetchLatestVersion(ctx context.Context, attestation variant.Variant) (Entry, error) + FetchSEVSNPVersion(ctx context.Context, version SEVSNPVersionAPI) (SEVSNPVersionAPI, error) + FetchSEVSNPVersionList(ctx context.Context, list SEVSNPVersionList) (SEVSNPVersionList, error) + FetchSEVSNPVersionLatest(ctx context.Context, attestation variant.Variant) (SEVSNPVersionAPI, error) } // fetcher fetches AttestationCfg API resources without authentication. @@ -59,43 +65,47 @@ func newFetcherWithClientAndVerifier(client apifetcher.HTTPClient, cosignVerifie return &fetcher{HTTPClient: client, verifier: cosignVerifier, cdnURL: url} } -// FetchLatestVersion returns the latest versions of the given type. -func (f *fetcher) FetchLatestVersion(ctx context.Context, variant variant.Variant) (Entry, error) { - list, err := f.fetchVersionList(ctx, variant) +// FetchSEVSNPVersionList fetches the version list information from the config API. +func (f *fetcher) FetchSEVSNPVersionList(ctx context.Context, list SEVSNPVersionList) (SEVSNPVersionList, error) { + // TODO(derpsteb): Replace with FetchAndVerify once we move to v2 of the config API. + fetchedList, err := apifetcher.Fetch(ctx, f.HTTPClient, f.cdnURL, list) if err != nil { - return Entry{}, err + return list, fmt.Errorf("fetching version list: %w", err) } - // latest version is first in list - return f.fetchVersion(ctx, list.List[0], variant) -} - -// fetchVersionList fetches the version list information from the config API. -func (f *fetcher) fetchVersionList(ctx context.Context, variant variant.Variant) (List, error) { - fetchedList, err := apifetcher.FetchAndVerify(ctx, f.HTTPClient, f.cdnURL, List{Variant: variant}, f.verifier) - if err != nil { - return List{}, fmt.Errorf("fetching version list: %w", err) - } - - // Set the attestation variant of the list as it is not part of the marshalled JSON retrieved by Fetch - fetchedList.Variant = variant + // Need to set this explicitly as the variant is not part of the marshalled JSON. + fetchedList.variant = list.variant return fetchedList, nil } -// fetchVersion fetches the version information from the config API. -func (f *fetcher) fetchVersion(ctx context.Context, version string, variant variant.Variant) (Entry, error) { - obj := Entry{ - Version: version, - Variant: variant, - } - fetchedVersion, err := apifetcher.FetchAndVerify(ctx, f.HTTPClient, f.cdnURL, obj, f.verifier) +// FetchSEVSNPVersion fetches the version information from the config API. +func (f *fetcher) FetchSEVSNPVersion(ctx context.Context, version SEVSNPVersionAPI) (SEVSNPVersionAPI, error) { + fetchedVersion, err := apifetcher.FetchAndVerify(ctx, f.HTTPClient, f.cdnURL, version, f.verifier) if err != nil { - return Entry{}, fmt.Errorf("fetching version %q: %w", version, err) + return fetchedVersion, fmt.Errorf("fetching version %s: %w", version.Version, err) } - // Set the attestation variant of the list as it is not part of the marshalled JSON retrieved by FetchAndVerify - fetchedVersion.Variant = variant + // Need to set this explicitly as the variant is not part of the marshalled JSON. + fetchedVersion.Variant = version.Variant return fetchedVersion, nil } + +// FetchSEVSNPVersionLatest returns the latest versions of the given type. 
+func (f *fetcher) FetchSEVSNPVersionLatest(ctx context.Context, attesation variant.Variant) (res SEVSNPVersionAPI, err error) { + list, err := f.FetchSEVSNPVersionList(ctx, SEVSNPVersionList{variant: attesation}) + if err != nil { + return res, ErrNoVersionsFound + } + + getVersionRequest := SEVSNPVersionAPI{ + Version: list.List()[0], // latest version is first in list + Variant: attesation, + } + res, err = f.FetchSEVSNPVersion(ctx, getVersionRequest) + if err != nil { + return res, err + } + return +} diff --git a/internal/api/attestationconfigapi/fetcher_test.go b/internal/api/attestationconfigapi/fetcher_test.go index 18c3a203e..cb9fd86eb 100644 --- a/internal/api/attestationconfigapi/fetcher_test.go +++ b/internal/api/attestationconfigapi/fetcher_test.go @@ -1,12 +1,13 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package attestationconfigapi import ( "bytes" + "context" "encoding/json" "errors" "fmt" @@ -21,112 +22,87 @@ import ( ) func TestFetchLatestSEVSNPVersion(t *testing.T) { - latestVersionSNP := Entry{ - SEVSNPVersion: SEVSNPVersion{ - Microcode: 93, - TEE: 0, - SNP: 6, - Bootloader: 2, - }, - } - olderVersionSNP := Entry{ - SEVSNPVersion: SEVSNPVersion{ - Microcode: 1, - TEE: 0, - SNP: 1, - Bootloader: 1, - }, - } - latestVersionTDX := Entry{ - TDXVersion: TDXVersion{ - QESVN: 2, - PCESVN: 3, - TEETCBSVN: [16]byte{4}, - QEVendorID: [16]byte{5}, - XFAM: [8]byte{6}, - }, - } - olderVersionTDX := Entry{ - TDXVersion: TDXVersion{ - QESVN: 1, - PCESVN: 2, - TEETCBSVN: [16]byte{3}, - QEVendorID: [16]byte{4}, - XFAM: [8]byte{5}, - }, - } - latestStr := "2023-06-11-14-09.json" olderStr := "2019-01-01-01-01.json" - testCases := map[string]struct { + testcases := map[string]struct { fetcherVersions []string timeAtTest time.Time wantErr bool attestation variant.Variant - expectedVersion Entry - olderVersion Entry - latestVersion Entry + expectedVersion func() SEVSNPVersionAPI + olderVersion func() SEVSNPVersionAPI + latestVersion func() SEVSNPVersionAPI }{ - "get latest version azure-sev-snp": { + "get latest version azure": { fetcherVersions: []string{latestStr, olderStr}, attestation: variant.AzureSEVSNP{}, - expectedVersion: func() Entry { tmp := latestVersionSNP; tmp.Variant = variant.AzureSEVSNP{}; return tmp }(), - olderVersion: func() Entry { tmp := olderVersionSNP; tmp.Variant = variant.AzureSEVSNP{}; return tmp }(), - latestVersion: func() Entry { tmp := latestVersionSNP; tmp.Variant = variant.AzureSEVSNP{}; return tmp }(), + expectedVersion: func() SEVSNPVersionAPI { tmp := latestVersion; tmp.Variant = variant.AzureSEVSNP{}; return tmp }, + olderVersion: func() SEVSNPVersionAPI { tmp := olderVersion; tmp.Variant = variant.AzureSEVSNP{}; return tmp }, + latestVersion: func() SEVSNPVersionAPI { tmp := latestVersion; tmp.Variant = variant.AzureSEVSNP{}; return tmp }, }, - "get latest version aws-sev-snp": { + "get latest version aws": { fetcherVersions: []string{latestStr, olderStr}, attestation: variant.AWSSEVSNP{}, - expectedVersion: func() Entry { tmp := latestVersionSNP; tmp.Variant = variant.AWSSEVSNP{}; return tmp }(), - olderVersion: func() Entry { tmp := olderVersionSNP; tmp.Variant = variant.AWSSEVSNP{}; return tmp }(), - latestVersion: func() Entry { tmp := latestVersionSNP; tmp.Variant = variant.AWSSEVSNP{}; return tmp }(), - }, - "get latest version azure-tdx": { - fetcherVersions: []string{latestStr, olderStr}, - attestation: variant.AzureTDX{}, - expectedVersion: func() Entry { tmp := 
latestVersionTDX; tmp.Variant = variant.AzureTDX{}; return tmp }(), - olderVersion: func() Entry { tmp := olderVersionTDX; tmp.Variant = variant.AzureTDX{}; return tmp }(), - latestVersion: func() Entry { tmp := latestVersionTDX; tmp.Variant = variant.AzureTDX{}; return tmp }(), + expectedVersion: func() SEVSNPVersionAPI { tmp := latestVersion; tmp.Variant = variant.AWSSEVSNP{}; return tmp }, + olderVersion: func() SEVSNPVersionAPI { tmp := olderVersion; tmp.Variant = variant.AWSSEVSNP{}; return tmp }, + latestVersion: func() SEVSNPVersionAPI { tmp := latestVersion; tmp.Variant = variant.AWSSEVSNP{}; return tmp }, }, } - for name, tc := range testCases { + for name, tc := range testcases { t.Run(name, func(t *testing.T) { client := &http.Client{ Transport: &fakeConfigAPIHandler{ attestation: tc.attestation, versions: tc.fetcherVersions, latestDate: latestStr, - latestVersion: tc.latestVersion, + latestVersion: tc.latestVersion(), olderDate: olderStr, - olderVersion: tc.olderVersion, + olderVersion: tc.olderVersion(), }, } - fetcher := newFetcherWithClientAndVerifier(client, stubVerifier{}, constants.CDNRepositoryURL) - res, err := fetcher.FetchLatestVersion(t.Context(), tc.attestation) + fetcher := newFetcherWithClientAndVerifier(client, dummyVerifier{}, constants.CDNRepositoryURL) + res, err := fetcher.FetchSEVSNPVersionLatest(context.Background(), tc.attestation) assert := assert.New(t) if tc.wantErr { assert.Error(err) } else { assert.NoError(err) - assert.Equal(tc.expectedVersion, res) + assert.Equal(tc.expectedVersion(), res) } }) } } +var latestVersion = SEVSNPVersionAPI{ + SEVSNPVersion: SEVSNPVersion{ + Microcode: 93, + TEE: 0, + SNP: 6, + Bootloader: 2, + }, +} + +var olderVersion = SEVSNPVersionAPI{ + SEVSNPVersion: SEVSNPVersion{ + Microcode: 1, + TEE: 0, + SNP: 1, + Bootloader: 1, + }, +} + type fakeConfigAPIHandler struct { attestation variant.Variant versions []string latestDate string - latestVersion Entry + latestVersion SEVSNPVersionAPI olderDate string - olderVersion Entry + olderVersion SEVSNPVersionAPI } // RoundTrip resolves the request and returns a dummy response. 
func (f *fakeConfigAPIHandler) RoundTrip(req *http.Request) (*http.Response, error) { - switch req.URL.Path { - case fmt.Sprintf("/constellation/v1/attestation/%s/list", f.attestation.String()): + if req.URL.Path == fmt.Sprintf("/constellation/v1/attestation/%s/list", f.attestation.String()) { res := &http.Response{} bt, err := json.Marshal(f.versions) if err != nil { @@ -137,14 +113,7 @@ func (f *fakeConfigAPIHandler) RoundTrip(req *http.Request) (*http.Response, err res.Header.Set("Content-Type", "application/json") res.StatusCode = http.StatusOK return res, nil - - case fmt.Sprintf("/constellation/v1/attestation/%s/list.sig", f.attestation.String()): - res := &http.Response{} - res.Body = io.NopCloser(bytes.NewReader([]byte("null"))) - res.StatusCode = http.StatusOK - return res, nil - - case fmt.Sprintf("/constellation/v1/attestation/%s/%s", f.attestation.String(), f.latestDate): + } else if req.URL.Path == fmt.Sprintf("/constellation/v1/attestation/%s/%s", f.attestation.String(), f.latestDate) { res := &http.Response{} bt, err := json.Marshal(f.latestVersion) if err != nil { @@ -154,7 +123,7 @@ func (f *fakeConfigAPIHandler) RoundTrip(req *http.Request) (*http.Response, err res.StatusCode = http.StatusOK return res, nil - case fmt.Sprintf("/constellation/v1/attestation/%s/%s", f.attestation.String(), f.olderDate): + } else if req.URL.Path == fmt.Sprintf("/constellation/v1/attestation/%s/%s", f.attestation.String(), f.olderDate) { res := &http.Response{} bt, err := json.Marshal(f.olderVersion) if err != nil { @@ -163,14 +132,13 @@ func (f *fakeConfigAPIHandler) RoundTrip(req *http.Request) (*http.Response, err res.Body = io.NopCloser(bytes.NewReader(bt)) res.StatusCode = http.StatusOK return res, nil - - case fmt.Sprintf("/constellation/v1/attestation/%s/%s.sig", f.attestation.String(), f.latestDate): + } else if req.URL.Path == fmt.Sprintf("/constellation/v1/attestation/%s/%s.sig", f.attestation.String(), f.latestDate) { res := &http.Response{} res.Body = io.NopCloser(bytes.NewReader([]byte("null"))) res.StatusCode = http.StatusOK return res, nil - case fmt.Sprintf("/constellation/v1/attestation/%s/%s.sig", f.attestation.String(), f.olderDate): + } else if req.URL.Path == fmt.Sprintf("/constellation/v1/attestation/%s/%s.sig", f.attestation.String(), f.olderDate) { res := &http.Response{} res.Body = io.NopCloser(bytes.NewReader([]byte("null"))) res.StatusCode = http.StatusOK @@ -180,8 +148,8 @@ func (f *fakeConfigAPIHandler) RoundTrip(req *http.Request) (*http.Response, err return nil, errors.New("no endpoint found") } -type stubVerifier struct{} +type dummyVerifier struct{} -func (s stubVerifier) VerifySignature(_, _ []byte) error { +func (s dummyVerifier) VerifySignature(_, _ []byte) error { return nil } diff --git a/internal/api/attestationconfigapi/reporter.go b/internal/api/attestationconfigapi/reporter.go new file mode 100644 index 000000000..00656e881 --- /dev/null +++ b/internal/api/attestationconfigapi/reporter.go @@ -0,0 +1,189 @@ +/* +Copyright (c) Edgeless Systems GmbH + +SPDX-License-Identifier: AGPL-3.0-only +*/ + +/* +The reporter contains the logic to determine a latest version for Azure SEVSNP based on cached version values observed on CVM instances. +Some code in this file (e.g. listing cached files) does not rely on dedicated API objects and instead uses the AWS SDK directly, +for no other reason than original development speed. 
+*/ +package attestationconfigapi + +import ( + "context" + "errors" + "fmt" + "path" + "sort" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go/aws" + + "github.com/edgelesssys/constellation/v2/internal/api/client" + "github.com/edgelesssys/constellation/v2/internal/attestation/variant" +) + +// cachedVersionsSubDir is the subdirectory in the bucket where the cached versions are stored. +const cachedVersionsSubDir = "cached-versions" + +// ErrNoNewerVersion is returned if the input version is not newer than the latest API version. +var ErrNoNewerVersion = errors.New("input version is not newer than latest API version") + +func reportVersionDir(attestation variant.Variant) string { + return path.Join(AttestationURLPath, attestation.String(), cachedVersionsSubDir) +} + +// UploadSEVSNPVersionLatest saves the given version to the cache, determines the smallest +// TCB version in the cache among the last cacheWindowSize versions and updates +// the latest version in the API if there is an update. +// force can be used to bypass the validation logic against the cached versions. +func (c Client) UploadSEVSNPVersionLatest(ctx context.Context, attestation variant.Variant, inputVersion, + latestAPIVersion SEVSNPVersion, now time.Time, force bool, +) error { + if err := c.cacheSEVSNPVersion(ctx, attestation, inputVersion, now); err != nil { + return fmt.Errorf("reporting version: %w", err) + } + if force { + return c.uploadSEVSNPVersion(ctx, attestation, inputVersion, now) + } + versionDates, err := c.listCachedVersions(ctx, attestation) + if err != nil { + return fmt.Errorf("list reported versions: %w", err) + } + if len(versionDates) < c.cacheWindowSize { + c.s3Client.Logger.Warn(fmt.Sprintf("Skipping version update, found %d, expected %d reported versions.", len(versionDates), c.cacheWindowSize)) + return nil + } + minVersion, minDate, err := c.findMinVersion(ctx, attestation, versionDates) + if err != nil { + return fmt.Errorf("get minimal version: %w", err) + } + c.s3Client.Logger.Info(fmt.Sprintf("Found minimal version: %+v with date: %s", minVersion, minDate)) + shouldUpdateAPI, err := isInputNewerThanOtherVersion(minVersion, latestAPIVersion) + if err != nil { + return ErrNoNewerVersion + } + if !shouldUpdateAPI { + c.s3Client.Logger.Info(fmt.Sprintf("Input version: %+v is not newer than latest API version: %+v", minVersion, latestAPIVersion)) + return nil + } + c.s3Client.Logger.Info(fmt.Sprintf("Input version: %+v is newer than latest API version: %+v", minVersion, latestAPIVersion)) + t, err := time.Parse(VersionFormat, minDate) + if err != nil { + return fmt.Errorf("parsing date: %w", err) + } + if err := c.uploadSEVSNPVersion(ctx, attestation, minVersion, t); err != nil { + return fmt.Errorf("uploading version: %w", err) + } + c.s3Client.Logger.Info(fmt.Sprintf("Successfully uploaded new Azure SEV-SNP version: %+v", minVersion)) + return nil +} + +// cacheSEVSNPVersion uploads the latest observed version numbers of the Azure SEVSNP. This version is used to later report the latest version numbers to the API. 
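// A self-contained sketch of the selection rule used by UploadSEVSNPVersionLatest
// above and the findMinVersion helper it calls: only the newest cacheWindowSize
// cached reports are considered, the smallest TCB among them wins, and it is
// attributed to the oldest date in the window. The types and the comparison are
// local mirrors of SEVSNPVersion and isInputNewerThanOtherVersion; the dates and
// values match the e2e test earlier in this change (cache window 3, microcode
// 254/254/255), which is why 2023-02-01-03-04 with microcode 254 is promoted.
package main

import (
	"fmt"
	"sort"
)

// sevsnpVersion mirrors attestationconfigapi.SEVSNPVersion.
type sevsnpVersion struct {
	Bootloader, TEE, SNP, Microcode uint8
}

// inputNewerThan condenses isInputNewerThanOtherVersion: true only if input is
// newer overall; an error signals that some field of input is older than other.
func inputNewerThan(input, other sevsnpVersion) (bool, error) {
	if input == other {
		return false, nil
	}
	if input.TEE < other.TEE || input.SNP < other.SNP ||
		input.Microcode < other.Microcode || input.Bootloader < other.Bootloader {
		return false, fmt.Errorf("input %+v has a field older than %+v", input, other)
	}
	return true, nil
}

func main() {
	const cacheWindowSize = 3
	cached := map[string]sevsnpVersion{
		"2023-02-01-03-04": {Bootloader: 255, TEE: 255, SNP: 255, Microcode: 254},
		"2023-02-02-03-04": {Bootloader: 255, TEE: 255, SNP: 255, Microcode: 254},
		"2023-02-03-03-04": {Bootloader: 255, TEE: 255, SNP: 255, Microcode: 255},
	}

	dates := make([]string, 0, len(cached))
	for d := range cached {
		dates = append(dates, d)
	}
	sort.Sort(sort.Reverse(sort.StringSlice(dates))) // newest first
	dates = dates[:cacheWindowSize]                  // keep only the window
	sort.Strings(dates)                              // oldest first

	minDate, minVer := dates[0], cached[dates[0]]
	for _, d := range dates[1:] {
		if newer, err := inputNewerThan(minVer, cached[d]); err == nil && newer {
			minDate, minVer = d, cached[d] // candidate is strictly older, take it
		}
	}
	fmt.Printf("candidate for latest: %s -> %+v\n", minDate, minVer)
	// Output: candidate for latest: 2023-02-01-03-04 -> {Bootloader:255 TEE:255 SNP:255 Microcode:254}
}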
+func (c Client) cacheSEVSNPVersion(ctx context.Context, attestation variant.Variant, version SEVSNPVersion, date time.Time) error { + dateStr := date.Format(VersionFormat) + ".json" + res := putCmd{ + apiObject: reportedSEVSNPVersionAPI{Version: dateStr, variant: attestation, SEVSNPVersion: version}, + signer: c.signer, + } + return res.Execute(ctx, c.s3Client) +} + +func (c Client) listCachedVersions(ctx context.Context, attestation variant.Variant) ([]string, error) { + list, err := c.s3Client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{ + Bucket: aws.String(c.bucketID), + Prefix: aws.String(reportVersionDir(attestation)), + }) + if err != nil { + return nil, fmt.Errorf("list objects: %w", err) + } + var dates []string + for _, obj := range list.Contents { + fileName := path.Base(*obj.Key) + if strings.HasSuffix(fileName, ".json") { + dates = append(dates, fileName[:len(fileName)-5]) + } + } + return dates, nil +} + +// findMinVersion finds the minimal version of the given version dates among the latest values in the version window size. +func (c Client) findMinVersion(ctx context.Context, attesation variant.Variant, versionDates []string) (SEVSNPVersion, string, error) { + var minimalVersion *SEVSNPVersion + var minimalDate string + sort.Sort(sort.Reverse(sort.StringSlice(versionDates))) // sort in reverse order to slice the latest versions + versionDates = versionDates[:c.cacheWindowSize] + sort.Strings(versionDates) // sort with oldest first to to take the minimal version with the oldest date + for _, date := range versionDates { + obj, err := client.Fetch(ctx, c.s3Client, reportedSEVSNPVersionAPI{Version: date + ".json", variant: attesation}) + if err != nil { + return SEVSNPVersion{}, "", fmt.Errorf("get object: %w", err) + } + // Need to set this explicitly as the variant is not part of the marshalled JSON. + obj.variant = attesation + + if minimalVersion == nil { + minimalVersion = &obj.SEVSNPVersion + minimalDate = date + } else { + shouldUpdateMinimal, err := isInputNewerThanOtherVersion(*minimalVersion, obj.SEVSNPVersion) + if err != nil { + continue + } + if shouldUpdateMinimal { + minimalVersion = &obj.SEVSNPVersion + minimalDate = date + } + } + } + return *minimalVersion, minimalDate, nil +} + +// isInputNewerThanOtherVersion compares all version fields and returns true if any input field is newer. +func isInputNewerThanOtherVersion(input, other SEVSNPVersion) (bool, error) { + if input == other { + return false, nil + } + if input.TEE < other.TEE { + return false, fmt.Errorf("input TEE version: %d is older than latest API version: %d", input.TEE, other.TEE) + } + if input.SNP < other.SNP { + return false, fmt.Errorf("input SNP version: %d is older than latest API version: %d", input.SNP, other.SNP) + } + if input.Microcode < other.Microcode { + return false, fmt.Errorf("input Microcode version: %d is older than latest API version: %d", input.Microcode, other.Microcode) + } + if input.Bootloader < other.Bootloader { + return false, fmt.Errorf("input Bootloader version: %d is older than latest API version: %d", input.Bootloader, other.Bootloader) + } + return true, nil +} + +// reportedSEVSNPVersionAPI is the request to get the version information of the specific version in the config api. +type reportedSEVSNPVersionAPI struct { + Version string `json:"-"` + variant variant.Variant `json:"-"` + SEVSNPVersion +} + +// JSONPath returns the path to the JSON file for the request to the config api. 
+func (i reportedSEVSNPVersionAPI) JSONPath() string { + return path.Join(reportVersionDir(i.variant), i.Version) +} + +// ValidateRequest validates the request. +func (i reportedSEVSNPVersionAPI) ValidateRequest() error { + if !strings.HasSuffix(i.Version, ".json") { + return fmt.Errorf("version has no .json suffix") + } + return nil +} + +// Validate is a No-Op at the moment. +func (i reportedSEVSNPVersionAPI) Validate() error { + return nil +} diff --git a/internal/api/attestationconfigapi/reporter_test.go b/internal/api/attestationconfigapi/reporter_test.go new file mode 100644 index 000000000..ea37d2d2f --- /dev/null +++ b/internal/api/attestationconfigapi/reporter_test.go @@ -0,0 +1,74 @@ +/* +Copyright (c) Edgeless Systems GmbH +SPDX-License-Identifier: AGPL-3.0-only +*/ +package attestationconfigapi + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestIsInputNewerThanLatestAPI(t *testing.T) { + newTestCfg := func() SEVSNPVersion { + return SEVSNPVersion{ + Microcode: 93, + TEE: 0, + SNP: 6, + Bootloader: 2, + } + } + + testCases := map[string]struct { + latest SEVSNPVersion + input SEVSNPVersion + expect bool + errMsg string + }{ + "input is older than latest": { + input: func(c SEVSNPVersion) SEVSNPVersion { + c.Microcode-- + return c + }(newTestCfg()), + latest: newTestCfg(), + expect: false, + errMsg: "input Microcode version: 92 is older than latest API version: 93", + }, + "input has greater and smaller version field than latest": { + input: func(c SEVSNPVersion) SEVSNPVersion { + c.Microcode++ + c.Bootloader-- + return c + }(newTestCfg()), + latest: newTestCfg(), + expect: false, + errMsg: "input Bootloader version: 1 is older than latest API version: 2", + }, + "input is newer than latest": { + input: func(c SEVSNPVersion) SEVSNPVersion { + c.TEE++ + return c + }(newTestCfg()), + latest: newTestCfg(), + expect: true, + }, + "input is equal to latest": { + input: newTestCfg(), + latest: newTestCfg(), + expect: false, + }, + } + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + isNewer, err := isInputNewerThanOtherVersion(tc.input, tc.latest) + assert := assert.New(t) + if tc.errMsg != "" { + assert.EqualError(err, tc.errMsg) + } else { + assert.NoError(err) + assert.Equal(tc.expect, isNewer) + } + }) + } +} diff --git a/internal/api/attestationconfigapi/snp.go b/internal/api/attestationconfigapi/snp.go new file mode 100644 index 000000000..68098a3ad --- /dev/null +++ b/internal/api/attestationconfigapi/snp.go @@ -0,0 +1,113 @@ +/* +Copyright (c) Edgeless Systems GmbH + +SPDX-License-Identifier: AGPL-3.0-only +*/ + +package attestationconfigapi + +import ( + "encoding/json" + "fmt" + "path" + "sort" + "strings" + + "github.com/edgelesssys/constellation/v2/internal/attestation/variant" +) + +// AttestationURLPath is the URL path to the attestation versions. +const AttestationURLPath = "constellation/v1/attestation" + +// SEVSNPVersion tracks the latest version of each component of the Azure SEVSNP. +type SEVSNPVersion struct { + // Bootloader is the latest version of the Azure SEVSNP bootloader. + Bootloader uint8 `json:"bootloader"` + // TEE is the latest version of the Azure SEVSNP TEE. + TEE uint8 `json:"tee"` + // SNP is the latest version of the Azure SEVSNP SNP. + SNP uint8 `json:"snp"` + // Microcode is the latest version of the Azure SEVSNP microcode. + Microcode uint8 `json:"microcode"` +} + +// SEVSNPVersionAPI is the request to get the version information of the specific version in the config api. 
+// Because variant is not part of the marshalled JSON, fetcher and client methods need to fill the variant property. +// Once we switch to v2 of the API we should embed the variant in the object. +// That would remove the possibility of some fetcher/client code forgetting to set the variant. +type SEVSNPVersionAPI struct { + Version string `json:"-"` + Variant variant.Variant `json:"-"` + SEVSNPVersion +} + +// JSONPath returns the path to the JSON file for the request to the config api. +func (i SEVSNPVersionAPI) JSONPath() string { + return path.Join(AttestationURLPath, i.Variant.String(), i.Version) +} + +// ValidateRequest validates the request. +func (i SEVSNPVersionAPI) ValidateRequest() error { + if !strings.HasSuffix(i.Version, ".json") { + return fmt.Errorf("version has no .json suffix") + } + return nil +} + +// Validate is a No-Op at the moment. +func (i SEVSNPVersionAPI) Validate() error { + return nil +} + +// SEVSNPVersionList is the request to list all versions in the config api. +// Because variant is not part of the marshalled JSON, fetcher and client methods need to fill the variant property. +// Once we switch to v2 of the API we could embed the variant in the object and remove some code from fetcher & client. +// That would remove the possibility of some fetcher/client code forgetting to set the variant. +type SEVSNPVersionList struct { + variant variant.Variant + list []string +} + +// MarshalJSON marshals the i's list property to JSON. +func (i SEVSNPVersionList) MarshalJSON() ([]byte, error) { + return json.Marshal(i.list) +} + +// UnmarshalJSON unmarshals a list of strings into i's list property. +func (i *SEVSNPVersionList) UnmarshalJSON(data []byte) error { + return json.Unmarshal(data, &i.list) +} + +// List returns i's list property. +func (i SEVSNPVersionList) List() []string { return i.list } + +// JSONPath returns the path to the JSON file for the request to the config api. +func (i SEVSNPVersionList) JSONPath() string { + return path.Join(AttestationURLPath, i.variant.String(), "list") +} + +// ValidateRequest is a NoOp as there is no input. +func (i SEVSNPVersionList) ValidateRequest() error { + return nil +} + +// SortReverse sorts the list of versions in reverse order. +func (i *SEVSNPVersionList) SortReverse() { + sort.Sort(sort.Reverse(sort.StringSlice(i.list))) +} + +// addVersion adds new to i's list and sorts the element in descending order. +func (i *SEVSNPVersionList) addVersion(new string) { + i.list = append(i.list, new) + i.list = variant.RemoveDuplicate(i.list) + + i.SortReverse() +} + +// Validate validates the response. 
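// A small sketch of the JSON behaviour of the two types above, using local
// mirrors (sevsnpVersion, versionList) because the real list field is
// unexported: a SEVSNPVersion marshals to the flat object the e2e test above
// compares against, while a SEVSNPVersionList marshals to a bare array, so the
// variant is lost on the wire and fetcher/client code has to set it again.
// addVersion is mirrored here with a plain dedupe plus descending sort.
package main

import (
	"encoding/json"
	"fmt"
	"sort"
)

// sevsnpVersion mirrors attestationconfigapi.SEVSNPVersion, including its JSON tags.
type sevsnpVersion struct {
	Bootloader uint8 `json:"bootloader"`
	TEE        uint8 `json:"tee"`
	SNP        uint8 `json:"snp"`
	Microcode  uint8 `json:"microcode"`
}

// versionList mirrors the marshalling behaviour of SEVSNPVersionList: only the
// version names are serialized, the attestation variant is not.
type versionList struct {
	variant string
	list    []string
}

func (l versionList) MarshalJSON() ([]byte, error) { return json.Marshal(l.list) }

func (l *versionList) addVersion(v string) {
	l.list = append(l.list, v)
	seen := map[string]bool{}
	deduped := l.list[:0]
	for _, e := range l.list {
		if !seen[e] {
			seen[e] = true
			deduped = append(deduped, e)
		}
	}
	l.list = deduped
	sort.Sort(sort.Reverse(sort.StringSlice(l.list))) // newest timestamp first
}

func main() {
	v, _ := json.Marshal(sevsnpVersion{Bootloader: 255, TEE: 255, SNP: 255, Microcode: 254})
	fmt.Println(string(v)) // {"bootloader":255,"tee":255,"snp":255,"microcode":254}

	l := versionList{variant: "azure-sev-snp", list: []string{"2023-02-01-03-04.json", "2000-01-01-01-01.json"}}
	l.addVersion("2023-02-03-03-04.json")
	out, _ := json.Marshal(l)
	// The variant is gone; only the sorted names remain:
	// ["2023-02-03-03-04.json","2023-02-01-03-04.json","2000-01-01-01-01.json"]
	fmt.Println(string(out))
}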
+func (i SEVSNPVersionList) Validate() error { + if len(i.list) < 1 { + return fmt.Errorf("no versions found in /list") + } + return nil +} diff --git a/internal/api/attestationconfigapi/version_test.go b/internal/api/attestationconfigapi/snp_test.go similarity index 59% rename from internal/api/attestationconfigapi/version_test.go rename to internal/api/attestationconfigapi/snp_test.go index 57d80b5fa..2fe3ea8c9 100644 --- a/internal/api/attestationconfigapi/version_test.go +++ b/internal/api/attestationconfigapi/snp_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package attestationconfigapi @@ -14,23 +14,23 @@ import ( "github.com/stretchr/testify/require" ) -func TestVersionListMarshalUnmarshalJSON(t *testing.T) { +func TestSEVSNPVersionListMarshalUnmarshalJSON(t *testing.T) { tests := map[string]struct { - input List - output List + input SEVSNPVersionList + output SEVSNPVersionList wantDiff bool }{ "success": { - input: List{List: []string{"v1", "v2"}}, - output: List{List: []string{"v1", "v2"}}, + input: SEVSNPVersionList{list: []string{"v1", "v2"}}, + output: SEVSNPVersionList{list: []string{"v1", "v2"}}, }, "variant is lost": { - input: List{List: []string{"v1", "v2"}, Variant: variant.AzureSEVSNP{}}, - output: List{List: []string{"v1", "v2"}}, + input: SEVSNPVersionList{list: []string{"v1", "v2"}, variant: variant.AzureSEVSNP{}}, + output: SEVSNPVersionList{list: []string{"v1", "v2"}}, }, "wrong order": { - input: List{List: []string{"v1", "v2"}}, - output: List{List: []string{"v2", "v1"}}, + input: SEVSNPVersionList{list: []string{"v1", "v2"}}, + output: SEVSNPVersionList{list: []string{"v2", "v1"}}, wantDiff: true, }, } @@ -40,7 +40,7 @@ func TestVersionListMarshalUnmarshalJSON(t *testing.T) { inputRaw, err := tc.input.MarshalJSON() require.NoError(t, err) - var actual List + var actual SEVSNPVersionList err = actual.UnmarshalJSON(inputRaw) require.NoError(t, err) @@ -53,7 +53,7 @@ func TestVersionListMarshalUnmarshalJSON(t *testing.T) { } } -func TestVersionListAddVersion(t *testing.T) { +func TestSEVSNPVersionListAddVersion(t *testing.T) { tests := map[string]struct { versions []string new string @@ -68,10 +68,10 @@ func TestVersionListAddVersion(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { - v := List{List: tc.versions} - v.AddVersion(tc.new) + v := SEVSNPVersionList{list: tc.versions} + v.addVersion(tc.new) - assert.Equal(t, tc.expected, v.List) + assert.Equal(t, tc.expected, v.list) }) } } diff --git a/internal/api/attestationconfigapi/version.go b/internal/api/attestationconfigapi/version.go deleted file mode 100644 index 82d218872..000000000 --- a/internal/api/attestationconfigapi/version.go +++ /dev/null @@ -1,127 +0,0 @@ -/* -Copyright (c) Edgeless Systems GmbH - -SPDX-License-Identifier: BUSL-1.1 -*/ - -package attestationconfigapi - -import ( - "encoding/json" - "fmt" - "path" - "sort" - "strings" - - "github.com/edgelesssys/constellation/v2/internal/attestation/variant" -) - -// AttestationURLPath is the URL path to the attestation versions. -const AttestationURLPath = "constellation/v1/attestation" - -// SEVSNPVersion tracks the latest version of each component for SEV-SNP. -type SEVSNPVersion struct { - // Bootloader is the latest version of the SEV-SNP bootloader. - Bootloader uint8 `json:"bootloader"` - // TEE is the latest version of the SEV-SNP TEE. - TEE uint8 `json:"tee"` - // SNP is the latest version of the SEV-SNP SNP. 
- SNP uint8 `json:"snp"` - // Microcode is the latest version of the SEV-SNP microcode. - Microcode uint8 `json:"microcode"` -} - -// TDXVersion tracks the latest version of each component for TDX. -type TDXVersion struct { - // QESVN is the latest QE security version number. - QESVN uint16 `json:"qeSVN"` - // PCESVN is the latest PCE security version number. - PCESVN uint16 `json:"pceSVN"` - // TEETCBSVN are the latest component-wise security version numbers for the TEE. - TEETCBSVN [16]byte `json:"teeTCBSVN"` - // QEVendorID is the latest QE vendor ID. - QEVendorID [16]byte `json:"qeVendorID"` - // XFAM is the latest XFAM field. - XFAM [8]byte `json:"xfam"` -} - -// Entry is the request to get the version information of the specific version in the config api. -// -// TODO: Because variant is not part of the marshalled JSON, fetcher and client methods need to fill the variant property. -// In API v2 we should embed the variant in the object and remove some code from fetcher & client. -// That would remove the possibility of some fetcher/client code forgetting to set the variant. -type Entry struct { - Version string `json:"-"` - Variant variant.Variant `json:"-"` - SEVSNPVersion - TDXVersion -} - -// JSONPath returns the path to the JSON file for the request to the config api. -func (i Entry) JSONPath() string { - return path.Join(AttestationURLPath, i.Variant.String(), i.Version) -} - -// ValidateRequest validates the request. -func (i Entry) ValidateRequest() error { - if !strings.HasSuffix(i.Version, ".json") { - return fmt.Errorf("version has no .json suffix") - } - return nil -} - -// Validate is a No-Op at the moment. -func (i Entry) Validate() error { - return nil -} - -// List is the request to retrieve of all versions in the API for one attestation variant. -// -// TODO: Because variant is not part of the marshalled JSON, fetcher and client methods need to fill the variant property. -// In API v2 we should embed the variant in the object and remove some code from fetcher & client. -// That would remove the possibility of some fetcher/client code forgetting to set the variant. -type List struct { - Variant variant.Variant - List []string -} - -// MarshalJSON marshals the i's list property to JSON. -func (i List) MarshalJSON() ([]byte, error) { - return json.Marshal(i.List) -} - -// UnmarshalJSON unmarshals a list of strings into i's list property. -func (i *List) UnmarshalJSON(data []byte) error { - return json.Unmarshal(data, &i.List) -} - -// JSONPath returns the path to the JSON file for the request to the config api. -func (i List) JSONPath() string { - return path.Join(AttestationURLPath, i.Variant.String(), "list") -} - -// ValidateRequest is a NoOp as there is no input. -func (i List) ValidateRequest() error { - return nil -} - -// SortReverse sorts the list of versions in reverse order. -func (i *List) SortReverse() { - sort.Sort(sort.Reverse(sort.StringSlice(i.List))) -} - -// AddVersion adds new to i's list and sorts the element in descending order. -func (i *List) AddVersion(ver string) { - i.List = append(i.List, ver) - i.List = variant.RemoveDuplicate(i.List) - - i.SortReverse() -} - -// Validate validates the response. 
-func (i List) Validate() error { - if len(i.List) < 1 { - return fmt.Errorf("no versions found in /list") - } - return nil -} diff --git a/internal/api/client/client.go b/internal/api/client/client.go index 71a05794c..d9ad7ec9e 100644 --- a/internal/api/client/client.go +++ b/internal/api/client/client.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* @@ -53,7 +53,7 @@ type Client struct { dirtyPaths []string // written paths to be invalidated DryRun bool // no write operations are performed - log *slog.Logger + Logger *slog.Logger } // NewReadOnlyClient creates a new read-only client. @@ -77,7 +77,7 @@ func NewReadOnlyClient(ctx context.Context, region, bucket, distributionID strin s3ClientClose: staticUploadClientClose, bucket: bucket, DryRun: true, - log: log, + Logger: log, } clientClose := func(ctx context.Context) error { return client.Close(ctx) @@ -106,7 +106,7 @@ func NewClient(ctx context.Context, region, bucket, distributionID string, dryRu s3ClientClose: staticUploadClientClose, bucket: bucket, DryRun: dryRun, - log: log, + Logger: log, } clientClose := func(ctx context.Context) error { return client.Close(ctx) @@ -119,7 +119,7 @@ func NewClient(ctx context.Context, region, bucket, distributionID string, dryRu // It invalidates the CDN cache for all uploaded files. func (c *Client) Close(ctx context.Context) error { if c.s3ClientClose == nil { - c.log.Debug("Client has no s3ClientClose") + c.Logger.Debug("Client has no s3ClientClose") return nil } return c.s3ClientClose(ctx) @@ -131,7 +131,7 @@ func (c *Client) DeletePath(ctx context.Context, path string) error { Bucket: &c.bucket, Prefix: &path, } - c.log.Debug(fmt.Sprintf("Listing objects in %q", path)) + c.Logger.Debug(fmt.Sprintf("Listing objects in %s", path)) objs := []s3types.Object{} out := &s3.ListObjectsV2Output{IsTruncated: ptr(true)} for out.IsTruncated != nil && *out.IsTruncated { @@ -142,10 +142,10 @@ func (c *Client) DeletePath(ctx context.Context, path string) error { } objs = append(objs, out.Contents...) 
} - c.log.Debug(fmt.Sprintf("Found %d objects in %q", len(objs), path)) + c.Logger.Debug(fmt.Sprintf("Found %d objects in %s", len(objs), path)) if len(objs) == 0 { - c.log.Warn(fmt.Sprintf("Path %s is already empty", path)) + c.Logger.Warn(fmt.Sprintf("Path %s is already empty", path)) return nil } @@ -155,7 +155,7 @@ func (c *Client) DeletePath(ctx context.Context, path string) error { } if c.DryRun { - c.log.Debug(fmt.Sprintf("DryRun: Deleting %d objects with IDs %v", len(objs), objIDs)) + c.Logger.Debug(fmt.Sprintf("DryRun: Deleting %d objects with IDs %v", len(objs), objIDs)) return nil } @@ -167,7 +167,7 @@ func (c *Client) DeletePath(ctx context.Context, path string) error { Objects: objIDs, }, } - c.log.Debug(fmt.Sprintf("Deleting %d objects in %q", len(objs), path)) + c.Logger.Debug(fmt.Sprintf("Deleting %d objects in %s", len(objs), path)) if _, err := c.s3Client.DeleteObjects(ctx, deleteIn); err != nil { return fmt.Errorf("deleting objects in %s: %w", path, err) } @@ -197,7 +197,7 @@ func Fetch[T APIObject](ctx context.Context, c *Client, obj T) (T, error) { Key: ptr(obj.JSONPath()), } - c.log.Debug(fmt.Sprintf("Fetching %T from s3: %q", obj, obj.JSONPath())) + c.Logger.Debug(fmt.Sprintf("Fetching %T from s3: %s", obj, obj.JSONPath())) out, err := c.s3Client.GetObject(ctx, in) var noSuchkey *s3types.NoSuchKey if errors.As(err, &noSuchkey) { @@ -231,7 +231,7 @@ func Update(ctx context.Context, c *Client, obj APIObject) error { } if c.DryRun { - c.log.With(slog.String("bucket", c.bucket), slog.String("key", obj.JSONPath()), slog.String("body", string(rawJSON))).Debug("DryRun: s3 put object") + c.Logger.With(slog.String("bucket", c.bucket), slog.String("key", obj.JSONPath()), slog.String("body", string(rawJSON))).Debug("DryRun: s3 put object") return nil } @@ -243,7 +243,7 @@ func Update(ctx context.Context, c *Client, obj APIObject) error { c.dirtyPaths = append(c.dirtyPaths, "/"+obj.JSONPath()) - c.log.Debug(fmt.Sprintf("Uploading %T to s3: %q", obj, obj.JSONPath())) + c.Logger.Debug(fmt.Sprintf("Uploading %T to s3: %v", obj, obj.JSONPath())) if _, err := c.Upload(ctx, in); err != nil { return fmt.Errorf("uploading %T: %w", obj, err) } @@ -306,7 +306,7 @@ func Delete(ctx context.Context, c *Client, obj APIObject) error { Key: ptr(obj.JSONPath()), } - c.log.Debug(fmt.Sprintf("Deleting %T from s3: %q", obj, obj.JSONPath())) + c.Logger.Debug(fmt.Sprintf("Deleting %T from s3: %s", obj, obj.JSONPath())) if _, err := c.DeleteObject(ctx, in); err != nil { return fmt.Errorf("deleting s3 object at %s: %w", obj.JSONPath(), err) } diff --git a/internal/api/fetcher/fetcher.go b/internal/api/fetcher/fetcher.go index d5b407467..2b18f9d15 100644 --- a/internal/api/fetcher/fetcher.go +++ b/internal/api/fetcher/fetcher.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* @@ -20,10 +20,11 @@ package fetcher import ( "context" "encoding/json" + "errors" "fmt" - "io" "net/http" "net/url" + "strings" "github.com/edgelesssys/constellation/v2/internal/sigstore" ) @@ -39,12 +40,45 @@ func NewHTTPClient() HTTPClient { // Fetch fetches the given apiObject from the public Constellation CDN. // Fetch does not require authentication. 
func Fetch[T apiObject](ctx context.Context, c HTTPClient, cdnURL string, obj T) (T, error) { - rawObj, err := fetch(ctx, c, cdnURL, obj) - if err != nil { - return *new(T), fmt.Errorf("fetching %T: %w", obj, err) + if err := obj.ValidateRequest(); err != nil { + return *new(T), fmt.Errorf("validating request for %T: %w", obj, err) } - return parseObject(rawObj, obj) + urlObj, err := url.Parse(cdnURL) + if err != nil { + return *new(T), fmt.Errorf("parsing CDN root URL: %w", err) + } + urlObj.Path = obj.JSONPath() + url := urlObj.String() + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, http.NoBody) + if err != nil { + return *new(T), fmt.Errorf("creating request for %T: %w", obj, err) + } + + resp, err := c.Do(req) + if err != nil { + return *new(T), fmt.Errorf("sending request for %T: %w", obj, err) + } + defer resp.Body.Close() + switch resp.StatusCode { + case http.StatusOK: + case http.StatusNotFound: + return *new(T), &NotFoundError{fmt.Errorf("requesting resource at %s returned status code 404", url)} + default: + return *new(T), fmt.Errorf("unexpected status code %d while requesting resource", resp.StatusCode) + } + + var newObj T + if err := json.NewDecoder(resp.Body).Decode(&newObj); err != nil { + return *new(T), fmt.Errorf("decoding %T: %w", obj, err) + } + + if newObj.Validate() != nil { + return *new(T), fmt.Errorf("received invalid %T: %w", newObj, newObj.Validate()) + } + + return newObj, nil } // FetchAndVerify fetches the given apiObject, checks if it can fetch an accompanying signature and verifies if the signature matches the found object. @@ -52,70 +86,25 @@ func Fetch[T apiObject](ctx context.Context, c HTTPClient, cdnURL string, obj T) // FetchAndVerify uses a generic to return a new object of type T. // Otherwise the caller would have to cast the interface type to a concrete object, which could fail. 
func FetchAndVerify[T apiObject](ctx context.Context, c HTTPClient, cdnURL string, obj T, cosignVerifier sigstore.Verifier) (T, error) { - rawObj, err := fetch(ctx, c, cdnURL, obj) + fetchedObj, err := Fetch(ctx, c, cdnURL, obj) if err != nil { - return *new(T), fmt.Errorf("fetching %T: %w", obj, err) + return fetchedObj, fmt.Errorf("fetching object: %w", err) } - fetchedObj, err := parseObject(rawObj, obj) + marshalledObj, err := json.Marshal(fetchedObj) if err != nil { - return fetchedObj, fmt.Errorf("parsing %T: %w", obj, err) + return fetchedObj, fmt.Errorf("marshalling object: %w", err) } - signature, err := Fetch(ctx, c, cdnURL, signature{Signed: obj.JSONPath()}) if err != nil { return fetchedObj, fmt.Errorf("fetching signature: %w", err) } - err = cosignVerifier.VerifySignature(rawObj, signature.Signature) + err = cosignVerifier.VerifySignature(marshalledObj, signature.Signature) if err != nil { return fetchedObj, fmt.Errorf("verifying signature: %w", err) } return fetchedObj, nil } -func fetch[T apiObject](ctx context.Context, c HTTPClient, cdnURL string, obj T) ([]byte, error) { - if err := obj.ValidateRequest(); err != nil { - return nil, fmt.Errorf("validating request for %T: %w", obj, err) - } - - urlObj, err := url.Parse(cdnURL) - if err != nil { - return nil, fmt.Errorf("parsing CDN root URL: %w", err) - } - urlObj.Path = obj.JSONPath() - url := urlObj.String() - - req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, http.NoBody) - if err != nil { - return nil, fmt.Errorf("creating request for %T: %w", obj, err) - } - - resp, err := c.Do(req) - if err != nil { - return nil, fmt.Errorf("sending request for %T: %w", obj, err) - } - defer resp.Body.Close() - switch resp.StatusCode { - case http.StatusOK: - case http.StatusNotFound: - return nil, &NotFoundError{fmt.Errorf("requesting resource at %s returned status code 404", url)} - default: - return nil, fmt.Errorf("unexpected status code %d while requesting resource", resp.StatusCode) - } - - return io.ReadAll(resp.Body) -} - -func parseObject[T apiObject](rawObj []byte, obj T) (T, error) { - var newObj T - if err := json.Unmarshal(rawObj, &newObj); err != nil { - return *new(T), fmt.Errorf("decoding %T: %w", obj, err) - } - if newObj.Validate() != nil { - return *new(T), fmt.Errorf("received invalid %T: %w", newObj, newObj.Validate()) - } - return newObj, nil -} - // NotFoundError is an error that is returned when a resource is not found. type NotFoundError struct { err error @@ -143,7 +132,7 @@ type apiObject interface { // signature manages the signature of a object saved at location 'Signed'. type signature struct { // Signed is the object that is signed. - Signed string `json:"signed"` + Signed string `json:"-"` // Signature is the signature of `Signed`. Signature []byte `json:"signature"` } @@ -153,8 +142,12 @@ func (s signature) JSONPath() string { return s.Signed + ".sig" } -// ValidateRequest is a no-op. +// ValidateRequest validates the request. 
func (s signature) ValidateRequest() error { + if !strings.HasSuffix(s.Signed, ".json") { + return errors.New("signed object missing .json suffix") + } + return nil } diff --git a/internal/api/versionsapi/apiconstants.go b/internal/api/versionsapi/apiconstants.go index 832d48ec4..bca2b2b4c 100644 --- a/internal/api/versionsapi/apiconstants.go +++ b/internal/api/versionsapi/apiconstants.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package versionsapi diff --git a/internal/api/versionsapi/cli/BUILD.bazel b/internal/api/versionsapi/cli/BUILD.bazel index cc720acb4..411c68f38 100644 --- a/internal/api/versionsapi/cli/BUILD.bazel +++ b/internal/api/versionsapi/cli/BUILD.bazel @@ -21,7 +21,7 @@ go_library( "@com_github_aws_smithy_go//:smithy-go", "@com_github_azure_azure_sdk_for_go_sdk_azcore//runtime", "@com_github_azure_azure_sdk_for_go_sdk_azidentity//:azidentity", - "@com_github_azure_azure_sdk_for_go_sdk_resourcemanager_compute_armcompute_v6//:armcompute", + "@com_github_azure_azure_sdk_for_go_sdk_resourcemanager_compute_armcompute_v5//:armcompute", "@com_github_googleapis_gax_go_v2//:gax-go", "@com_github_spf13_cobra//:cobra", "@com_google_cloud_go_compute//apiv1", diff --git a/internal/api/versionsapi/cli/add.go b/internal/api/versionsapi/cli/add.go index 8d7782e3d..89c64c2b7 100644 --- a/internal/api/versionsapi/cli/add.go +++ b/internal/api/versionsapi/cli/add.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package main @@ -16,6 +16,7 @@ import ( "github.com/edgelesssys/constellation/v2/internal/api/versionsapi" "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/spf13/cobra" + "golang.org/x/mod/semver" ) func newAddCmd() *cobra.Command { @@ -52,8 +53,18 @@ func runAdd(cmd *cobra.Command, _ []string) (retErr error) { return err } log := logger.NewTextLogger(flags.logLevel) - log.Debug("Using flags", "dryRun", flags.dryRun, "kind", flags.version.Kind(), "latest", flags.latest, "ref", flags.version.Ref(), - "stream", flags.version.Stream(), "version", flags.version.Version()) + log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) + + log.Debug("Validating flags") + if err := flags.validate(log); err != nil { + return err + } + + log.Debug("Creating version struct") + ver, err := versionsapi.NewVersion(flags.ref, flags.stream, flags.version, flags.kind) + if err != nil { + return fmt.Errorf("creating version: %w", err) + } log.Debug("Creating versions API client") client, clientClose, err := versionsapi.NewClient(cmd.Context(), flags.region, flags.bucket, flags.distributionID, flags.dryRun, log) @@ -68,27 +79,27 @@ func runAdd(cmd *cobra.Command, _ []string) (retErr error) { }() log.Info("Adding version") - if err := ensureVersion(cmd.Context(), client, flags.version, versionsapi.GranularityMajor, log); err != nil { + if err := ensureVersion(cmd.Context(), client, flags.kind, ver, versionsapi.GranularityMajor, log); err != nil { return err } - if err := ensureVersion(cmd.Context(), client, flags.version, versionsapi.GranularityMinor, log); err != nil { + if err := ensureVersion(cmd.Context(), client, flags.kind, ver, versionsapi.GranularityMinor, log); err != nil { return err } if flags.latest { - if err := updateLatest(cmd.Context(), client, flags.version, log); err != nil { + if err := updateLatest(cmd.Context(), client, flags.kind, ver, log); err != nil { return fmt.Errorf("setting latest 
version: %w", err) } } - log.Info(fmt.Sprintf("List major->minor URL: %s", flags.version.ListURL(versionsapi.GranularityMajor))) - log.Info(fmt.Sprintf("List minor->patch URL: %s", flags.version.ListURL(versionsapi.GranularityMinor))) + log.Info(fmt.Sprintf("List major->minor URL: %s", ver.ListURL(versionsapi.GranularityMajor))) + log.Info(fmt.Sprintf("List minor->patch URL: %s", ver.ListURL(versionsapi.GranularityMinor))) return nil } -func ensureVersion(ctx context.Context, client *versionsapi.Client, ver versionsapi.Version, gran versionsapi.Granularity, +func ensureVersion(ctx context.Context, client *versionsapi.Client, kind versionsapi.VersionKind, ver versionsapi.Version, gran versionsapi.Granularity, log *slog.Logger, ) error { verListReq := versionsapi.List{ @@ -96,7 +107,7 @@ func ensureVersion(ctx context.Context, client *versionsapi.Client, ver versions Stream: ver.Stream(), Granularity: gran, Base: ver.WithGranularity(gran), - Kind: ver.Kind(), + Kind: kind, } verList, err := client.FetchVersionList(ctx, verListReq) var notFoundErr *apiclient.NotFoundError @@ -106,7 +117,7 @@ func ensureVersion(ctx context.Context, client *versionsapi.Client, ver versions } else if err != nil { return fmt.Errorf("failed to list minor versions: %w", err) } - log.Debug(fmt.Sprintf("%q version list: %v", gran.String(), verList.Versions)) + log.Debug(fmt.Sprintf("%s version list: %v", gran.String(), verList)) insertGran := gran + 1 insertVersion := ver.WithGranularity(insertGran) @@ -118,7 +129,7 @@ func ensureVersion(ctx context.Context, client *versionsapi.Client, ver versions log.Info(fmt.Sprintf("Inserting %s version %q into list", insertGran.String(), insertVersion)) verList.Versions = append(verList.Versions, insertVersion) - log.Debug(fmt.Sprintf("New %q version list: %v", gran.String(), verList.Versions)) + log.Debug(fmt.Sprintf("New %s version list: %v", gran.String(), verList)) if err := client.UpdateVersionList(ctx, verList); err != nil { return fmt.Errorf("failed to add %s version: %w", gran.String(), err) @@ -128,11 +139,11 @@ func ensureVersion(ctx context.Context, client *versionsapi.Client, ver versions return nil } -func updateLatest(ctx context.Context, client *versionsapi.Client, ver versionsapi.Version, log *slog.Logger) error { +func updateLatest(ctx context.Context, client *versionsapi.Client, kind versionsapi.VersionKind, ver versionsapi.Version, log *slog.Logger) error { latest := versionsapi.Latest{ Ref: ver.Ref(), Stream: ver.Stream(), - Kind: ver.Kind(), + Kind: kind, } latest, err := client.FetchVersionLatest(ctx, latest) var notFoundErr *apiclient.NotFoundError @@ -152,7 +163,7 @@ func updateLatest(ctx context.Context, client *versionsapi.Client, ver versionsa Ref: ver.Ref(), Stream: ver.Stream(), Version: ver.Version(), - Kind: ver.Kind(), + Kind: kind, } if err := client.UpdateVersionLatest(ctx, latest); err != nil { return fmt.Errorf("updating latest version: %w", err) @@ -162,20 +173,60 @@ func updateLatest(ctx context.Context, client *versionsapi.Client, ver versionsa } type addFlags struct { - version versionsapi.Version + version string + stream string + ref string + release bool latest bool dryRun bool region string bucket string distributionID string + kind versionsapi.VersionKind logLevel slog.Level } +func (f *addFlags) validate(log *slog.Logger) error { + if !semver.IsValid(f.version) { + return fmt.Errorf("version %q is not a valid semantic version", f.version) + } + if semver.Canonical(f.version) != f.version { + return fmt.Errorf("version %q is not a 
canonical semantic version", f.version) + } + + if f.ref == "" && !f.release { + return fmt.Errorf("either --ref or --release must be set") + } + + if f.kind == versionsapi.VersionKindUnknown { + return fmt.Errorf("unknown version kind %q", f.kind) + } + + if f.release { + log.Debug(fmt.Sprintf("Setting ref to %q, as release flag is set", versionsapi.ReleaseRef)) + f.ref = versionsapi.ReleaseRef + } else { + log.Debug("Setting latest to true, as release flag is not set") + f.latest = true // always set latest for non-release versions + } + + if err := versionsapi.ValidateRef(f.ref); err != nil { + return fmt.Errorf("invalid ref %w", err) + } + + if err := versionsapi.ValidateStream(f.ref, f.stream); err != nil { + return fmt.Errorf("invalid stream %w", err) + } + + return nil +} + func parseAddFlags(cmd *cobra.Command) (addFlags, error) { ref, err := cmd.Flags().GetString("ref") if err != nil { return addFlags{}, err } + ref = versionsapi.CanonicalizeRef(ref) stream, err := cmd.Flags().GetString("stream") if err != nil { return addFlags{}, err @@ -222,24 +273,17 @@ func parseAddFlags(cmd *cobra.Command) (addFlags, error) { return addFlags{}, err } - if release { - ref = versionsapi.ReleaseRef - } else { - latest = true // always set latest for non-release versions - } - - ver, err := versionsapi.NewVersion(ref, stream, version, kind) - if err != nil { - return addFlags{}, fmt.Errorf("creating version: %w", err) - } - return addFlags{ - version: ver, + version: version, + stream: stream, + ref: versionsapi.CanonicalizeRef(ref), + release: release, latest: latest, dryRun: dryRun, region: region, bucket: bucket, distributionID: distributionID, logLevel: logLevel, + kind: kind, }, nil } diff --git a/internal/api/versionsapi/cli/latest.go b/internal/api/versionsapi/cli/latest.go index ca379c43f..797cfc64d 100644 --- a/internal/api/versionsapi/cli/latest.go +++ b/internal/api/versionsapi/cli/latest.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package main @@ -39,7 +39,7 @@ func runLatest(cmd *cobra.Command, _ []string) (retErr error) { return err } log := logger.NewTextLogger(flags.logLevel) - log.Debug("Using flags", "ref", flags.ref, "stream", flags.stream, "json", flags.json) + log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) log.Debug("Validating flags") if err := flags.validate(); err != nil { diff --git a/internal/api/versionsapi/cli/list.go b/internal/api/versionsapi/cli/list.go index 52cfc873b..717ba6c77 100644 --- a/internal/api/versionsapi/cli/list.go +++ b/internal/api/versionsapi/cli/list.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package main @@ -44,8 +44,7 @@ func runList(cmd *cobra.Command, _ []string) (retErr error) { return err } log := logger.NewTextLogger(flags.logLevel) - log.Debug("Using flags", "bucket", flags.bucket, "distributionID", flags.distributionID, "json", flags.json, "minorVersion", flags.minorVersion, - "ref", flags.ref, "region", flags.region, "stream", flags.stream) + log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) log.Debug("Validating flags") if err := flags.validate(); err != nil { diff --git a/internal/api/versionsapi/cli/main.go b/internal/api/versionsapi/cli/main.go index 2c173da1b..0410f8064 100644 --- a/internal/api/versionsapi/cli/main.go +++ b/internal/api/versionsapi/cli/main.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: 
BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* diff --git a/internal/api/versionsapi/cli/rm.go b/internal/api/versionsapi/cli/rm.go index 06757f441..51802b5fb 100644 --- a/internal/api/versionsapi/cli/rm.go +++ b/internal/api/versionsapi/cli/rm.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package main @@ -21,7 +21,7 @@ import ( "cloud.google.com/go/compute/apiv1/computepb" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - armcomputev5 "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6" + armcomputev5 "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" awsconfig "github.com/aws/aws-sdk-go-v2/config" "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/aws/smithy-go" @@ -75,9 +75,7 @@ func runRemove(cmd *cobra.Command, _ []string) (retErr error) { return err } log := logger.NewTextLogger(flags.logLevel) - log.Debug("Using flags", "all", flags.all, "azLocation", flags.azLocation, "azResourceGroup", flags.azResourceGroup, "azSubscription", flags.azSubscription, - "bucket", flags.bucket, "distributionID", flags.distributionID, "dryrun", flags.dryrun, "gcpProject", flags.gcpProject, "ref", flags.ref, - "region", flags.region, "stream", flags.stream, "version", flags.version, "versionPath", flags.versionPath) + log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) log.Debug("Validating flags") if err := flags.validate(); err != nil { @@ -140,12 +138,12 @@ func runRemove(cmd *cobra.Command, _ []string) (retErr error) { func deleteSingleVersion(ctx context.Context, clients rmImageClients, ver versionsapi.Version, dryrun bool, log *slog.Logger) error { var retErr error - log.Debug(fmt.Sprintf("Deleting images for %q", ver.Version())) + log.Debug(fmt.Sprintf("Deleting images for %s", ver.Version())) if err := deleteImage(ctx, clients, ver, dryrun, log); err != nil { retErr = errors.Join(retErr, fmt.Errorf("deleting images: %w", err)) } - log.Debug(fmt.Sprintf("Deleting version %q from versions API", ver.Version())) + log.Debug(fmt.Sprintf("Deleting version %s from versions API", ver.Version())) if err := clients.version.DeleteVersion(ctx, ver); err != nil { retErr = errors.Join(retErr, fmt.Errorf("deleting version from versions API: %w", err)) } @@ -161,7 +159,7 @@ func deleteRef(ctx context.Context, clients rmImageClients, ref string, dryrun b minorVersions, err := listMinorVersions(ctx, clients.version, ref, stream) var notFoundErr *apiclient.NotFoundError if errors.As(err, ¬FoundErr) { - log.Debug(fmt.Sprintf("No minor versions found for stream %q", stream)) + log.Debug(fmt.Sprintf("No minor versions found for stream %s", stream)) continue } else if err != nil { return fmt.Errorf("listing minor versions for stream %s: %w", stream, err) @@ -169,7 +167,7 @@ func deleteRef(ctx context.Context, clients rmImageClients, ref string, dryrun b patchVersions, err := listPatchVersions(ctx, clients.version, ref, stream, minorVersions) if errors.As(err, ¬FoundErr) { - log.Debug(fmt.Sprintf("No patch versions found for stream %q", stream)) + log.Debug(fmt.Sprintf("No patch versions found for stream %s", stream)) continue } else if err != nil { return fmt.Errorf("listing patch versions for stream %s: %w", stream, err) @@ -408,7 +406,7 @@ func (a *awsClient) deleteImage(ctx context.Context, ami string, region string, return err } a.ec2 = ec2.NewFromConfig(cfg) - log.Debug(fmt.Sprintf("Deleting resources in 
AWS region %q", region)) + log.Debug(fmt.Sprintf("Deleting resources in AWS region %s", region)) snapshotID, err := a.getSnapshotID(ctx, ami, log) if err != nil { @@ -429,7 +427,7 @@ func (a *awsClient) deleteImage(ctx context.Context, ami string, region string, } func (a *awsClient) deregisterImage(ctx context.Context, ami string, dryrun bool, log *slog.Logger) error { - log.Debug(fmt.Sprintf("Deregistering image %q", ami)) + log.Debug(fmt.Sprintf("Deregistering image %s", ami)) deregisterReq := ec2.DeregisterImageInput{ ImageId: &ami, @@ -448,7 +446,7 @@ func (a *awsClient) deregisterImage(ctx context.Context, ami string, dryrun bool } func (a *awsClient) getSnapshotID(ctx context.Context, ami string, log *slog.Logger) (string, error) { - log.Debug(fmt.Sprintf("Describing image %q", ami)) + log.Debug(fmt.Sprintf("Describing image %s", ami)) req := ec2.DescribeImagesInput{ ImageIds: []string{ami}, @@ -484,7 +482,7 @@ func (a *awsClient) getSnapshotID(ctx context.Context, ami string, log *slog.Log } func (a *awsClient) deleteSnapshot(ctx context.Context, snapshotID string, dryrun bool, log *slog.Logger) error { - log.Debug(fmt.Sprintf("Deleting AWS snapshot %q", snapshotID)) + log.Debug(fmt.Sprintf("Deleting AWS snapshot %s", snapshotID)) req := ec2.DeleteSnapshotInput{ SnapshotId: &snapshotID, @@ -538,11 +536,11 @@ func (g *gcpClient) deleteImage(ctx context.Context, imageURI string, dryrun boo } if dryrun { - log.Debug(fmt.Sprintf("DryRun: delete image request: %q", req.String())) + log.Debug(fmt.Sprintf("DryRun: delete image request: %v", req)) return nil } - log.Debug(fmt.Sprintf("Deleting image %q", image)) + log.Debug(fmt.Sprintf("Deleting image %s", image)) op, err := g.compute.Delete(ctx, req) if err != nil && strings.Contains(err.Error(), "404") { log.Warn(fmt.Sprintf("GCP image %s not found", image)) @@ -633,7 +631,7 @@ func (a *azureClient) deleteImage(ctx context.Context, image string, dryrun bool } if dryrun { - log.Debug(fmt.Sprintf("DryRun: delete image: gallery: %q, image definition: %q, resource group: %q, version: %q", azImage.gallery, azImage.imageDefinition, azImage.resourceGroup, azImage.version)) + log.Debug(fmt.Sprintf("DryRun: delete image %v", azImage)) return nil } @@ -665,7 +663,7 @@ func (a *azureClient) deleteImage(ctx context.Context, image string, dryrun bool time.Sleep(15 * time.Second) // Azure needs time understand that there is no version left... 
- log.Debug(fmt.Sprintf("Deleting image definition %q", azImage.imageDefinition)) + log.Debug(fmt.Sprintf("Deleting image definition %s", azImage.imageDefinition)) op, err := a.image.BeginDelete(ctx, azImage.resourceGroup, azImage.gallery, azImage.imageDefinition, nil) if err != nil { return fmt.Errorf("deleting image definition %s: %w", azImage.imageDefinition, err) @@ -689,7 +687,7 @@ type azImage struct { func (a *azureClient) parseImage(ctx context.Context, image string, log *slog.Logger) (azImage, error) { if m := azImageRegex.FindStringSubmatch(image); len(m) == 5 { log.Debug(fmt.Sprintf( - "Image matches local image format, resource group: %q, gallery: %q, image definition: %q, version: %q", + "Image matches local image format, resource group: %s, gallery: %s, image definition: %s, version: %s", m[1], m[2], m[3], m[4], )) return azImage{ @@ -710,7 +708,7 @@ func (a *azureClient) parseImage(ctx context.Context, image string, log *slog.Lo version := m[3] log.Debug(fmt.Sprintf( - "Image matches community image format, gallery public name: %q, image definition: %q, version: %q", + "Image matches community image format, gallery public name: %s, image definition: %s, version: %s", galleryPublicName, imageDefinition, version, )) @@ -727,15 +725,15 @@ func (a *azureClient) parseImage(ctx context.Context, image string, log *slog.Lo continue } if v.Properties.SharingProfile == nil { - log.Debug(fmt.Sprintf("Skipping gallery %q with nil sharing profile", *v.Name)) + log.Debug(fmt.Sprintf("Skipping gallery %s with nil sharing profile", *v.Name)) continue } if v.Properties.SharingProfile.CommunityGalleryInfo == nil { - log.Debug(fmt.Sprintf("Skipping gallery %q with nil community gallery info", *v.Name)) + log.Debug(fmt.Sprintf("Skipping gallery %s with nil community gallery info", *v.Name)) continue } if v.Properties.SharingProfile.CommunityGalleryInfo.PublicNames == nil { - log.Debug(fmt.Sprintf("Skipping gallery %q with nil public names", *v.Name)) + log.Debug(fmt.Sprintf("Skipping gallery %s with nil public names", *v.Name)) continue } for _, publicName := range v.Properties.SharingProfile.CommunityGalleryInfo.PublicNames { diff --git a/internal/api/versionsapi/client.go b/internal/api/versionsapi/client.go index 295223d26..c03e8a7b6 100644 --- a/internal/api/versionsapi/client.go +++ b/internal/api/versionsapi/client.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package versionsapi @@ -23,8 +23,6 @@ import ( type Client struct { *apiclient.Client clientClose func(ctx context.Context) error - - log *slog.Logger } // NewClient creates a new client for the versions API. 
@@ -33,9 +31,8 @@ func NewClient(ctx context.Context, region, bucket, distributionID string, dryRu ) (*Client, CloseFunc, error) { genericClient, genericClientClose, err := apiclient.NewClient(ctx, region, bucket, distributionID, dryRun, log) versionsClient := &Client{ - Client: genericClient, - clientClose: genericClientClose, - log: log, + genericClient, + genericClientClose, } versionsClientClose := func(ctx context.Context) error { return versionsClient.Close(ctx) @@ -53,9 +50,8 @@ func NewReadOnlyClient(ctx context.Context, region, bucket, distributionID strin return nil, nil, err } versionsClient := &Client{ - Client: genericClient, - clientClose: genericClientClose, - log: log, + genericClient, + genericClientClose, } versionsClientClose := func(ctx context.Context) error { return versionsClient.Close(ctx) @@ -135,18 +131,18 @@ func (c *Client) DeleteRef(ctx context.Context, ref string) error { func (c *Client) DeleteVersion(ctx context.Context, ver Version) error { var retErr error - c.log.Debug(fmt.Sprintf("Deleting version %q from minor version list", ver.version)) + c.Client.Logger.Debug(fmt.Sprintf("Deleting version %s from minor version list", ver.version)) possibleNewLatest, err := c.deleteVersionFromMinorVersionList(ctx, ver) if err != nil { retErr = errors.Join(retErr, fmt.Errorf("removing from minor version list: %w", err)) } - c.log.Debug(fmt.Sprintf("Checking latest version for %q", ver.version)) + c.Client.Logger.Debug(fmt.Sprintf("Checking latest version for %s", ver.version)) if err := c.deleteVersionFromLatest(ctx, ver, possibleNewLatest); err != nil { retErr = errors.Join(retErr, fmt.Errorf("updating latest version: %w", err)) } - c.log.Debug(fmt.Sprintf("Deleting artifact path %q for %q", ver.ArtifactPath(APIV1), ver.version)) + c.Client.Logger.Debug(fmt.Sprintf("Deleting artifact path %s for %s", ver.ArtifactPath(APIV1), ver.version)) if err := c.Client.DeletePath(ctx, ver.ArtifactPath(APIV1)); err != nil { retErr = errors.Join(retErr, fmt.Errorf("deleting artifact path: %w", err)) } @@ -163,20 +159,20 @@ func (c *Client) deleteVersionFromMinorVersionList(ctx context.Context, ver Vers Base: ver.WithGranularity(GranularityMinor), Kind: VersionKindImage, } - c.log.Debug(fmt.Sprintf("Fetching minor version list for version %q", ver.version)) + c.Client.Logger.Debug(fmt.Sprintf("Fetching minor version list for version %s", ver.version)) minorList, err := c.FetchVersionList(ctx, minorList) var notFoundErr *apiclient.NotFoundError if errors.As(err, ¬FoundErr) { - c.log.Warn(fmt.Sprintf("Minor version list for version %s not found", ver.version)) - c.log.Warn("Skipping update of minor version list") + c.Client.Logger.Warn(fmt.Sprintf("Minor version list for version %s not found", ver.version)) + c.Client.Logger.Warn("Skipping update of minor version list") return nil, nil } else if err != nil { return nil, fmt.Errorf("fetching minor version list for version %s: %w", ver.version, err) } if !minorList.Contains(ver.version) { - c.log.Warn(fmt.Sprintf("Version %s is not in minor version list %s", ver.version, minorList.JSONPath())) - c.log.Warn("Skipping update of minor version list") + c.Client.Logger.Warn(fmt.Sprintf("Version %s is not in minor version list %s", ver.version, minorList.JSONPath())) + c.Client.Logger.Warn("Skipping update of minor version list") return nil, nil } @@ -196,20 +192,20 @@ func (c *Client) deleteVersionFromMinorVersionList(ctx context.Context, ver Vers Kind: VersionKindImage, Version: minorList.Versions[len(minorList.Versions)-1], } - 
c.log.Debug(fmt.Sprintf("Possible latest version replacement %q", latest.Version)) + c.Client.Logger.Debug(fmt.Sprintf("Possible latest version replacement %q", latest.Version)) } if c.Client.DryRun { - c.log.Debug(fmt.Sprintf("DryRun: Updating minor version list %q to %v", minorList.JSONPath(), minorList)) + c.Client.Logger.Debug(fmt.Sprintf("DryRun: Updating minor version list %s to %v", minorList.JSONPath(), minorList)) return latest, nil } - c.log.Debug(fmt.Sprintf("Updating minor version list %q", minorList.JSONPath())) + c.Client.Logger.Debug(fmt.Sprintf("Updating minor version list %s", minorList.JSONPath())) if err := c.UpdateVersionList(ctx, minorList); err != nil { return latest, fmt.Errorf("updating minor version list %s: %w", minorList.JSONPath(), err) } - c.log.Debug(fmt.Sprintf("Removed version %q from minor version list %q", ver.version, minorList.JSONPath())) + c.Client.Logger.Debug(fmt.Sprintf("Removed version %s from minor version list %s", ver.version, minorList.JSONPath())) return latest, nil } @@ -220,33 +216,33 @@ func (c *Client) deleteVersionFromLatest(ctx context.Context, ver Version, possi Stream: ver.stream, Kind: VersionKindImage, } - c.log.Debug(fmt.Sprintf("Fetching latest version from %q", latest.JSONPath())) + c.Client.Logger.Debug(fmt.Sprintf("Fetching latest version from %s", latest.JSONPath())) latest, err := c.FetchVersionLatest(ctx, latest) var notFoundErr *apiclient.NotFoundError if errors.As(err, ¬FoundErr) { - c.log.Warn(fmt.Sprintf("Latest version for %s not found", latest.JSONPath())) + c.Client.Logger.Warn(fmt.Sprintf("Latest version for %s not found", latest.JSONPath())) return nil } else if err != nil { return fmt.Errorf("fetching latest version: %w", err) } if latest.Version != ver.version { - c.log.Debug(fmt.Sprintf("Latest version is %q, not the deleted version %q", latest.Version, ver.version)) + c.Client.Logger.Debug(fmt.Sprintf("Latest version is %s, not the deleted version %s", latest.Version, ver.version)) return nil } if possibleNewLatest == nil { - c.log.Error(fmt.Sprintf("Latest version is %s, but no new latest version was found", latest.Version)) - c.log.Error(fmt.Sprintf("A manual update of latest at %s might be needed", latest.JSONPath())) + c.Client.Logger.Error(fmt.Sprintf("Latest version is %s, but no new latest version was found", latest.Version)) + c.Client.Logger.Error(fmt.Sprintf("A manual update of latest at %s might be needed", latest.JSONPath())) return fmt.Errorf("latest version is %s, but no new latest version was found", latest.Version) } if c.Client.DryRun { - c.log.Debug(fmt.Sprintf("Would update latest version from %q to %q", latest.Version, possibleNewLatest.Version)) + c.Client.Logger.Debug(fmt.Sprintf("Would update latest version from %s to %s", latest.Version, possibleNewLatest.Version)) return nil } - c.log.Info(fmt.Sprintf("Updating latest version from %s to %s", latest.Version, possibleNewLatest.Version)) + c.Client.Logger.Info(fmt.Sprintf("Updating latest version from %s to %s", latest.Version, possibleNewLatest.Version)) if err := c.UpdateVersionLatest(ctx, *possibleNewLatest); err != nil { return fmt.Errorf("updating latest version: %w", err) } diff --git a/internal/api/versionsapi/cliinfo.go b/internal/api/versionsapi/cliinfo.go index 1ec6ac3d0..ac7c18337 100644 --- a/internal/api/versionsapi/cliinfo.go +++ b/internal/api/versionsapi/cliinfo.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package versionsapi diff --git 
a/internal/api/versionsapi/cliinfo_test.go b/internal/api/versionsapi/cliinfo_test.go index 2b85852b0..f1f759f1b 100644 --- a/internal/api/versionsapi/cliinfo_test.go +++ b/internal/api/versionsapi/cliinfo_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package versionsapi diff --git a/internal/api/versionsapi/fetcher.go b/internal/api/versionsapi/fetcher.go index 407fe1a70..e17d7a376 100644 --- a/internal/api/versionsapi/fetcher.go +++ b/internal/api/versionsapi/fetcher.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package versionsapi diff --git a/internal/api/versionsapi/fetcher_test.go b/internal/api/versionsapi/fetcher_test.go index 87245a9f8..ff00ebdb6 100644 --- a/internal/api/versionsapi/fetcher_test.go +++ b/internal/api/versionsapi/fetcher_test.go @@ -1,13 +1,14 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package versionsapi import ( "bytes" + "context" "encoding/json" "io" "net/http" @@ -191,7 +192,7 @@ func TestFetchVersionList(t *testing.T) { fetcher := Fetcher{client, constants.CDNRepositoryURL} - list, err := fetcher.FetchVersionList(t.Context(), tc.list) + list, err := fetcher.FetchVersionList(context.Background(), tc.list) if tc.wantErr { assert.Error(err) diff --git a/internal/api/versionsapi/imageinfo.go b/internal/api/versionsapi/imageinfo.go index 77a14f666..59d01c1f2 100644 --- a/internal/api/versionsapi/imageinfo.go +++ b/internal/api/versionsapi/imageinfo.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package versionsapi diff --git a/internal/api/versionsapi/imageinfo_test.go b/internal/api/versionsapi/imageinfo_test.go index 04a72e941..f239b42a2 100644 --- a/internal/api/versionsapi/imageinfo_test.go +++ b/internal/api/versionsapi/imageinfo_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package versionsapi diff --git a/internal/api/versionsapi/latest.go b/internal/api/versionsapi/latest.go index e360822fb..2c9f2a20b 100644 --- a/internal/api/versionsapi/latest.go +++ b/internal/api/versionsapi/latest.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package versionsapi diff --git a/internal/api/versionsapi/latest_test.go b/internal/api/versionsapi/latest_test.go index 562b5b4a3..a1b868d7b 100644 --- a/internal/api/versionsapi/latest_test.go +++ b/internal/api/versionsapi/latest_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package versionsapi diff --git a/internal/api/versionsapi/list.go b/internal/api/versionsapi/list.go index 262007371..9cef3ebaa 100644 --- a/internal/api/versionsapi/list.go +++ b/internal/api/versionsapi/list.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package versionsapi diff --git a/internal/api/versionsapi/list_test.go b/internal/api/versionsapi/list_test.go index b936de898..20aac9ab0 100644 --- a/internal/api/versionsapi/list_test.go +++ b/internal/api/versionsapi/list_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH 
-SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package versionsapi diff --git a/internal/api/versionsapi/version.go b/internal/api/versionsapi/version.go index 0c969e0f6..12d1e8100 100644 --- a/internal/api/versionsapi/version.go +++ b/internal/api/versionsapi/version.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package versionsapi @@ -41,7 +41,7 @@ type Version struct { // NewVersion creates a new Version object and validates it. func NewVersion(ref, stream, version string, kind VersionKind) (Version, error) { ver := Version{ - ref: CanonicalizeRef(ref), + ref: ref, stream: stream, version: version, kind: kind, @@ -62,7 +62,7 @@ func NewVersionFromShortPath(shortPath string, kind VersionKind) (Version, error } ver := Version{ - ref: ref, // Canonicalized by parseShortPath. + ref: ref, stream: stream, version: version, kind: kind, @@ -331,7 +331,7 @@ func CanonicalizeRef(ref string) string { canRef := notAZ09Regexp.ReplaceAllString(ref, "-") if canRef == ReleaseRef { - return "" // No ref should be canonicalized to the release ref. + return "" // No ref should be cannonicalized to the release ref. } return canRef @@ -401,7 +401,7 @@ func MeasurementURL(version Version) (measurementURL, signatureURL *url.URL, err } var ( - shortPathRegex = regexp.MustCompile(`^ref/([^/]+)/stream/([a-zA-Z0-9-]+)/([a-zA-Z0-9.-]+)$`) + shortPathRegex = regexp.MustCompile(`^ref/([a-zA-Z0-9-]+)/stream/([a-zA-Z0-9-]+)/([a-zA-Z0-9.-]+)$`) shortPathReleaseRegex = regexp.MustCompile(`^stream/([a-zA-Z0-9-]+)/([a-zA-Z0-9.-]+)$`) ) @@ -422,7 +422,6 @@ func parseShortPath(shortPath string) (ref, stream, version string, err error) { if shortPathRegex.MatchString(shortPath) { matches := shortPathRegex.FindStringSubmatch(shortPath) ref := matches[1] - ref = CanonicalizeRef(ref) if err := ValidateRef(ref); err != nil { return "", "", "", err } diff --git a/internal/api/versionsapi/version_test.go b/internal/api/versionsapi/version_test.go index c3631c90c..25f0f8ce0 100644 --- a/internal/api/versionsapi/version_test.go +++ b/internal/api/versionsapi/version_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package versionsapi @@ -16,111 +16,6 @@ import ( "github.com/edgelesssys/constellation/v2/internal/constants" ) -func TestNewVersion(t *testing.T) { - testCases := map[string]struct { - ref string - stream string - version string - kind VersionKind - wantVer Version - wantErr bool - }{ - "stable release image": { - ref: ReleaseRef, - stream: "stable", - version: "v9.9.9", - kind: VersionKindImage, - wantVer: Version{ - ref: ReleaseRef, - stream: "stable", - version: "v9.9.9", - kind: VersionKindImage, - }, - }, - "release debug image": { - ref: ReleaseRef, - stream: "debug", - version: "v9.9.9", - kind: VersionKindImage, - wantVer: Version{ - ref: ReleaseRef, - stream: "debug", - version: "v9.9.9", - kind: VersionKindImage, - }, - }, - "stable release cli": { - ref: ReleaseRef, - stream: "stable", - version: "v9.9.9", - kind: VersionKindCLI, - wantVer: Version{ - ref: ReleaseRef, - stream: "stable", - version: "v9.9.9", - kind: VersionKindCLI, - }, - }, - "release debug cli": { - ref: ReleaseRef, - stream: "debug", - version: "v9.9.9", - kind: VersionKindCLI, - wantVer: Version{ - ref: ReleaseRef, - stream: "debug", - version: "v9.9.9", - kind: VersionKindCLI, - }, - }, - "unknown kind": { - ref: ReleaseRef, - 
stream: "debug", - version: "v9.9.9", - kind: VersionKindUnknown, - wantErr: true, - }, - "non-release ref as input": { - ref: "working-branch", - stream: "debug", - version: "v9.9.9", - kind: VersionKindImage, - wantVer: Version{ - ref: "working-branch", - stream: "debug", - version: "v9.9.9", - kind: VersionKindImage, - }, - }, - "non-canonical ref as input": { - ref: "testing-1.23", - stream: "debug", - version: "v9.9.9", - kind: VersionKindImage, - wantVer: Version{ - ref: "testing-1-23", - stream: "debug", - version: "v9.9.9", - kind: VersionKindImage, - }, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - assert := assert.New(t) - - ver, err := NewVersion(tc.ref, tc.stream, tc.version, tc.kind) - if tc.wantErr { - assert.Error(err) - return - } - assert.NoError(err) - assert.Equal(tc.wantVer, ver) - }) - } -} - func TestNewVersionFromShortPath(t *testing.T) { testCases := map[string]struct { path string @@ -183,26 +78,6 @@ func TestNewVersionFromShortPath(t *testing.T) { kind: VersionKindCLI, wantErr: true, }, - "non-release ref as input": { - path: "ref/working-branch/stream/debug/v9.9.9", - kind: VersionKindImage, - wantVer: Version{ - ref: "working-branch", - stream: "debug", - version: "v9.9.9", - kind: VersionKindImage, - }, - }, - "non-canonical ref as input": { - path: "ref/testing-1.23/stream/debug/v9.9.9", - kind: VersionKindImage, - wantVer: Version{ - ref: "testing-1-23", - stream: "debug", - version: "v9.9.9", - kind: VersionKindImage, - }, - }, } for name, tc := range testCases { diff --git a/internal/api/versionsapi/versionsapi.go b/internal/api/versionsapi/versionsapi.go index b89cd7577..54fa65123 100644 --- a/internal/api/versionsapi/versionsapi.go +++ b/internal/api/versionsapi/versionsapi.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* diff --git a/internal/atls/atls.go b/internal/atls/atls.go index 9f42fb9c9..1fff52d17 100644 --- a/internal/atls/atls.go +++ b/internal/atls/atls.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // aTLS provides config generation functions to bootstrap attested TLS connections. 
@@ -70,7 +70,6 @@ func CreateAttestationClientTLSConfig(issuer Issuer, validators []Validator) (*t InsecureSkipVerify: true, // disable default verification because we use our own verify func ServerName: base64.StdEncoding.EncodeToString(clientNonce), // abuse ServerName as a channel to transmit the nonce MinVersion: tls.VersionTLS12, - NextProtos: []string{"http/1.1", "h2"}, // grpc-go requires us to advertise HTTP/2 (h2) over ALPN }, nil } @@ -115,7 +114,6 @@ func getATLSConfigForClientFunc(issuer Issuer, validators []Validator) (func(*tl VerifyPeerCertificate: serverConn.verify, GetCertificate: serverConn.getCertificate, MinVersion: tls.VersionTLS12, - NextProtos: []string{"http/1.1", "h2"}, // grpc-go requires us to advertise HTTP/2 (h2) over ALPN } // enable mutual aTLS if any validators are set diff --git a/internal/atls/atls_test.go b/internal/atls/atls_test.go index 03f1660c6..db315b3a1 100644 --- a/internal/atls/atls_test.go +++ b/internal/atls/atls_test.go @@ -1,12 +1,13 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package atls import ( + "context" "encoding/asn1" "errors" "io" @@ -161,7 +162,7 @@ func TestTLSConfig(t *testing.T) { server.StartTLS() defer server.Close() - req, err := http.NewRequestWithContext(t.Context(), http.MethodGet, server.URL, http.NoBody) + req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, server.URL, http.NoBody) require.NoError(err) resp, err := client.Do(req) if tc.wantErr { @@ -220,7 +221,7 @@ func TestClientConnectionConcurrency(t *testing.T) { var reqs []*http.Request for _, url := range urls { - req, err := http.NewRequestWithContext(t.Context(), http.MethodGet, url, http.NoBody) + req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, url, http.NoBody) require.NoError(err) reqs = append(reqs, req) } @@ -294,7 +295,7 @@ func TestServerConnectionConcurrency(t *testing.T) { var reqs []*http.Request for _, url := range urls { - req, err := http.NewRequestWithContext(t.Context(), http.MethodGet, url, http.NoBody) + req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, url, http.NoBody) require.NoError(err) reqs = append(reqs, req) } diff --git a/internal/attestation/attestation.go b/internal/attestation/attestation.go index 21b918925..d5e458012 100644 --- a/internal/attestation/attestation.go +++ b/internal/attestation/attestation.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* diff --git a/internal/attestation/attestation_test.go b/internal/attestation/attestation_test.go index 482089dee..3615859c0 100644 --- a/internal/attestation/attestation_test.go +++ b/internal/attestation/attestation_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package attestation diff --git a/internal/attestation/aws/aws.go b/internal/attestation/aws/aws.go index 5edaaea50..80806eccd 100644 --- a/internal/attestation/aws/aws.go +++ b/internal/attestation/aws/aws.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* diff --git a/internal/attestation/aws/nitrotpm/issuer.go b/internal/attestation/aws/nitrotpm/issuer.go index 5365eed82..e95b72d06 100644 --- a/internal/attestation/aws/nitrotpm/issuer.go +++ b/internal/attestation/aws/nitrotpm/issuer.go @@ -1,7 +1,7 @@ /* 
Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package nitrotpm diff --git a/internal/attestation/aws/nitrotpm/issuer_test.go b/internal/attestation/aws/nitrotpm/issuer_test.go index eb2733775..59b5b7e47 100644 --- a/internal/attestation/aws/nitrotpm/issuer_test.go +++ b/internal/attestation/aws/nitrotpm/issuer_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package nitrotpm @@ -100,7 +100,7 @@ func TestGetInstanceInfo(t *testing.T) { instanceInfoFunc := getInstanceInfo(&tc.client) assert.NotNil(instanceInfoFunc) - info, err := instanceInfoFunc(t.Context(), tpm, nil) + info, err := instanceInfoFunc(context.Background(), tpm, nil) if tc.wantErr { assert.Error(err) assert.Nil(info) diff --git a/internal/attestation/aws/nitrotpm/nitrotpm.go b/internal/attestation/aws/nitrotpm/nitrotpm.go index a86c417fe..47c59db9b 100644 --- a/internal/attestation/aws/nitrotpm/nitrotpm.go +++ b/internal/attestation/aws/nitrotpm/nitrotpm.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* diff --git a/internal/attestation/aws/nitrotpm/validator.go b/internal/attestation/aws/nitrotpm/validator.go index ea60e1685..54d853c5d 100644 --- a/internal/attestation/aws/nitrotpm/validator.go +++ b/internal/attestation/aws/nitrotpm/validator.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package nitrotpm diff --git a/internal/attestation/aws/nitrotpm/validator_test.go b/internal/attestation/aws/nitrotpm/validator_test.go index a782894fa..0e6d086cd 100644 --- a/internal/attestation/aws/nitrotpm/validator_test.go +++ b/internal/attestation/aws/nitrotpm/validator_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package nitrotpm @@ -42,7 +42,7 @@ func TestGeTrustedKey(t *testing.T) { t.Run(name, func(t *testing.T) { assert := assert.New(t) out, err := getTrustedKey( - t.Context(), + context.Background(), vtpm.AttestationDocument{ Attestation: &attest.Attestation{ AkPub: tc.akPub, diff --git a/internal/attestation/aws/snp/BUILD.bazel b/internal/attestation/aws/snp/BUILD.bazel index f8287da48..f08964307 100644 --- a/internal/attestation/aws/snp/BUILD.bazel +++ b/internal/attestation/aws/snp/BUILD.bazel @@ -18,6 +18,7 @@ go_library( "//internal/attestation/vtpm", "//internal/config", "@com_github_google_go_sev_guest//abi", + "@com_github_google_go_sev_guest//client", "@com_github_google_go_sev_guest//kds", "@com_github_google_go_sev_guest//proto/sevsnp", "@com_github_google_go_sev_guest//validate", diff --git a/internal/attestation/aws/snp/errors.go b/internal/attestation/aws/snp/errors.go index b20291b5e..2b07870b7 100644 --- a/internal/attestation/aws/snp/errors.go +++ b/internal/attestation/aws/snp/errors.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package snp diff --git a/internal/attestation/aws/snp/issuer.go b/internal/attestation/aws/snp/issuer.go index 040a19a94..e3d58ab79 100644 --- a/internal/attestation/aws/snp/issuer.go +++ b/internal/attestation/aws/snp/issuer.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package snp 
@@ -21,6 +21,7 @@ import ( "github.com/edgelesssys/constellation/v2/internal/attestation/vtpm" "github.com/google/go-sev-guest/abi" + sevclient "github.com/google/go-sev-guest/client" "github.com/google/go-tpm-tools/client" tpmclient "github.com/google/go-tpm-tools/client" ) @@ -69,7 +70,13 @@ func getInstanceInfo(_ context.Context, tpm io.ReadWriteCloser, _ []byte) ([]byt akDigest := sha512.Sum512(encoded) - report, certs, err := snp.GetExtendedReport(akDigest) + device, err := sevclient.OpenDevice() + if err != nil { + return nil, fmt.Errorf("opening sev device: %w", err) + } + defer device.Close() + + report, certs, err := sevclient.GetRawExtendedReportAtVmpl(device, akDigest, 0) if err != nil { return nil, fmt.Errorf("getting extended report: %w", err) } diff --git a/internal/attestation/aws/snp/issuer_test.go b/internal/attestation/aws/snp/issuer_test.go index d92cad9c9..3f2f24699 100644 --- a/internal/attestation/aws/snp/issuer_test.go +++ b/internal/attestation/aws/snp/issuer_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package snp diff --git a/internal/attestation/aws/snp/snp.go b/internal/attestation/aws/snp/snp.go index 4b5f31d6a..4085f50bb 100644 --- a/internal/attestation/aws/snp/snp.go +++ b/internal/attestation/aws/snp/snp.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* diff --git a/internal/attestation/aws/snp/testdata/report.txt b/internal/attestation/aws/snp/testdata/report.txt index e413ca309..efd90375f 100644 --- a/internal/attestation/aws/snp/testdata/report.txt +++ b/internal/attestation/aws/snp/testdata/report.txt @@ -1 +1 @@ -AwAAAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAY3CcAAAAAAAAABAAAAAAAAACHq3yvUQ4bNSDcPM62TuRBKOEJdvsNP8XidGmdiq9QYVSvTB3goCa0n9+GHprHVVFVGzU00cYTaaOwj1uu0NsvWlzbrY9UDOSrygEg+uyG9i73EopKxxGt001Gi1lXyCqi8k6PER2tw2ibeHuI7QcVAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAARowRHpiQyfxJKbRS+DVfQGWxQvKf1S21qaW2zACl7rf//////////////////////////////////////////BAAAAAAAGNkZAQEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAGNsdNwEAHTcBAAQAAAAAABjbAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAATEij8MQ3cc95xvjozFQCY/3yYhrUJa6qN5kOaH0eHbuMzQ0iOgY3m6riTBYsQlksAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAt0MH84001UcDpwNKn6LJSVfidlQxQ2nAM6WGsDjMvA4Z8WcYJeQhgpcDL7YJ+dbpAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= 
+AgAAAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAK0QMAAAAAAAAABAAAAAAAAAADJjVhPI4zH6KeCWNxkQ/mofaTg92gLJRhQApwtm2Ho9pd2GMAJSK+Q6/DTywjOYm9bkAeNR0Q18yADW9d/PAZJayBD1xHUIkPsaFY8JeWLgTU1/tkDR0IqZgpz0pwVDpHzG+xkrvpCqcTFCNhpmFVAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACOsAob9aWVVnjx8VNbU/bqGewnLGnBSZbJu8smGfzcN///////////////////////////////////////////AwAAAAAACnMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwAAAAAACqkBNgEAATYBAAMAAAAAAAqpAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAm8z1/Oxcd+Bhdxd1okDoZ9gMiYw5Y/fp74hylcA2Eu+XPt5p+7fqqG7d7YLdJtTuAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIOZBrwmRpIFfKDCywiFaiILyguTq/6vefDmdzNBKiRKtjdNiHa0hNgeQFGHspRcZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= diff --git a/internal/attestation/aws/snp/testdata/testdata.go b/internal/attestation/aws/snp/testdata/testdata.go index 504693be7..61d14f154 100644 --- a/internal/attestation/aws/snp/testdata/testdata.go +++ b/internal/attestation/aws/snp/testdata/testdata.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package testdata contains testing data for an attestation process. @@ -15,7 +15,7 @@ import _ "embed" var SNPReport string // AKDigest holds the AK digest embedded in SNPReport.REPORT_DATA. -const AKDigest = "87ab7caf510e1b3520dc3cceb64ee44128e10976fb0d3fc5e274699d8aaf506154af4c1de0a026b49fdf861e9ac75551551b3534d1c61369a3b08f5baed0db2f" +const AKDigest = "032635613c8e331fa29e096371910fe6a1f69383dda02c9461400a70b66d87a3da5dd863002522be43afc34f2c233989bd6e401e351d10d7cc800d6f5dfcf019" // VLEK for SNPReport. 
// diff --git a/internal/attestation/aws/snp/testdata/vlek.pem b/internal/attestation/aws/snp/testdata/vlek.pem index 3f8be0331..406a84235 100644 --- a/internal/attestation/aws/snp/testdata/vlek.pem +++ b/internal/attestation/aws/snp/testdata/vlek.pem @@ -1,30 +1,30 @@ -----BEGIN CERTIFICATE----- -MIIFIzCCAtegAwIBAgIBADBBBgkqhkiG9w0BAQowNKAPMA0GCWCGSAFlAwQCAgUA -oRwwGgYJKoZIhvcNAQEIMA0GCWCGSAFlAwQCAgUAogMCATAwgYAxFDASBgNVBAsM -C0VuZ2luZWVyaW5nMQswCQYDVQQGEwJVUzEUMBIGA1UEBwwLU2FudGEgQ2xhcmEx -CzAJBgNVBAgMAkNBMR8wHQYDVQQKDBZBZHZhbmNlZCBNaWNybyBEZXZpY2VzMRcw -FQYDVQQDDA5TRVYtVkxFSy1NaWxhbjAeFw0yNDEyMTAyMjMwMTZaFw0yNTEyMTAy -MjMwMTZaMHoxFDASBgNVBAsMC0VuZ2luZWVyaW5nMQswCQYDVQQGEwJVUzEUMBIG -A1UEBwwLU2FudGEgQ2xhcmExCzAJBgNVBAgMAkNBMR8wHQYDVQQKDBZBZHZhbmNl -ZCBNaWNybyBEZXZpY2VzMREwDwYDVQQDDAhTRVYtVkxFSzB2MBAGByqGSM49AgEG -BSuBBAAiA2IABJRw6hwLZt7KX95uPePz/3Gt/z9mm/32f0JpE2twW8w6DQ1xOPnW -YRLJeMSZNpaYW/NRpNf0vfy5IDQt44didvu+37x2aqyaneFiBh5jTxSg/2dCZ+bi -4eZw/p0Us7bubqOB8jCB7zAQBgkrBgEEAZx4AQEEAwIBADAUBgkrBgEEAZx4AQIE -BxYFTWlsYW4wEQYKKwYBBAGceAEDAQQDAgEEMBEGCisGAQQBnHgBAwIEAwIBADAR -BgorBgEEAZx4AQMEBAMCAQAwEQYKKwYBBAGceAEDBQQDAgEAMBEGCisGAQQBnHgB -AwYEAwIBADARBgorBgEEAZx4AQMHBAMCAQAwEQYKKwYBBAGceAEDAwQDAgEYMBIG -CisGAQQBnHgBAwgEBAICANkwLAYJKwYBBAGceAEFBB8WHUNOPWNjLWV1LXdlc3Qt -MS5hbWF6b25hd3MuY29tMEEGCSqGSIb3DQEBCjA0oA8wDQYJYIZIAWUDBAICBQCh -HDAaBgkqhkiG9w0BAQgwDQYJYIZIAWUDBAICBQCiAwIBMAOCAgEAar1tA7vYelxK -uj+r7APOEPcAAoF7RWZs6ixDlXHuFVj2rfxqmxt8nqjedEKBfUGPCEsbAV+Z/bj9 -GqN+q5Bn1yk6RL/VqxTxTVhpa0G33R87UjE+S+42k6ENgddbl4hxws5g83Sn9All -/XjNPHmciWjmix4PJs5tZv+YaJ15BSBkJfrTRo+rX3UDKeqUHNoX+Cx6D7ECF/6k -ToFlHBEBqHKa2EzhNMK2UXm/vm0ATSaNHuDEGBvzbXflPmHZi1RZqQ7q9VMenFDx -JwAgnUrltcuLjKMID7c2yj+Emk/CBEEFoAJRfSoSvMdhqrNaRlbEqEnQ95C/XNPn -Mqtk5Ao/UVV5fRXYSt5oGKTBGhqTwv+Xqyei+/IgpcJyGPFbHVX9UPteP4RnSLiq -uJ3oRIvyEw+u6bkMNBBAjh4C+Jp2BVrLs1aC0h9fjfVEofWTb/NioJRigKTNfbao -sTy6tX8qoUSxtp/bIqK1jg1Y7eIDIMCgqnm0N+hJT7CnkwyCBUkOHmsExzQcthmg -y0J1J7bTA507rY5ZglNSRLCXqAfORVxIBwTaOXrJV2lMLScTUdnhFrVPFUAl7uCj -rKta1iGye+fieoYncdHLIVyIJGsTC+AbhPIAR2Zh847Sxw1SVOobTPc0wUIoKrOU -xR32EkufsNGLb8TiEsgpa2ulbw8xi6U= +MIIFLDCCAtugAwIBAgIBADBGBgkqhkiG9w0BAQowOaAPMA0GCWCGSAFlAwQCAgUA +oRwwGgYJKoZIhvcNAQEIMA0GCWCGSAFlAwQCAgUAogMCATCjAwIBATCBgDEUMBIG +A1UECwwLRW5naW5lZXJpbmcxCzAJBgNVBAYTAlVTMRQwEgYDVQQHDAtTYW50YSBD +bGFyYTELMAkGA1UECAwCQ0ExHzAdBgNVBAoMFkFkdmFuY2VkIE1pY3JvIERldmlj +ZXMxFzAVBgNVBAMMDlNFVi1WTEVLLU1pbGFuMB4XDTIzMDcxOTA4MjkyOFoXDTI0 +MDcxOTA4MjkyOFowejEUMBIGA1UECwwLRW5naW5lZXJpbmcxCzAJBgNVBAYTAlVT +MRQwEgYDVQQHDAtTYW50YSBDbGFyYTELMAkGA1UECAwCQ0ExHzAdBgNVBAoMFkFk +dmFuY2VkIE1pY3JvIERldmljZXMxETAPBgNVBAMMCFNFVi1WTEVLMHYwEAYHKoZI +zj0CAQYFK4EEACIDYgAEXFl4NHpiQCuZXIrehIEk/5XNIdMvo24wyaezN+0FouYB +9Z23nL523gpJUlT+mvb5ZMybh5tO1nBGFMOKwzP9dnSBwTs0qn57Ts9OTpW57EAo +Mx4SI7g1yz/mt4e6hma4o4HxMIHuMBAGCSsGAQQBnHgBAQQDAgEAMBQGCSsGAQQB +nHgBAgQHFgVNaWxhbjARBgorBgEEAZx4AQMBBAMCAQMwEQYKKwYBBAGceAEDAgQD +AgEAMBEGCisGAQQBnHgBAwQEAwIBADARBgorBgEEAZx4AQMFBAMCAQAwEQYKKwYB +BAGceAEDBgQDAgEAMBEGCisGAQQBnHgBAwcEAwIBADARBgorBgEEAZx4AQMDBAMC +AQowEQYKKwYBBAGceAEDCAQDAgFzMCwGCSsGAQQBnHgBBQQfFh1DTj1jYy11cy1l +YXN0LTIuYW1hem9uYXdzLmNvbTBGBgkqhkiG9w0BAQowOaAPMA0GCWCGSAFlAwQC +AgUAoRwwGgYJKoZIhvcNAQEIMA0GCWCGSAFlAwQCAgUAogMCATCjAwIBAQOCAgEA +E2CR10QkVTofcjmQbuu787J+H+OjzQLPIi/dUbP/LvZdYi/eWglYQPRbYxhxnIi1 +PB9R9c7LLhbNRhroog+TzrxyKLibEAW3rwn2iygPnsIemyL89wqtPNqEKNjhBXsb +s/0bmf0rNJ3lugssCAzrIStkx8at0K/099BEs4FuUM5u97HVy+jqLdRa2XOHMgGa +K7sNdR4swuLhfts9gOOX8ntJ+XkxtUx2mz449fXn8KN70mKa2YShhNd2JWJmv1jW 
+K0I1UxVVwIOHBn/W8fQL5a061oRQQaW5+wPRTys0iEMmLU7+plC8LNWeEq93TfFY +eUZ9EzinZ5S7z+c8J1FVWYNHGJauWj4lkjf+XGUZqXwTCPzou6tYJqqwWQEUUxXC +M3QKgbkIGWg4WKHIAXGChbM86JLY0W6VueOHyu4S1Z4i81IcDp4cs83WxYWfCpKH +Fq3Si2BhzZ0YGgK25JCkomh5Yf7dlsByyuQssf3TCqNmOfSFOTLvxfwTvLD5Omlm +O1mPI0YaoZya4WcPxbpWS+2Em23/5inQvT+ZhvMNkljD2NVbhLVGP1v4YR+T2zaC +0qJ4YYJ2ERQTnEUlKnlF9bm6PwZSRHupK6ecsGjH+Bz5hBPbT09nEpJf0bWkzVSA +AY8POFt3zBJiqONQuOlBpXzqKRKvFYQVEaX2EXQ+W6s= -----END CERTIFICATE----- diff --git a/internal/attestation/aws/snp/validator.go b/internal/attestation/aws/snp/validator.go index 92431929f..22d8b814b 100644 --- a/internal/attestation/aws/snp/validator.go +++ b/internal/attestation/aws/snp/validator.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package snp @@ -31,7 +31,7 @@ import ( // Validator for AWS TPM attestation. type Validator struct { - // Embed variant to identify the Validator using variant.OID(). + // Embed variant to identify the Validator using varaint.OID(). variant.AWSSEVSNP // Embed validator to implement Validate method for aTLS handshake. *vtpm.Validator @@ -191,11 +191,11 @@ func (a *awsValidator) validate(attestation vtpm.AttestationDocument, ask *x509. func getVerifyOpts(att *sevsnp.Attestation) (*verify.Options, error) { ask, err := x509.ParseCertificate(att.CertificateChain.AskCert) if err != nil { - return nil, fmt.Errorf("parsing ASK certificate: %w", err) + return &verify.Options{}, fmt.Errorf("parsing VLEK certificate: %w", err) } ark, err := x509.ParseCertificate(att.CertificateChain.ArkCert) if err != nil { - return nil, fmt.Errorf("parsing ARK certificate: %w", err) + return &verify.Options{}, fmt.Errorf("parsing VLEK certificate: %w", err) } verifyOpts := &verify.Options{ diff --git a/internal/attestation/aws/snp/validator_test.go b/internal/attestation/aws/snp/validator_test.go index 567791daf..84804a886 100644 --- a/internal/attestation/aws/snp/validator_test.go +++ b/internal/attestation/aws/snp/validator_test.go @@ -1,13 +1,14 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package snp import ( "bytes" + "context" "crypto" "crypto/x509" "encoding/base64" @@ -66,7 +67,7 @@ func TestGetTrustedKey(t *testing.T) { t.Run(name, func(t *testing.T) { assert := assert.New(t) out, err := validator().getTrustedKey( - t.Context(), + context.Background(), vtpm.AttestationDocument{ Attestation: &attest.Attestation{ AkPub: tc.akPub, diff --git a/internal/attestation/azure/azure.go b/internal/attestation/azure/azure.go index 655be1b2c..2fbe30567 100644 --- a/internal/attestation/azure/azure.go +++ b/internal/attestation/azure/azure.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* diff --git a/internal/attestation/azure/azure_test.go b/internal/attestation/azure/azure_test.go index d73db409e..0e23f1fce 100644 --- a/internal/attestation/azure/azure_test.go +++ b/internal/attestation/azure/azure_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package azure diff --git a/internal/attestation/azure/snp/imds.go b/internal/attestation/azure/snp/imds.go index 8a5547637..9b1c24cef 100644 --- a/internal/attestation/azure/snp/imds.go +++ b/internal/attestation/azure/snp/imds.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: 
BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package snp diff --git a/internal/attestation/azure/snp/issuer.go b/internal/attestation/azure/snp/issuer.go index b3ab8bdf7..f0e8bb6f0 100644 --- a/internal/attestation/azure/snp/issuer.go +++ b/internal/attestation/azure/snp/issuer.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package snp diff --git a/internal/attestation/azure/snp/issuer_test.go b/internal/attestation/azure/snp/issuer_test.go index 45116303c..224937be2 100644 --- a/internal/attestation/azure/snp/issuer_test.go +++ b/internal/attestation/azure/snp/issuer_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package snp @@ -82,7 +82,7 @@ func TestGetSNPAttestation(t *testing.T) { data := []byte("data") - attestationJSON, err := issuer.getInstanceInfo(t.Context(), nil, data) + attestationJSON, err := issuer.getInstanceInfo(context.Background(), nil, data) if tc.wantErr { assert.Error(err) return diff --git a/internal/attestation/azure/snp/maa.go b/internal/attestation/azure/snp/maa.go index a51348925..2cf82766a 100644 --- a/internal/attestation/azure/snp/maa.go +++ b/internal/attestation/azure/snp/maa.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package snp diff --git a/internal/attestation/azure/snp/snp.go b/internal/attestation/azure/snp/snp.go index 84895748b..8c109d25a 100644 --- a/internal/attestation/azure/snp/snp.go +++ b/internal/attestation/azure/snp/snp.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* diff --git a/internal/attestation/azure/snp/validator.go b/internal/attestation/azure/snp/validator.go index 701f526ff..a4b58e4d4 100644 --- a/internal/attestation/azure/snp/validator.go +++ b/internal/attestation/azure/snp/validator.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package snp @@ -116,11 +116,25 @@ func (v *Validator) getTrustedKey(ctx context.Context, attDoc vtpm.AttestationDo return nil, fmt.Errorf("parsing attestation report: %w", err) } - verifyOpts, err := getVerifyOpts(att) + // ASK, as cached in joinservice or reported from THIM / KDS. + ask, err := x509.ParseCertificate(att.CertificateChain.AskCert) if err != nil { - return nil, fmt.Errorf("getting verify options: %w", err) + return nil, fmt.Errorf("parsing ASK certificate: %w", err) } + verifyOpts := &verify.Options{ + TrustedRoots: map[string][]*trust.AMDRootCerts{ + "Milan": { + { + Product: "Milan", + ProductCerts: &trust.ProductCerts{ + Ask: ask, + Ark: trustedArk, + }, + }, + }, + }, + } if err := v.attestationVerifier.SNPAttestation(att, verifyOpts); err != nil { return nil, fmt.Errorf("verifying SNP attestation: %w", err) } @@ -238,31 +252,3 @@ type maaValidator interface { type hclAkValidator interface { Validate(runtimeDataRaw []byte, reportData []byte, rsaParameters *tpm2.RSAParams) error } - -func getVerifyOpts(att *spb.Attestation) (*verify.Options, error) { - // ASK, as cached in joinservice or reported from THIM / KDS. 
- ask, err := x509.ParseCertificate(att.CertificateChain.AskCert) - if err != nil { - return nil, fmt.Errorf("parsing ASK certificate: %w", err) - } - ark, err := x509.ParseCertificate(att.CertificateChain.ArkCert) - if err != nil { - return nil, fmt.Errorf("parsing ARK certificate: %w", err) - } - - verifyOpts := &verify.Options{ - TrustedRoots: map[string][]*trust.AMDRootCerts{ - "Milan": { - { - Product: "Milan", - ProductCerts: &trust.ProductCerts{ - Ask: ask, - Ark: ark, - }, - }, - }, - }, - } - - return verifyOpts, nil -} diff --git a/internal/attestation/azure/snp/validator_test.go b/internal/attestation/azure/snp/validator_test.go index 9b382fb08..f07428461 100644 --- a/internal/attestation/azure/snp/validator_test.go +++ b/internal/attestation/azure/snp/validator_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package snp @@ -182,7 +182,7 @@ func TestCheckIDKeyDigest(t *testing.T) { report := reportWithIDKeyDigest(tc.idKeyDigest) validator := newTestValidator(cfg, tc.validateMaaTokenErr) - err := validator.checkIDKeyDigest(t.Context(), report, "", nil) + err := validator.checkIDKeyDigest(context.Background(), report, "", nil) if tc.wantErr { require.Error(err) } else { @@ -368,7 +368,7 @@ func TestTrustedKeyFromSNP(t *testing.T) { ), wantErr: true, assertion: func(assert *assert.Assertions, err error) { - assert.ErrorContains(err, "x509: malformed certificate") + assert.ErrorContains(err, "could not interpret VCEK DER bytes: x509: malformed certificate") }, }, "invalid certchain fall back to embedded": { @@ -650,7 +650,7 @@ func TestTrustedKeyFromSNP(t *testing.T) { attestationValidator: tc.validator, } - key, err := validator.getTrustedKey(t.Context(), attDoc, nil) + key, err := validator.getTrustedKey(context.Background(), attDoc, nil) if tc.wantErr { assert.Error(err) if tc.assertion != nil { diff --git a/internal/attestation/azure/tdx/issuer.go b/internal/attestation/azure/tdx/issuer.go index 1cb051694..e04b066a6 100644 --- a/internal/attestation/azure/tdx/issuer.go +++ b/internal/attestation/azure/tdx/issuer.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package tdx @@ -90,7 +90,7 @@ func (i *Issuer) getInstanceInfo(ctx context.Context, tpm io.ReadWriteCloser, _ return nil, fmt.Errorf("getting quote: %w", err) } - instanceInfo := InstanceInfo{ + instanceInfo := instanceInfo{ AttestationReport: quote, RuntimeData: runtimeData, } diff --git a/internal/attestation/azure/tdx/issuer_test.go b/internal/attestation/azure/tdx/issuer_test.go index dcb248aac..e57e63aa0 100644 --- a/internal/attestation/azure/tdx/issuer_test.go +++ b/internal/attestation/azure/tdx/issuer_test.go @@ -1,13 +1,14 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package tdx import ( "bytes" + "context" "encoding/binary" "encoding/json" "io" @@ -134,7 +135,7 @@ func TestIMDSGetQuote(t *testing.T) { client: tc.client, } - _, err := quoteGetter.getQuote(t.Context(), []byte("test")) + _, err := quoteGetter.getQuote(context.Background(), []byte("test")) if tc.wantErr { assert.Error(err) } else { diff --git a/internal/attestation/azure/tdx/tdx.go b/internal/attestation/azure/tdx/tdx.go index cbf34b4a4..815a43ae2 100644 --- a/internal/attestation/azure/tdx/tdx.go +++ b/internal/attestation/azure/tdx/tdx.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH 
-SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* @@ -19,8 +19,7 @@ More specifically: */ package tdx -// InstanceInfo wraps the TDX report with additional Azure specific runtime data. -type InstanceInfo struct { +type instanceInfo struct { AttestationReport []byte RuntimeData []byte } diff --git a/internal/attestation/azure/tdx/testdata/testdata.go b/internal/attestation/azure/tdx/testdata/testdata.go index 08902b9eb..eaccd11a2 100644 --- a/internal/attestation/azure/tdx/testdata/testdata.go +++ b/internal/attestation/azure/tdx/testdata/testdata.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package testdata contains testing data for an attestation process. diff --git a/internal/attestation/azure/tdx/validator.go b/internal/attestation/azure/tdx/validator.go index 2dedf2390..da8dc0eee 100644 --- a/internal/attestation/azure/tdx/validator.go +++ b/internal/attestation/azure/tdx/validator.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package tdx @@ -58,7 +58,7 @@ func NewValidator(cfg *config.AzureTDX, log attestation.Logger) *Validator { } func (v *Validator) getTrustedTPMKey(_ context.Context, attDoc vtpm.AttestationDocument, _ []byte) (crypto.PublicKey, error) { - var instanceInfo InstanceInfo + var instanceInfo instanceInfo if err := json.Unmarshal(attDoc.InstanceInfo, &instanceInfo); err != nil { return nil, err } @@ -93,24 +93,25 @@ func (v *Validator) validateQuote(tdxQuote *tdx.QuoteV4) error { roots.AddCert((*x509.Certificate)(&v.cfg.IntelRootKey)) if err := verify.TdxQuote(tdxQuote, &verify.Options{ - CheckRevocations: true, - GetCollateral: true, - TrustedRoots: roots, - Getter: v.getter, + // TODO: Re-enable CRL checking once issues on Azure's side are resolved. 
+ // CheckRevocations: true, + // GetCollateral: true, + TrustedRoots: roots, + Getter: v.getter, }); err != nil { return err } if err := validate.TdxQuote(tdxQuote, &validate.Options{ HeaderOptions: validate.HeaderOptions{ - MinimumQeSvn: v.cfg.QESVN.Value, - MinimumPceSvn: v.cfg.PCESVN.Value, - QeVendorID: v.cfg.QEVendorID.Value, + MinimumQeSvn: v.cfg.QESVN, + MinimumPceSvn: v.cfg.PCESVN, + QeVendorID: v.cfg.QEVendorID, }, TdQuoteBodyOptions: validate.TdQuoteBodyOptions{ - MinimumTeeTcbSvn: v.cfg.TEETCBSVN.Value, + MinimumTeeTcbSvn: v.cfg.TEETCBSVN, MrSeam: v.cfg.MRSeam, - Xfam: v.cfg.XFAM.Value, + Xfam: v.cfg.XFAM, }, }); err != nil { return err diff --git a/internal/attestation/azure/trustedlaunch/issuer.go b/internal/attestation/azure/trustedlaunch/issuer.go index eaccfc06d..f62a88104 100644 --- a/internal/attestation/azure/trustedlaunch/issuer.go +++ b/internal/attestation/azure/trustedlaunch/issuer.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package trustedlaunch diff --git a/internal/attestation/azure/trustedlaunch/trustedlaunch.go b/internal/attestation/azure/trustedlaunch/trustedlaunch.go index 822ee86e9..8959bc32f 100644 --- a/internal/attestation/azure/trustedlaunch/trustedlaunch.go +++ b/internal/attestation/azure/trustedlaunch/trustedlaunch.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* diff --git a/internal/attestation/azure/trustedlaunch/trustedlaunch_test.go b/internal/attestation/azure/trustedlaunch/trustedlaunch_test.go index 1fe9a3989..a3bef9ed9 100644 --- a/internal/attestation/azure/trustedlaunch/trustedlaunch_test.go +++ b/internal/attestation/azure/trustedlaunch/trustedlaunch_test.go @@ -1,13 +1,14 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package trustedlaunch import ( "bytes" + "context" "crypto/rand" "crypto/rsa" "crypto/x509" @@ -191,7 +192,7 @@ func TestGetAttestationCert(t *testing.T) { issuer := NewIssuer(logger.NewTest(t)) issuer.hClient = newTestClient(tc.crlServer) - certs, err := issuer.getAttestationCert(t.Context(), tpm, nil) + certs, err := issuer.getAttestationCert(context.Background(), tpm, nil) if tc.wantIssueErr { assert.Error(err) return @@ -212,7 +213,7 @@ func TestGetAttestationCert(t *testing.T) { roots.AddCert(cert) validator.roots = roots - key, err := validator.verifyAttestationKey(t.Context(), attDoc, nil) + key, err := validator.verifyAttestationKey(context.Background(), attDoc, nil) if tc.wantValidateErr { assert.Error(err) return diff --git a/internal/attestation/azure/trustedlaunch/validator.go b/internal/attestation/azure/trustedlaunch/validator.go index 4cdefb580..368db8368 100644 --- a/internal/attestation/azure/trustedlaunch/validator.go +++ b/internal/attestation/azure/trustedlaunch/validator.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package trustedlaunch diff --git a/internal/attestation/choose/BUILD.bazel b/internal/attestation/choose/BUILD.bazel index 09bd9d2b9..dfb1938e4 100644 --- a/internal/attestation/choose/BUILD.bazel +++ b/internal/attestation/choose/BUILD.bazel @@ -14,8 +14,7 @@ go_library( "//internal/attestation/azure/snp", "//internal/attestation/azure/tdx", "//internal/attestation/azure/trustedlaunch", - "//internal/attestation/gcp/es", - 
"//internal/attestation/gcp/snp", + "//internal/attestation/gcp", "//internal/attestation/qemu", "//internal/attestation/tdx", "//internal/attestation/variant", diff --git a/internal/attestation/choose/choose.go b/internal/attestation/choose/choose.go index 6918210b2..3ce936085 100644 --- a/internal/attestation/choose/choose.go +++ b/internal/attestation/choose/choose.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package choose @@ -16,8 +16,7 @@ import ( azuresnp "github.com/edgelesssys/constellation/v2/internal/attestation/azure/snp" azuretdx "github.com/edgelesssys/constellation/v2/internal/attestation/azure/tdx" "github.com/edgelesssys/constellation/v2/internal/attestation/azure/trustedlaunch" - "github.com/edgelesssys/constellation/v2/internal/attestation/gcp/es" - gcpsnp "github.com/edgelesssys/constellation/v2/internal/attestation/gcp/snp" + "github.com/edgelesssys/constellation/v2/internal/attestation/gcp" "github.com/edgelesssys/constellation/v2/internal/attestation/qemu" "github.com/edgelesssys/constellation/v2/internal/attestation/tdx" "github.com/edgelesssys/constellation/v2/internal/attestation/variant" @@ -38,9 +37,7 @@ func Issuer(attestationVariant variant.Variant, log attestation.Logger) (atls.Is case variant.AzureTDX{}: return azuretdx.NewIssuer(log), nil case variant.GCPSEVES{}: - return es.NewIssuer(log), nil - case variant.GCPSEVSNP{}: - return gcpsnp.NewIssuer(log), nil + return gcp.NewIssuer(log), nil case variant.QEMUVTPM{}: return qemu.NewIssuer(log), nil case variant.QEMUTDX{}: @@ -66,9 +63,7 @@ func Validator(cfg config.AttestationCfg, log attestation.Logger) (atls.Validato case *config.AzureTDX: return azuretdx.NewValidator(cfg, log), nil case *config.GCPSEVES: - return es.NewValidator(cfg, log) - case *config.GCPSEVSNP: - return gcpsnp.NewValidator(cfg, log) + return gcp.NewValidator(cfg, log), nil case *config.QEMUVTPM: return qemu.NewValidator(cfg, log), nil case *config.QEMUTDX: diff --git a/internal/attestation/choose/choose_test.go b/internal/attestation/choose/choose_test.go index 6cc20cdb8..33ca1849e 100644 --- a/internal/attestation/choose/choose_test.go +++ b/internal/attestation/choose/choose_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package choose @@ -40,9 +40,6 @@ func TestIssuer(t *testing.T) { "gcp-sev-es": { variant: variant.GCPSEVES{}, }, - "gcp-sev-snp": { - variant: variant.GCPSEVSNP{}, - }, "qemu-vtpm": { variant: variant.QEMUVTPM{}, }, @@ -92,9 +89,6 @@ func TestValidator(t *testing.T) { "gcp-sev-es": { cfg: &config.GCPSEVES{}, }, - "gcp-sev-snp": { - cfg: &config.GCPSEVSNP{}, - }, "qemu-vtpm": { cfg: &config.QEMUVTPM{}, }, diff --git a/internal/attestation/gcp/BUILD.bazel b/internal/attestation/gcp/BUILD.bazel index 8b8c24d8c..7cabb294b 100644 --- a/internal/attestation/gcp/BUILD.bazel +++ b/internal/attestation/gcp/BUILD.bazel @@ -1,18 +1,21 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("//bazel/go:go_test.bzl", "go_test") go_library( name = "gcp", srcs = [ "gcp.go", - "metadata.go", - "restclient.go", + "issuer.go", + "validator.go", ], importpath = "github.com/edgelesssys/constellation/v2/internal/attestation/gcp", visibility = ["//:__subpackages__"], deps = [ - "//internal/attestation/snp", + "//internal/attestation", "//internal/attestation/variant", "//internal/attestation/vtpm", + "//internal/config", + 
"@com_github_google_go_tpm_tools//client", "@com_github_google_go_tpm_tools//proto/attest", "@com_github_googleapis_gax_go_v2//:gax-go", "@com_google_cloud_go_compute//apiv1", @@ -21,3 +24,22 @@ go_library( "@org_golang_google_api//option", ], ) + +go_test( + name = "gcp_test", + srcs = [ + "issuer_test.go", + "validator_test.go", + ], + embed = [":gcp"], + deps = [ + "//internal/attestation/vtpm", + "@com_github_google_go_tpm_tools//proto/attest", + "@com_github_googleapis_gax_go_v2//:gax-go", + "@com_github_stretchr_testify//assert", + "@com_github_stretchr_testify//require", + "@com_google_cloud_go_compute//apiv1/computepb", + "@org_golang_google_api//option", + "@org_golang_google_protobuf//proto", + ], +) diff --git a/internal/attestation/gcp/es/BUILD.bazel b/internal/attestation/gcp/es/BUILD.bazel deleted file mode 100644 index a7d089412..000000000 --- a/internal/attestation/gcp/es/BUILD.bazel +++ /dev/null @@ -1,43 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") -load("//bazel/go:go_test.bzl", "go_test") - -go_library( - name = "es", - srcs = [ - "es.go", - "issuer.go", - "validator.go", - ], - importpath = "github.com/edgelesssys/constellation/v2/internal/attestation/gcp/es", - visibility = ["//:__subpackages__"], - deps = [ - "//internal/attestation", - "//internal/attestation/gcp", - "//internal/attestation/variant", - "//internal/attestation/vtpm", - "//internal/config", - "@com_github_google_go_tpm_tools//client", - "@com_github_google_go_tpm_tools//proto/attest", - ], -) - -go_test( - name = "es_test", - srcs = [ - "issuer_test.go", - "validator_test.go", - ], - embed = [":es"], - deps = [ - "//internal/attestation/gcp", - "//internal/attestation/variant", - "//internal/attestation/vtpm", - "@com_github_google_go_tpm_tools//proto/attest", - "@com_github_googleapis_gax_go_v2//:gax-go", - "@com_github_stretchr_testify//assert", - "@com_github_stretchr_testify//require", - "@com_google_cloud_go_compute//apiv1/computepb", - "@org_golang_google_api//option", - "@org_golang_google_protobuf//proto", - ], -) diff --git a/internal/attestation/gcp/es/es.go b/internal/attestation/gcp/es/es.go deleted file mode 100644 index b3c9f2645..000000000 --- a/internal/attestation/gcp/es/es.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright (c) Edgeless Systems GmbH - -SPDX-License-Identifier: BUSL-1.1 -*/ - -/* -# GCP SEV-ES attestation - -Google offers [confidential VMs], utilizing AMD SEV-ES to provide memory encryption. - -AMD SEV-ES doesn't offer much in terms of remote attestation, and following that the VMs don't offer much either, see [their docs] on how to validate a confidential VM for some insights. -However, each VM comes with a [virtual Trusted Platform Module (vTPM)]. -This module can be used to generate VM unique encryption keys or to attest the platform's chain of boot. We can use the vTPM to verify the VM is running on AMD SEV-ES enabled hardware, allowing us to bootstrap a constellation cluster. - -# Issuer - -Generates a TPM attestation key using a Google provided attestation key. -Additionally project ID, zone, and instance name are fetched from the metadata server and attached to the attestation document. - -# Validator - -Verifies the TPM attestation by using a public key provided by Google's API corresponding to the project ID, zone, instance name tuple attached to the attestation document. 
- -# Problems - - - SEV-ES is somewhat limited when compared to the newer version SEV-SNP - - Comparison of SEV, SEV-ES, and SEV-SNP can be seen on page seven of [AMD's SNP whitepaper] - - - We have to trust Google - - Since the vTPM is provided by Google, and they could do whatever they want with it, we have no save proof of the VMs actually being confidential. - - - The provided vTPM has no endorsement certificate for its attestation key - - Without a certificate signing the authenticity of any endorsement keys we have no way of establishing a chain of trust. - Instead, we have to rely on Google's API to provide us with the public key of the vTPM's endorsement key. - -[confidential VMs]: https://cloud.google.com/compute/confidential-vm/docs/about-cvm -[their docs]: https://cloud.google.com/compute/confidential-vm/docs/monitoring -[virtual Trusted Platform Module (vTPM)]: https://cloud.google.com/security/shielded-cloud/shielded-vm#vtpm -[AMD's SNP whitepaper]: https://www.amd.com/system/files/TechDocs/SEV-SNP-strengthening-vm-isolation-with-integrity-protection-and-more.pdf#page=7 -*/ -package es diff --git a/internal/attestation/gcp/es/issuer.go b/internal/attestation/gcp/es/issuer.go deleted file mode 100644 index 935bc99eb..000000000 --- a/internal/attestation/gcp/es/issuer.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright (c) Edgeless Systems GmbH - -SPDX-License-Identifier: BUSL-1.1 -*/ - -package es - -import ( - "github.com/edgelesssys/constellation/v2/internal/attestation" - "github.com/edgelesssys/constellation/v2/internal/attestation/gcp" - "github.com/edgelesssys/constellation/v2/internal/attestation/variant" - "github.com/edgelesssys/constellation/v2/internal/attestation/vtpm" - tpmclient "github.com/google/go-tpm-tools/client" -) - -// Issuer for GCP confidential VM attestation. -type Issuer struct { - variant.GCPSEVES - *vtpm.Issuer -} - -// NewIssuer initializes a new GCP Issuer. -func NewIssuer(log attestation.Logger) *Issuer { - return &Issuer{ - Issuer: vtpm.NewIssuer( - vtpm.OpenVTPM, - tpmclient.GceAttestationKeyRSA, - gcp.GCEInstanceInfo(gcp.MetadataClient{}), - log, - ), - } -} diff --git a/internal/attestation/gcp/es/validator.go b/internal/attestation/gcp/es/validator.go deleted file mode 100644 index c846dfdf4..000000000 --- a/internal/attestation/gcp/es/validator.go +++ /dev/null @@ -1,59 +0,0 @@ -/* -Copyright (c) Edgeless Systems GmbH - -SPDX-License-Identifier: BUSL-1.1 -*/ - -package es - -import ( - "fmt" - - "github.com/edgelesssys/constellation/v2/internal/attestation" - "github.com/edgelesssys/constellation/v2/internal/attestation/gcp" - "github.com/edgelesssys/constellation/v2/internal/attestation/variant" - "github.com/edgelesssys/constellation/v2/internal/attestation/vtpm" - "github.com/edgelesssys/constellation/v2/internal/config" - "github.com/google/go-tpm-tools/proto/attest" -) - -const minimumGceVersion = 1 - -// Validator for GCP confidential VM attestation. -type Validator struct { - variant.GCPSEVES - *vtpm.Validator -} - -// NewValidator initializes a new GCP validator with the provided PCR values specified in the config. 
-func NewValidator(cfg *config.GCPSEVES, log attestation.Logger) (*Validator, error) { - getTrustedKey, err := gcp.TrustedKeyGetter(variant.GCPSEVES{}, gcp.NewRESTClient) - if err != nil { - return nil, fmt.Errorf("create trusted key getter: %v", err) - } - - return &Validator{ - Validator: vtpm.NewValidator( - cfg.Measurements, - getTrustedKey, - validateCVM, - log, - ), - }, nil -} - -// validateCVM checks that the machine state represents a GCE AMD-SEV VM. -func validateCVM(_ vtpm.AttestationDocument, state *attest.MachineState) error { - gceVersion := state.Platform.GetGceVersion() - if gceVersion < minimumGceVersion { - return fmt.Errorf("outdated GCE version: %v (require >= %v)", gceVersion, minimumGceVersion) - } - - tech := state.Platform.Technology - wantTech := attest.GCEConfidentialTechnology_AMD_SEV - if tech != wantTech { - return fmt.Errorf("unexpected confidential technology: %v (expected: %v)", tech, wantTech) - } - - return nil -} diff --git a/internal/attestation/gcp/gcp.go b/internal/attestation/gcp/gcp.go index ce84a7222..893b002a6 100644 --- a/internal/attestation/gcp/gcp.go +++ b/internal/attestation/gcp/gcp.go @@ -1,10 +1,45 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* # Google Cloud Platform attestation + +Google offers [confidential VMs], utilizing AMD SEV-ES to provide memory encryption. + +AMD SEV-ES doesn't offer much in terms of remote attestation, and following that the VMs don't offer much either, see [their docs] on how to validate a confidential VM for some insights. +However, each VM comes with a [virtual Trusted Platform Module (vTPM)]. +This module can be used to generate VM unique encryption keys or to attest the platform's chain of boot. We can use the vTPM to verify the VM is running on AMD SEV-ES enabled hardware, allowing us to bootstrap a constellation cluster. + +# Issuer + +Generates a TPM attestation key using a Google provided attestation key. +Additionally project ID, zone, and instance name are fetched from the metadata server and attached to the attestation document. + +# Validator + +Verifies the TPM attestation by using a public key provided by Google's API corresponding to the project ID, zone, instance name tuple attached to the attestation document. + +# Problems + + - SEV-ES is somewhat limited when compared to the newer version SEV-SNP + + Comparison of SEV, SEV-ES, and SEV-SNP can be seen on page seven of [AMD's SNP whitepaper] + + - We have to trust Google + + Since the vTPM is provided by Google, and they could do whatever they want with it, we have no safe proof of the VMs actually being confidential. + + - The provided vTPM has no endorsement certificate for its attestation key + + Without a certificate signing the authenticity of any endorsement keys we have no way of establishing a chain of trust. + Instead, we have to rely on Google's API to provide us with the public key of the vTPM's endorsement key.
+ +[confidential VMs]: https://cloud.google.com/compute/confidential-vm/docs/about-cvm +[their docs]: https://cloud.google.com/compute/confidential-vm/docs/monitoring +[virtual Trusted Platform Module (vTPM)]: https://cloud.google.com/security/shielded-cloud/shielded-vm#vtpm +[AMD's SNP whitepaper]: https://www.amd.com/system/files/TechDocs/SEV-SNP-strengthening-vm-isolation-with-integrity-protection-and-more.pdf#page=7 */ package gcp diff --git a/internal/attestation/gcp/issuer.go b/internal/attestation/gcp/issuer.go new file mode 100644 index 000000000..4dc36ba0d --- /dev/null +++ b/internal/attestation/gcp/issuer.go @@ -0,0 +1,87 @@ +/* +Copyright (c) Edgeless Systems GmbH + +SPDX-License-Identifier: AGPL-3.0-only +*/ + +package gcp + +import ( + "context" + "encoding/json" + "errors" + "io" + + "cloud.google.com/go/compute/metadata" + "github.com/edgelesssys/constellation/v2/internal/attestation" + "github.com/edgelesssys/constellation/v2/internal/attestation/variant" + "github.com/edgelesssys/constellation/v2/internal/attestation/vtpm" + tpmclient "github.com/google/go-tpm-tools/client" + "github.com/google/go-tpm-tools/proto/attest" +) + +// Issuer for GCP confidential VM attestation. +type Issuer struct { + variant.GCPSEVES + *vtpm.Issuer +} + +// NewIssuer initializes a new GCP Issuer. +func NewIssuer(log attestation.Logger) *Issuer { + return &Issuer{ + Issuer: vtpm.NewIssuer( + vtpm.OpenVTPM, + tpmclient.GceAttestationKeyRSA, + getGCEInstanceInfo(metadataClient{}), + log, + ), + } +} + +// getGCEInstanceInfo fetches VM metadata used for attestation. +func getGCEInstanceInfo(client gcpMetadataClient) func(context.Context, io.ReadWriteCloser, []byte) ([]byte, error) { + // Ideally we would want to use the endorsement public key certificate + // However, this is not available on GCE instances + // Workaround: Provide ShieldedVM instance info + // The attesting party can request the VMs signing key using Google's API + return func(context.Context, io.ReadWriteCloser, []byte) ([]byte, error) { + projectID, err := client.projectID() + if err != nil { + return nil, errors.New("unable to fetch projectID") + } + zone, err := client.zone() + if err != nil { + return nil, errors.New("unable to fetch zone") + } + instanceName, err := client.instanceName() + if err != nil { + return nil, errors.New("unable to fetch instance name") + } + + return json.Marshal(&attest.GCEInstanceInfo{ + Zone: zone, + ProjectId: projectID, + InstanceName: instanceName, + }) + } +} + +type gcpMetadataClient interface { + projectID() (string, error) + instanceName() (string, error) + zone() (string, error) +} + +type metadataClient struct{} + +func (c metadataClient) projectID() (string, error) { + return metadata.ProjectID() +} + +func (c metadataClient) instanceName() (string, error) { + return metadata.InstanceName() +} + +func (c metadataClient) zone() (string, error) { + return metadata.Zone() +} diff --git a/internal/attestation/gcp/es/issuer_test.go b/internal/attestation/gcp/issuer_test.go similarity index 83% rename from internal/attestation/gcp/es/issuer_test.go rename to internal/attestation/gcp/issuer_test.go index 203466f65..4ad64c7a2 100644 --- a/internal/attestation/gcp/es/issuer_test.go +++ b/internal/attestation/gcp/issuer_test.go @@ -1,10 +1,10 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ -package es +package gcp import ( "context" @@ -13,7 +13,6 @@ import ( -
"github.com/edgelesssys/constellation/v2/internal/attestation/gcp" "github.com/google/go-tpm-tools/proto/attest" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -67,7 +66,7 @@ func TestGetGCEInstanceInfo(t *testing.T) { require := require.New(t) var tpm io.ReadWriteCloser - out, err := gcp.GCEInstanceInfo(tc.client)(t.Context(), tpm, nil) + out, err := getGCEInstanceInfo(tc.client)(context.Background(), tpm, nil) if tc.wantErr { assert.Error(err) } else { @@ -91,14 +90,14 @@ type fakeMetadataClient struct { zoneErr error } -func (c fakeMetadataClient) ProjectID(_ context.Context) (string, error) { +func (c fakeMetadataClient) projectID() (string, error) { return c.projectIDString, c.projecIDErr } -func (c fakeMetadataClient) InstanceName(_ context.Context) (string, error) { +func (c fakeMetadataClient) instanceName() (string, error) { return c.instanceNameString, c.instanceNameErr } -func (c fakeMetadataClient) Zone(_ context.Context) (string, error) { +func (c fakeMetadataClient) zone() (string, error) { return c.zoneString, c.zoneErr } diff --git a/internal/attestation/gcp/metadata.go b/internal/attestation/gcp/metadata.go deleted file mode 100644 index f158933db..000000000 --- a/internal/attestation/gcp/metadata.go +++ /dev/null @@ -1,69 +0,0 @@ -/* -Copyright (c) Edgeless Systems GmbH - -SPDX-License-Identifier: BUSL-1.1 -*/ - -package gcp - -import ( - "context" - "encoding/json" - "errors" - "io" - - "cloud.google.com/go/compute/metadata" - "github.com/google/go-tpm-tools/proto/attest" -) - -// GCEInstanceInfo fetches VM metadata used for attestation from the GCE Metadata API. -func GCEInstanceInfo(client gcpMetadataClient) func(context.Context, io.ReadWriteCloser, []byte) ([]byte, error) { - // Ideally we would want to use the endorsement public key certificate - // However, this is not available on GCE instances - // Workaround: Provide ShieldedVM instance info - // The attesting party can request the VMs signing key using Google's API - return func(ctx context.Context, _ io.ReadWriteCloser, _ []byte) ([]byte, error) { - projectID, err := client.ProjectID(ctx) - if err != nil { - return nil, errors.New("unable to fetch projectID") - } - zone, err := client.Zone(ctx) - if err != nil { - return nil, errors.New("unable to fetch zone") - } - instanceName, err := client.InstanceName(ctx) - if err != nil { - return nil, errors.New("unable to fetch instance name") - } - - return json.Marshal(&attest.GCEInstanceInfo{ - Zone: zone, - ProjectId: projectID, - InstanceName: instanceName, - }) - } -} - -type gcpMetadataClient interface { - ProjectID(context.Context) (string, error) - InstanceName(context.Context) (string, error) - Zone(context.Context) (string, error) -} - -// A MetadataClient fetches metadata from the GCE Metadata API. -type MetadataClient struct{} - -// ProjectID returns the project ID of the GCE instance. -func (c MetadataClient) ProjectID(ctx context.Context) (string, error) { - return metadata.ProjectIDWithContext(ctx) -} - -// InstanceName returns the instance name of the GCE instance. -func (c MetadataClient) InstanceName(ctx context.Context) (string, error) { - return metadata.InstanceNameWithContext(ctx) -} - -// Zone returns the zone the GCE instance is located in. 
-func (c MetadataClient) Zone(ctx context.Context) (string, error) { - return metadata.ZoneWithContext(ctx) -} diff --git a/internal/attestation/gcp/restclient.go b/internal/attestation/gcp/restclient.go deleted file mode 100644 index dd75b8569..000000000 --- a/internal/attestation/gcp/restclient.go +++ /dev/null @@ -1,101 +0,0 @@ -/* -Copyright (c) Edgeless Systems GmbH - -SPDX-License-Identifier: BUSL-1.1 -*/ - -package gcp - -import ( - "context" - "crypto" - "crypto/x509" - "encoding/json" - "encoding/pem" - "fmt" - - compute "cloud.google.com/go/compute/apiv1" - "cloud.google.com/go/compute/apiv1/computepb" - "github.com/edgelesssys/constellation/v2/internal/attestation/snp" - "github.com/edgelesssys/constellation/v2/internal/attestation/variant" - "github.com/edgelesssys/constellation/v2/internal/attestation/vtpm" - "github.com/google/go-tpm-tools/proto/attest" - "github.com/googleapis/gax-go/v2" - "google.golang.org/api/option" -) - -// RESTClient is a client for the GCE API. -type RESTClient struct { - *compute.InstancesClient -} - -// NewRESTClient creates a new RESTClient. -func NewRESTClient(ctx context.Context, opts ...option.ClientOption) (CVMRestClient, error) { - c, err := compute.NewInstancesRESTClient(ctx, opts...) - if err != nil { - return nil, err - } - return &RESTClient{c}, nil -} - -// CVMRestClient is the interface a GCP REST client for a CVM must implement. -type CVMRestClient interface { - GetShieldedInstanceIdentity(ctx context.Context, req *computepb.GetShieldedInstanceIdentityInstanceRequest, opts ...gax.CallOption) (*computepb.ShieldedInstanceIdentity, error) - Close() error -} - -// TrustedKeyGetter returns a function that queries the GCE API for a shieldedVM's public signing key. -// This key can be used to verify attestation statements issued by the VM. 
-func TrustedKeyGetter( - attestationVariant variant.Variant, - newRESTClient func(ctx context.Context, opts ...option.ClientOption) (CVMRestClient, error), -) (func(ctx context.Context, attDoc vtpm.AttestationDocument, _ []byte) (crypto.PublicKey, error), error) { - return func(ctx context.Context, attDoc vtpm.AttestationDocument, _ []byte) (crypto.PublicKey, error) { - client, err := newRESTClient(ctx) - if err != nil { - return nil, fmt.Errorf("creating GCE client: %w", err) - } - defer client.Close() - - var gceInstanceInfo attest.GCEInstanceInfo - switch attestationVariant { - case variant.GCPSEVES{}: - if err := json.Unmarshal(attDoc.InstanceInfo, &gceInstanceInfo); err != nil { - return nil, err - } - case variant.GCPSEVSNP{}: - var instanceInfo snp.InstanceInfo - if err := json.Unmarshal(attDoc.InstanceInfo, &instanceInfo); err != nil { - return nil, err - } - gceInstanceInfo = attest.GCEInstanceInfo{ - InstanceName: instanceInfo.GCP.InstanceName, - ProjectId: instanceInfo.GCP.ProjectId, - Zone: instanceInfo.GCP.Zone, - } - default: - return nil, fmt.Errorf("unsupported attestation variant: %v", attestationVariant) - } - - instance, err := client.GetShieldedInstanceIdentity(ctx, &computepb.GetShieldedInstanceIdentityInstanceRequest{ - Instance: gceInstanceInfo.GetInstanceName(), - Project: gceInstanceInfo.GetProjectId(), - Zone: gceInstanceInfo.GetZone(), - }) - if err != nil { - return nil, fmt.Errorf("retrieving VM identity: %w", err) - } - - if instance.SigningKey == nil || instance.SigningKey.EkPub == nil { - return nil, fmt.Errorf("received no signing key from GCP API") - } - - // Parse the signing key return by GetShieldedInstanceIdentity - block, _ := pem.Decode([]byte(*instance.SigningKey.EkPub)) - if block == nil || block.Type != "PUBLIC KEY" { - return nil, fmt.Errorf("failed to decode PEM block containing public key") - } - - return x509.ParsePKIXPublicKey(block.Bytes) - }, nil -} diff --git a/internal/attestation/gcp/snp/BUILD.bazel b/internal/attestation/gcp/snp/BUILD.bazel deleted file mode 100644 index 800a69c64..000000000 --- a/internal/attestation/gcp/snp/BUILD.bazel +++ /dev/null @@ -1,28 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "snp", - srcs = [ - "issuer.go", - "snp.go", - "validator.go", - ], - importpath = "github.com/edgelesssys/constellation/v2/internal/attestation/gcp/snp", - visibility = ["//:__subpackages__"], - deps = [ - "//internal/attestation", - "//internal/attestation/gcp", - "//internal/attestation/snp", - "//internal/attestation/variant", - "//internal/attestation/vtpm", - "//internal/config", - "@com_github_google_go_sev_guest//abi", - "@com_github_google_go_sev_guest//kds", - "@com_github_google_go_sev_guest//proto/sevsnp", - "@com_github_google_go_sev_guest//validate", - "@com_github_google_go_sev_guest//verify", - "@com_github_google_go_sev_guest//verify/trust", - "@com_github_google_go_tpm_tools//client", - "@com_github_google_go_tpm_tools//proto/attest", - ], -) diff --git a/internal/attestation/gcp/snp/issuer.go b/internal/attestation/gcp/snp/issuer.go deleted file mode 100644 index a5b26a569..000000000 --- a/internal/attestation/gcp/snp/issuer.go +++ /dev/null @@ -1,161 +0,0 @@ -/* -Copyright (c) Edgeless Systems GmbH - -SPDX-License-Identifier: BUSL-1.1 -*/ - -package snp - -import ( - "context" - "crypto/x509" - "encoding/json" - "encoding/pem" - "fmt" - "io" - - "github.com/edgelesssys/constellation/v2/internal/attestation" - "github.com/edgelesssys/constellation/v2/internal/attestation/gcp" 
- "github.com/edgelesssys/constellation/v2/internal/attestation/snp" - "github.com/edgelesssys/constellation/v2/internal/attestation/variant" - "github.com/edgelesssys/constellation/v2/internal/attestation/vtpm" - - "github.com/google/go-sev-guest/abi" - "github.com/google/go-tpm-tools/client" - tpmclient "github.com/google/go-tpm-tools/client" - "github.com/google/go-tpm-tools/proto/attest" -) - -// Issuer issues SEV-SNP attestations. -type Issuer struct { - variant.GCPSEVSNP - *vtpm.Issuer -} - -// NewIssuer creates a SEV-SNP based issuer for GCP. -func NewIssuer(log attestation.Logger) *Issuer { - return &Issuer{ - Issuer: vtpm.NewIssuer( - vtpm.OpenVTPM, - getAttestationKey, - getInstanceInfo, - log, - ), - } -} - -// getAttestationKey returns a new attestation key. -func getAttestationKey(tpm io.ReadWriter) (*tpmclient.Key, error) { - tpmAk, err := client.GceAttestationKeyRSA(tpm) - if err != nil { - return nil, fmt.Errorf("creating RSA Endorsement key: %w", err) - } - - return tpmAk, nil -} - -// getInstanceInfo generates an extended SNP report, i.e. the report and any loaded certificates. -// Report generation is triggered by sending ioctl syscalls to the SNP guest device, the AMD PSP generates the report. -// The returned bytes will be written into the attestation document. -func getInstanceInfo(ctx context.Context, _ io.ReadWriteCloser, extraData []byte) ([]byte, error) { - if len(extraData) > 64 { - return nil, fmt.Errorf("extra data too long: %d, should be 64 bytes at most", len(extraData)) - } - var extraData64 [64]byte - copy(extraData64[:], extraData) - - report, certs, err := snp.GetExtendedReport(extraData64) - if err != nil { - return nil, fmt.Errorf("getting extended report: %w", err) - } - - vcek, certChain, err := parseSNPCertTable(certs) - if err != nil { - return nil, fmt.Errorf("parsing vcek: %w", err) - } - - gceInstanceInfo, err := gceInstanceInfo(ctx) - if err != nil { - return nil, fmt.Errorf("getting GCE instance info: %w", err) - } - - raw, err := json.Marshal(snp.InstanceInfo{ - AttestationReport: report, - ReportSigner: vcek, - CertChain: certChain, - GCP: gceInstanceInfo, - }) - if err != nil { - return nil, fmt.Errorf("marshalling instance info: %w", err) - } - - return raw, nil -} - -// gceInstanceInfo returns the instance info for a GCE instance from the metadata API. -func gceInstanceInfo(ctx context.Context) (*attest.GCEInstanceInfo, error) { - c := gcp.MetadataClient{} - - instanceName, err := c.InstanceName(ctx) - if err != nil { - return nil, fmt.Errorf("getting instance name: %w", err) - } - - projectID, err := c.ProjectID(ctx) - if err != nil { - return nil, fmt.Errorf("getting project ID: %w", err) - } - - zone, err := c.Zone(ctx) - if err != nil { - return nil, fmt.Errorf("getting zone: %w", err) - } - - return &attest.GCEInstanceInfo{ - InstanceName: instanceName, - ProjectId: projectID, - Zone: zone, - }, nil -} - -// parseSNPCertTable takes a marshalled SNP certificate table and returns the PEM-encoded VCEK certificate and, -// if present, the ASK of the SNP certificate chain. -// AMD documentation on certificate tables can be found in section 4.1.8.1, revision 2.03 "SEV-ES Guest-Hypervisor Communication Block Standardization". 
-// https://www.amd.com/content/dam/amd/en/documents/epyc-technical-docs/specifications/56421.pdf -func parseSNPCertTable(certs []byte) (vcekPEM []byte, certChain []byte, err error) { - certTable := abi.CertTable{} - if err := certTable.Unmarshal(certs); err != nil { - return nil, nil, fmt.Errorf("unmarshalling SNP certificate table: %w", err) - } - - vcekRaw, err := certTable.GetByGUIDString(abi.VcekGUID) - if err != nil { - return nil, nil, fmt.Errorf("getting VCEK certificate: %w", err) - } - - // An optional check for certificate well-formedness. vcekRaw == cert.Raw. - vcek, err := x509.ParseCertificate(vcekRaw) - if err != nil { - return nil, nil, fmt.Errorf("parsing certificate: %w", err) - } - - vcekPEM = pem.EncodeToMemory(&pem.Block{ - Type: "CERTIFICATE", - Bytes: vcek.Raw, - }) - - var askPEM []byte - if askRaw, err := certTable.GetByGUIDString(abi.AskGUID); err == nil { - ask, err := x509.ParseCertificate(askRaw) - if err != nil { - return nil, nil, fmt.Errorf("parsing ASK certificate: %w", err) - } - - askPEM = pem.EncodeToMemory(&pem.Block{ - Type: "CERTIFICATE", - Bytes: ask.Raw, - }) - } - - return vcekPEM, askPEM, nil -} diff --git a/internal/attestation/gcp/snp/snp.go b/internal/attestation/gcp/snp/snp.go deleted file mode 100644 index d1e42728f..000000000 --- a/internal/attestation/gcp/snp/snp.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright (c) Edgeless Systems GmbH - -SPDX-License-Identifier: BUSL-1.1 -*/ - -/* -# GCP SEV-SNP attestation - -Google offers [confidential VMs], utilizing AMD SEV-SNP to provide memory encryption. - -Each SEV-SNP VM comes with a [virtual Trusted Platform Module (vTPM)]. -This vTPM can be used to generate encryption keys unique to the VM or to attest the platform's boot chain. -We can use the vTPM to verify the VM is running on AMD SEV-SNP enabled hardware and booted the expected OS image, allowing us to bootstrap a constellation cluster. - -# Issuer - -Retrieves an SEV-SNP attestation statement for the VM it's running in. Then, it generates a TPM attestation statement, binding the SEV-SNP attestation statement to it by including its hash in the TPM attestation statement. -Without binding the SEV-SNP attestation statement to the TPM attestation statement, the SEV-SNP attestation statement could be used in a different VM. Furthermore, it's important to first create the SEV-SNP attestation statement -and then the TPM attestation statement, as otherwise, a non-CVM could be used to create a valid TPM attestation statement, and then later swap the SEV-SNP attestation statement with one from a CVM. -Additionally project ID, zone, and instance name are fetched from the metadata server and attached to the attestation statement. - -# Validator - -First, it verifies the SEV-SNP attestation statement by checking the signatures and claims. Then, it verifies the TPM attestation by using a -public key provided by Google's API corresponding to the project ID, zone, instance name tuple attached to the attestation document, and confirms whether the SEV-SNP attestation statement is bound to the TPM attestation statement. - -# Problems - - - We have to trust Google - - Since the vTPM is provided by Google, and they could do whatever they want with it, we have no save proof of the VMs actually being confidential. - - - The provided vTPM has no endorsement certificate for its attestation key - - Without a certificate signing the authenticity of any endorsement keys we have no way of establishing a chain of trust. 
- Instead, we have to rely on Google's API to provide us with the public key of the vTPM's endorsement key. - -[confidential VMs]: https://cloud.google.com/compute/confidential-vm/docs/about-cvm -[virtual Trusted Platform Module (vTPM)]: https://cloud.google.com/security/shielded-cloud/shielded-vm#vtpm -*/ -package snp diff --git a/internal/attestation/gcp/snp/validator.go b/internal/attestation/gcp/snp/validator.go deleted file mode 100644 index 2dffff7b6..000000000 --- a/internal/attestation/gcp/snp/validator.go +++ /dev/null @@ -1,206 +0,0 @@ -/* -Copyright (c) Edgeless Systems GmbH - -SPDX-License-Identifier: BUSL-1.1 -*/ - -package snp - -import ( - "context" - "crypto" - "crypto/x509" - "encoding/json" - "fmt" - - "github.com/edgelesssys/constellation/v2/internal/attestation" - "github.com/edgelesssys/constellation/v2/internal/attestation/gcp" - "github.com/edgelesssys/constellation/v2/internal/attestation/snp" - "github.com/edgelesssys/constellation/v2/internal/attestation/variant" - "github.com/edgelesssys/constellation/v2/internal/attestation/vtpm" - "github.com/edgelesssys/constellation/v2/internal/config" - "github.com/google/go-sev-guest/abi" - "github.com/google/go-sev-guest/kds" - "github.com/google/go-sev-guest/proto/sevsnp" - "github.com/google/go-sev-guest/validate" - "github.com/google/go-sev-guest/verify" - "github.com/google/go-sev-guest/verify/trust" - "github.com/google/go-tpm-tools/proto/attest" -) - -// Validator for GCP SEV-SNP / TPM attestation. -type Validator struct { - variant.GCPSEVSNP - *vtpm.Validator - cfg *config.GCPSEVSNP - - // reportValidator validates a SNP report and is required for testing. - reportValidator snpReportValidator - - // gceKeyGetter gets the public key of the EK from the GCE metadata API. - gceKeyGetter func(ctx context.Context, attDoc vtpm.AttestationDocument, _ []byte) (crypto.PublicKey, error) - - log attestation.Logger -} - -// NewValidator creates a new Validator. -func NewValidator(cfg *config.GCPSEVSNP, log attestation.Logger) (*Validator, error) { - getGCEKey, err := gcp.TrustedKeyGetter(variant.GCPSEVSNP{}, gcp.NewRESTClient) - if err != nil { - return nil, fmt.Errorf("creating trusted key getter: %w", err) - } - - v := &Validator{ - cfg: cfg, - reportValidator: &gcpValidator{httpsGetter: trust.DefaultHTTPSGetter(), verifier: &reportVerifierImpl{}, validator: &reportValidatorImpl{}}, - gceKeyGetter: getGCEKey, - log: log, - } - - v.Validator = vtpm.NewValidator( - cfg.Measurements, - v.getTrustedKey, - func(_ vtpm.AttestationDocument, _ *attest.MachineState) error { return nil }, - log, - ) - return v, nil -} - -// getTrustedKey returns TPM endorsement key provided through the GCE metadata API. -func (v *Validator) getTrustedKey(ctx context.Context, attDoc vtpm.AttestationDocument, extraData []byte) (crypto.PublicKey, error) { - if len(extraData) > 64 { - return nil, fmt.Errorf("extra data too long: %d, should be 64 bytes at most", len(extraData)) - } - var extraData64 [64]byte - copy(extraData64[:], extraData) - - if err := v.reportValidator.validate(attDoc, (*x509.Certificate)(&v.cfg.AMDSigningKey), (*x509.Certificate)(&v.cfg.AMDRootKey), extraData64, v.cfg, v.log); err != nil { - return nil, fmt.Errorf("validating SNP report: %w", err) - } - - ekPub, err := v.gceKeyGetter(ctx, attDoc, nil) - if err != nil { - return nil, fmt.Errorf("getting TPM endorsement key: %w", err) - } - - return ekPub, nil -} - -// snpReportValidator validates a given SNP report. 
-type snpReportValidator interface { - validate(attestation vtpm.AttestationDocument, ask *x509.Certificate, ark *x509.Certificate, ak [64]byte, config *config.GCPSEVSNP, log attestation.Logger) error -} - -// gcpValidator implements the validation for GCP SEV-SNP attestation. -// The properties exist for unittesting. -type gcpValidator struct { - verifier reportVerifier - validator reportValidator - httpsGetter trust.HTTPSGetter -} - -type reportVerifier interface { - SnpAttestation(att *sevsnp.Attestation, opts *verify.Options) error -} -type reportValidator interface { - SnpAttestation(att *sevsnp.Attestation, opts *validate.Options) error -} - -type reportValidatorImpl struct{} - -func (r *reportValidatorImpl) SnpAttestation(att *sevsnp.Attestation, opts *validate.Options) error { - return validate.SnpAttestation(att, opts) -} - -type reportVerifierImpl struct{} - -func (r *reportVerifierImpl) SnpAttestation(att *sevsnp.Attestation, opts *verify.Options) error { - return verify.SnpAttestation(att, opts) -} - -// validate the report by checking if it has a valid VCEK signature. -// The certificate chain ARK -> ASK -> VCEK is also validated. -// Checks that the report's userData matches the connection's userData. -func (a *gcpValidator) validate(attestation vtpm.AttestationDocument, ask *x509.Certificate, ark *x509.Certificate, reportData [64]byte, config *config.GCPSEVSNP, log attestation.Logger) error { - var info snp.InstanceInfo - if err := json.Unmarshal(attestation.InstanceInfo, &info); err != nil { - return fmt.Errorf("unmarshalling instance info: %w", err) - } - - certchain := snp.NewCertificateChain(ask, ark) - - att, err := info.AttestationWithCerts(a.httpsGetter, certchain, log) - if err != nil { - return fmt.Errorf("getting attestation with certs: %w", err) - } - - verifyOpts, err := getVerifyOpts(att) - if err != nil { - return fmt.Errorf("getting verify options: %w", err) - } - - if err := a.verifier.SnpAttestation(att, verifyOpts); err != nil { - return fmt.Errorf("verifying SNP attestation: %w", err) - } - - validateOpts := &validate.Options{ - // Check that the attestation key's digest is included in the report. - ReportData: reportData[:], - GuestPolicy: abi.SnpPolicy{ - Debug: false, // Debug means the VM can be decrypted by the host for debugging purposes and thus is not allowed. - SMT: true, // Allow Simultaneous Multi-Threading (SMT). Normally, we would want to disable SMT - // but GCP machines are currently facing issues if it's disabled - }, - VMPL: new(int), // Checks that Virtual Machine Privilege Level (VMPL) is 0. - // This checks that the reported LaunchTCB version is equal or greater than the minimum specified in the config. - // We don't specify Options.MinimumTCB as it only restricts the allowed TCB for Current_ and Reported_TCB. - // Because we allow Options.ProvisionalFirmware, there is not security gained in also checking Current_ and Reported_TCB. - // We always have to check Launch_TCB as this value indicated the smallest TCB version a VM has seen during - // it's lifetime. - MinimumLaunchTCB: kds.TCBParts{ - BlSpl: config.BootloaderVersion.Value, // Bootloader - TeeSpl: config.TEEVersion.Value, // TEE (Secure OS) - SnpSpl: config.SNPVersion.Value, // SNP - UcodeSpl: config.MicrocodeVersion.Value, // Microcode - }, - // Check that CurrentTCB >= CommittedTCB. - PermitProvisionalFirmware: true, - } - - // Checks if the attestation report matches the given constraints. 
- // Some constraints are implicitly checked by validate.SnpAttestation: - // - the report is not expired - if err := a.validator.SnpAttestation(att, validateOpts); err != nil { - return fmt.Errorf("validating SNP attestation: %w", err) - } - - return nil -} - -func getVerifyOpts(att *sevsnp.Attestation) (*verify.Options, error) { - ask, err := x509.ParseCertificate(att.CertificateChain.AskCert) - if err != nil { - return nil, fmt.Errorf("parsing ASK certificate: %w", err) - } - ark, err := x509.ParseCertificate(att.CertificateChain.ArkCert) - if err != nil { - return nil, fmt.Errorf("parsing ARK certificate: %w", err) - } - - verifyOpts := &verify.Options{ - DisableCertFetching: true, - TrustedRoots: map[string][]*trust.AMDRootCerts{ - "Milan": { - { - Product: "Milan", - ProductCerts: &trust.ProductCerts{ - Ask: ask, - Ark: ark, - }, - }, - }, - }, - } - - return verifyOpts, nil -} diff --git a/internal/attestation/gcp/validator.go b/internal/attestation/gcp/validator.go new file mode 100644 index 000000000..310a33b55 --- /dev/null +++ b/internal/attestation/gcp/validator.go @@ -0,0 +1,120 @@ +/* +Copyright (c) Edgeless Systems GmbH + +SPDX-License-Identifier: AGPL-3.0-only +*/ + +package gcp + +import ( + "context" + "crypto" + "crypto/x509" + "encoding/json" + "encoding/pem" + "fmt" + + compute "cloud.google.com/go/compute/apiv1" + "cloud.google.com/go/compute/apiv1/computepb" + "github.com/edgelesssys/constellation/v2/internal/attestation" + "github.com/edgelesssys/constellation/v2/internal/attestation/variant" + "github.com/edgelesssys/constellation/v2/internal/attestation/vtpm" + "github.com/edgelesssys/constellation/v2/internal/config" + "github.com/google/go-tpm-tools/proto/attest" + "github.com/googleapis/gax-go/v2" + "google.golang.org/api/option" +) + +const minimumGceVersion = 1 + +// Validator for GCP confidential VM attestation. +type Validator struct { + variant.GCPSEVES + *vtpm.Validator + + restClient func(context.Context, ...option.ClientOption) (gcpRestClient, error) +} + +// NewValidator initializes a new GCP validator with the provided PCR values. +func NewValidator(cfg *config.GCPSEVES, log attestation.Logger) *Validator { + v := &Validator{ + restClient: newInstanceClient, + } + v.Validator = vtpm.NewValidator( + cfg.Measurements, + v.trustedKeyFromGCEAPI, + validateCVM, + log, + ) + + return v +} + +type gcpRestClient interface { + GetShieldedInstanceIdentity(ctx context.Context, req *computepb.GetShieldedInstanceIdentityInstanceRequest, opts ...gax.CallOption) (*computepb.ShieldedInstanceIdentity, error) + Close() error +} + +type instanceClient struct { + *compute.InstancesClient +} + +func newInstanceClient(ctx context.Context, opts ...option.ClientOption) (gcpRestClient, error) { + c, err := compute.NewInstancesRESTClient(ctx, opts...) + if err != nil { + return nil, err + } + return &instanceClient{c}, nil +} + +// trustedKeyFromGCEAPI queries the GCE API for a shieldedVM's public signing key. +// This key can be used to verify attestation statements issued by the VM. 
+func (v *Validator) trustedKeyFromGCEAPI(ctx context.Context, attDoc vtpm.AttestationDocument, _ []byte) (crypto.PublicKey, error) { + client, err := v.restClient(ctx) + if err != nil { + return nil, fmt.Errorf("creating GCE client: %w", err) + } + defer client.Close() + + var instanceInfo attest.GCEInstanceInfo + if err := json.Unmarshal(attDoc.InstanceInfo, &instanceInfo); err != nil { + return nil, err + } + + instance, err := client.GetShieldedInstanceIdentity(ctx, &computepb.GetShieldedInstanceIdentityInstanceRequest{ + Instance: instanceInfo.GetInstanceName(), + Project: instanceInfo.GetProjectId(), + Zone: instanceInfo.GetZone(), + }) + if err != nil { + return nil, fmt.Errorf("retrieving VM identity: %w", err) + } + + if instance.SigningKey == nil || instance.SigningKey.EkPub == nil { + return nil, fmt.Errorf("received no signing key from GCP API") + } + + // Parse the signing key return by GetShieldedInstanceIdentity + block, _ := pem.Decode([]byte(*instance.SigningKey.EkPub)) + if block == nil || block.Type != "PUBLIC KEY" { + return nil, fmt.Errorf("failed to decode PEM block containing public key") + } + + return x509.ParsePKIXPublicKey(block.Bytes) +} + +// validateCVM checks that the machine state represents a GCE AMD-SEV VM. +func validateCVM(_ vtpm.AttestationDocument, state *attest.MachineState) error { + gceVersion := state.Platform.GetGceVersion() + if gceVersion < minimumGceVersion { + return fmt.Errorf("outdated GCE version: %v (require >= %v)", gceVersion, minimumGceVersion) + } + + tech := state.Platform.Technology + wantTech := attest.GCEConfidentialTechnology_AMD_SEV + if tech != wantTech { + return fmt.Errorf("unexpected confidential technology: %v (expected: %v)", tech, wantTech) + } + + return nil +} diff --git a/internal/attestation/gcp/es/validator_test.go b/internal/attestation/gcp/validator_test.go similarity index 91% rename from internal/attestation/gcp/es/validator_test.go rename to internal/attestation/gcp/validator_test.go index fc3783594..203809a4f 100644 --- a/internal/attestation/gcp/es/validator_test.go +++ b/internal/attestation/gcp/validator_test.go @@ -1,10 +1,10 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ -package es +package gcp import ( "context" @@ -14,8 +14,6 @@ import ( "testing" "cloud.google.com/go/compute/apiv1/computepb" - "github.com/edgelesssys/constellation/v2/internal/attestation/gcp" - "github.com/edgelesssys/constellation/v2/internal/attestation/variant" "github.com/edgelesssys/constellation/v2/internal/attestation/vtpm" "github.com/google/go-tpm-tools/proto/attest" "github.com/googleapis/gax-go/v2" @@ -89,7 +87,7 @@ Y+t5OxL3kL15VzY1Ob0d5cMCAwEAAQ== testCases := map[string]struct { instanceInfo []byte - getClient func(ctx context.Context, opts ...option.ClientOption) (gcp.CVMRestClient, error) + getClient func(ctx context.Context, opts ...option.ClientOption) (gcpRestClient, error) wantErr bool }{ "success": { @@ -148,12 +146,12 @@ Y+t5OxL3kL15VzY1Ob0d5cMCAwEAAQ== t.Run(name, func(t *testing.T) { assert := assert.New(t) + v := &Validator{ + restClient: tc.getClient, + } attDoc := vtpm.AttestationDocument{InstanceInfo: tc.instanceInfo} - getTrustedKey, err := gcp.TrustedKeyGetter(variant.GCPSEVES{}, tc.getClient) - require.NoError(t, err) - - out, err := getTrustedKey(t.Context(), attDoc, nil) + out, err := v.trustedKeyFromGCEAPI(context.Background(), attDoc, nil) if tc.wantErr { assert.Error(err) @@ -177,8 +175,8 @@ type fakeInstanceClient struct { ident 
*computepb.ShieldedInstanceIdentity } -func prepareFakeClient(ident *computepb.ShieldedInstanceIdentity, newErr, getIdentErr error) func(ctx context.Context, opts ...option.ClientOption) (gcp.CVMRestClient, error) { - return func(_ context.Context, _ ...option.ClientOption) (gcp.CVMRestClient, error) { +func prepareFakeClient(ident *computepb.ShieldedInstanceIdentity, newErr, getIdentErr error) func(ctx context.Context, opts ...option.ClientOption) (gcpRestClient, error) { + return func(_ context.Context, _ ...option.ClientOption) (gcpRestClient, error) { return &fakeInstanceClient{ getIdentErr: getIdentErr, ident: ident, diff --git a/internal/attestation/idkeydigest/idkeydigest.go b/internal/attestation/idkeydigest/idkeydigest.go index 4e97c6ef0..45b5a54c0 100644 --- a/internal/attestation/idkeydigest/idkeydigest.go +++ b/internal/attestation/idkeydigest/idkeydigest.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package idkeydigest provides type definitions for the `idkeydigest` value of SEV-SNP attestation. diff --git a/internal/attestation/idkeydigest/idkeydigest_test.go b/internal/attestation/idkeydigest/idkeydigest_test.go index 85ea0bf73..9f83b20e7 100644 --- a/internal/attestation/idkeydigest/idkeydigest_test.go +++ b/internal/attestation/idkeydigest/idkeydigest_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package idkeydigest diff --git a/internal/attestation/initialize/initialize.go b/internal/attestation/initialize/initialize.go index 5916e6a87..65bfe349c 100644 --- a/internal/attestation/initialize/initialize.go +++ b/internal/attestation/initialize/initialize.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package initialize implements functions to mark a node as initialized in the context of cluster attestation. 
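
The new internal/attestation/gcp/validator.go above keeps GCE API access behind the small gcpRestClient interface, which is what lets validator_test.go swap in prepareFakeClient instead of a real compute.InstancesClient. Below is a minimal, test-style sketch of that pattern in package gcp; it is not part of the changeset. The project, zone, instance name and pemPubKey values are placeholders, and the computepb.ShieldedInstanceIdentityEntry field layout plus the proto.String helper are assumed from their respective libraries; pemPubKey must hold a genuine PEM "PUBLIC KEY" block for the final x509.ParsePKIXPublicKey call inside trustedKeyFromGCEAPI to succeed.

package gcp

import (
	"context"
	"encoding/json"
	"fmt"

	"cloud.google.com/go/compute/apiv1/computepb"
	"github.com/edgelesssys/constellation/v2/internal/attestation/vtpm"
	"github.com/google/go-tpm-tools/proto/attest"
	"github.com/googleapis/gax-go/v2"
	"google.golang.org/api/option"
	"google.golang.org/protobuf/proto"
)

// stubIdentityClient satisfies gcpRestClient and returns a fixed identity,
// mirroring the fakeInstanceClient used in validator_test.go.
type stubIdentityClient struct {
	ident *computepb.ShieldedInstanceIdentity
}

func (s stubIdentityClient) GetShieldedInstanceIdentity(_ context.Context, _ *computepb.GetShieldedInstanceIdentityInstanceRequest, _ ...gax.CallOption) (*computepb.ShieldedInstanceIdentity, error) {
	return s.ident, nil
}

func (s stubIdentityClient) Close() error { return nil }

// exampleTrustedKey shows the control flow only: build a Validator with a
// stubbed restClient, marshal the GCE instance info as it travels inside the
// attestation document, and retrieve the signing key from the stub.
func exampleTrustedKey(pemPubKey string) error {
	ident := &computepb.ShieldedInstanceIdentity{
		SigningKey: &computepb.ShieldedInstanceIdentityEntry{EkPub: proto.String(pemPubKey)},
	}
	v := &Validator{
		restClient: func(_ context.Context, _ ...option.ClientOption) (gcpRestClient, error) {
			return stubIdentityClient{ident: ident}, nil
		},
	}

	// trustedKeyFromGCEAPI unmarshals this JSON back into attest.GCEInstanceInfo.
	instanceInfo, err := json.Marshal(&attest.GCEInstanceInfo{
		InstanceName: "constellation-test-vm",
		ProjectId:    "test-project",
		Zone:         "europe-west3-b",
	})
	if err != nil {
		return err
	}

	key, err := v.trustedKeyFromGCEAPI(context.Background(), vtpm.AttestationDocument{InstanceInfo: instanceInfo}, nil)
	if err != nil {
		return err
	}
	fmt.Printf("attestation key type: %T\n", key)
	return nil
}

In production the restClient field stays at its default, newInstanceClient, so the same code path talks to the real GCE API; the stub only replaces the network edge, not the PEM decoding or key parsing.
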
diff --git a/internal/attestation/initialize/initialize_test.go b/internal/attestation/initialize/initialize_test.go index a3e386fd4..bd31e60f0 100644 --- a/internal/attestation/initialize/initialize_test.go +++ b/internal/attestation/initialize/initialize_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package initialize diff --git a/internal/attestation/measurements/fetchmeasurements.go b/internal/attestation/measurements/fetchmeasurements.go index 4c7d1026a..7720d1a59 100644 --- a/internal/attestation/measurements/fetchmeasurements.go +++ b/internal/attestation/measurements/fetchmeasurements.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package measurements diff --git a/internal/attestation/measurements/fetchmeasurements_test.go b/internal/attestation/measurements/fetchmeasurements_test.go index e9af9edb2..d79a77a41 100644 --- a/internal/attestation/measurements/fetchmeasurements_test.go +++ b/internal/attestation/measurements/fetchmeasurements_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package measurements @@ -141,7 +141,7 @@ func TestFetchMeasurements(t *testing.T) { t.Run(name, func(t *testing.T) { assert := assert.New(t) sut := NewVerifyFetcher(tc.cosign, tc.rekor, client) - m, err := sut.FetchAndVerifyMeasurements(t.Context(), "v999.999.999", cloudprovider.GCP, variant.GCPSEVES{}, tc.noVerify) + m, err := sut.FetchAndVerifyMeasurements(context.Background(), "v999.999.999", cloudprovider.GCP, variant.GCPSEVES{}, tc.noVerify) if tc.wantErr { assert.Error(err) if tc.asRekorErr { diff --git a/internal/attestation/measurements/measurement-generator/generate.go b/internal/attestation/measurements/measurement-generator/generate.go index f5c71a8e4..b552c6b7d 100644 --- a/internal/attestation/measurements/measurement-generator/generate.go +++ b/internal/attestation/measurements/measurement-generator/generate.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package main @@ -84,9 +84,9 @@ func main() { log.Println("Found", variant) returnStmtCtr++ // retrieve and validate measurements for the given CSP and image - measurements := mustGetMeasurements(ctx, rekor, provider, variant, defaultImage) + measuremnts := mustGetMeasurements(ctx, rekor, provider, variant, defaultImage) // replace the return statement with a composite literal containing the validated measurements - clause.Values[0] = measurementsCompositeLiteral(measurements) + clause.Values[0] = measurementsCompositeLiteral(measuremnts) } return true }, nil, @@ -267,8 +267,6 @@ func attestationVariantFromGoIdentifier(identifier string) (variant.Variant, err return variant.AWSNitroTPM{}, nil case "GCPSEVES": return variant.GCPSEVES{}, nil - case "GCPSEVSNP": - return variant.GCPSEVSNP{}, nil case "AzureSEVSNP": return variant.AzureSEVSNP{}, nil case "AzureTDX": diff --git a/internal/attestation/measurements/measurement-generator/generate_test.go b/internal/attestation/measurements/measurement-generator/generate_test.go index c7ea5df5c..e55c3c1ef 100644 --- a/internal/attestation/measurements/measurement-generator/generate_test.go +++ b/internal/attestation/measurements/measurement-generator/generate_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH 
-SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package main diff --git a/internal/attestation/measurements/measurements.go b/internal/attestation/measurements/measurements.go index 13a881996..62172dd26 100644 --- a/internal/attestation/measurements/measurements.go +++ b/internal/attestation/measurements/measurements.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* @@ -26,7 +26,6 @@ import ( "net/url" "sort" "strconv" - "strings" "github.com/edgelesssys/constellation/v2/internal/attestation/variant" "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider" @@ -331,15 +330,6 @@ func (m *M) UnmarshalYAML(unmarshal func(any) error) error { return nil } -// String returns a string representation of the measurements. -func (m M) String() string { - var returnString string - for i, measurement := range m { - returnString = strings.Join([]string{returnString, fmt.Sprintf("%d: 0x%s", i, hex.EncodeToString(measurement.Expected))}, ",") - } - return returnString -} - func (m *M) fromImageMeasurementsV2( measurements ImageMeasurementsV2, wantVersion versionsapi.Version, csp cloudprovider.Provider, attestationVariant variant.Variant, @@ -480,17 +470,17 @@ func (m *Measurement) unmarshal(eM encodedMeasurement) error { // WithAllBytes returns a measurement value where all bytes are set to b. Takes a dynamic length as input. // Expected are either 32 bytes (PCRMeasurementLength) or 48 bytes (TDXMeasurementLength). // Over inputs are possible in this function, but potentially rejected elsewhere. -func WithAllBytes(b byte, validationOpt MeasurementValidationOption, length int) Measurement { +func WithAllBytes(b byte, validationOpt MeasurementValidationOption, len int) Measurement { return Measurement{ - Expected: bytes.Repeat([]byte{b}, length), + Expected: bytes.Repeat([]byte{b}, len), ValidationOpt: validationOpt, } } // PlaceHolderMeasurement returns a measurement with placeholder values for Expected. -func PlaceHolderMeasurement(length int) Measurement { +func PlaceHolderMeasurement(len int) Measurement { return Measurement{ - Expected: bytes.Repeat([]byte{0x12, 0x34}, length/2), + Expected: bytes.Repeat([]byte{0x12, 0x34}, len/2), ValidationOpt: Enforce, } } @@ -516,9 +506,6 @@ func DefaultsFor(provider cloudprovider.Provider, attestationVariant variant.Var case provider == cloudprovider.GCP && attestationVariant == variant.GCPSEVES{}: return gcp_GCPSEVES.Copy() - case provider == cloudprovider.GCP && attestationVariant == variant.GCPSEVSNP{}: - return gcp_GCPSEVSNP.Copy() - case provider == cloudprovider.OpenStack && attestationVariant == variant.QEMUVTPM{}: return openstack_QEMUVTPM.Copy() diff --git a/internal/attestation/measurements/measurements_enterprise.go b/internal/attestation/measurements/measurements_enterprise.go index 210a654bc..226563848 100644 --- a/internal/attestation/measurements/measurements_enterprise.go +++ b/internal/attestation/measurements/measurements_enterprise.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package measurements @@ -13,20 +13,16 @@ package measurements // a build tag. // The enterprise build tag is required to validate the measurements using production // sigstore certificates. -// -// To add measurements for a new variant, add a new entry as `_ = M{}` and run the generate tool. -// Entries defined as `_ M` are ignored. 
// revive:disable:var-naming var ( - aws_AWSNitroTPM = M{0: {Expected: []byte{0x73, 0x7f, 0x76, 0x7a, 0x12, 0xf5, 0x4e, 0x70, 0xee, 0xcb, 0xc8, 0x68, 0x40, 0x11, 0x32, 0x3a, 0xe2, 0xfe, 0x2d, 0xd9, 0xf9, 0x07, 0x85, 0x57, 0x79, 0x69, 0xd7, 0xa2, 0x01, 0x3e, 0x8c, 0x12}, ValidationOpt: WarnOnly}, 2: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 3: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 4: {Expected: []byte{0x65, 0xbf, 0xea, 0xea, 0x27, 0xa7, 0x72, 0xde, 0x9b, 0x91, 0x5d, 0x9e, 0x95, 0xe7, 0xd5, 0x2e, 0x4c, 0xe0, 0xf9, 0x47, 0x4e, 0x5f, 0x8f, 0x54, 0xd0, 0xe4, 0x1e, 0x6f, 0x51, 0xfd, 0xe1, 0x58}, ValidationOpt: Enforce}, 6: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 8: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 9: {Expected: []byte{0x46, 0x75, 0x96, 0x2f, 0xff, 0x48, 0x3d, 0x43, 0x4a, 0x17, 0x28, 0x9b, 0x02, 0x9d, 0xb7, 0x9c, 0xee, 0x9b, 0x34, 0xc5, 0xdf, 0x4c, 0xc5, 0xbd, 0x38, 0x69, 0x94, 0x8f, 0x6c, 0x83, 0x3e, 0x09}, ValidationOpt: Enforce}, 11: {Expected: []byte{0x15, 0xa7, 0xd9, 0xd4, 0x2d, 0xd7, 0xfd, 0x0b, 0x3d, 0x93, 0x70, 0xa4, 0xff, 0x75, 0x06, 0x24, 0x18, 0xb9, 0x28, 0x8b, 0x25, 0x1a, 0x1d, 0x6a, 0x88, 0x81, 0xf1, 0x84, 0xf6, 0x4e, 0x24, 0x30}, ValidationOpt: Enforce}, 12: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 13: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 14: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: WarnOnly}, 15: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}} - aws_AWSSEVSNP = M{0: {Expected: []byte{0xd6, 0xdf, 0x85, 0x53, 0x58, 0xf5, 0xb1, 0x0f, 0x06, 0xf0, 0xfa, 0xb3, 0xf4, 0x08, 0xad, 0x26, 0xcd, 0x16, 0x5a, 0x29, 0x49, 0xba, 0xd6, 0x9e, 0x2c, 0xc7, 0x56, 0x92, 0x52, 0x9e, 0x66, 0x2a}, ValidationOpt: WarnOnly}, 2: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 3: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 
0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 4: {Expected: []byte{0x3c, 0xc3, 0x03, 0xc6, 0x4f, 0x85, 0x5f, 0x01, 0x65, 0xf6, 0xf0, 0x94, 0x9f, 0xd8, 0x67, 0xf2, 0x32, 0x73, 0xc2, 0xe8, 0xa8, 0x6d, 0xb7, 0x28, 0x38, 0xf8, 0x98, 0x9e, 0x9a, 0x16, 0x31, 0x58}, ValidationOpt: Enforce}, 6: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 8: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 9: {Expected: []byte{0x7e, 0xd4, 0xe2, 0x4a, 0xb5, 0x2f, 0x91, 0x2c, 0x5b, 0x13, 0x0a, 0xef, 0xef, 0x09, 0x56, 0x00, 0xbc, 0xf4, 0x6c, 0x5a, 0x98, 0xd5, 0x99, 0x10, 0xb4, 0x83, 0x76, 0xaa, 0x0b, 0x8d, 0x50, 0xbd}, ValidationOpt: Enforce}, 11: {Expected: []byte{0xfc, 0x6c, 0x8b, 0xa3, 0x44, 0x6f, 0x79, 0x33, 0x8e, 0x09, 0xce, 0xac, 0xeb, 0x80, 0xf9, 0x15, 0x0b, 0x4a, 0xe6, 0x17, 0x96, 0x3d, 0xcd, 0xb8, 0xe7, 0x9d, 0x90, 0x44, 0xe1, 0x4a, 0x52, 0x85}, ValidationOpt: Enforce}, 12: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 13: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 14: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: WarnOnly}, 15: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}} - azure_AzureSEVSNP = M{1: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 2: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 3: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 4: {Expected: []byte{0xf7, 0xe1, 0x4d, 0xf7, 0xee, 0x08, 0xaf, 0xeb, 0x8b, 0x73, 0xb2, 0x90, 0x42, 0x68, 0xe6, 0xed, 0xe6, 0x5a, 0x3c, 0xbd, 0x62, 0x79, 0xb0, 0x0d, 0xa9, 0xb5, 0x5e, 0x0e, 0x4e, 0x73, 0x1a, 0x98}, ValidationOpt: Enforce}, 8: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 9: {Expected: []byte{0x7c, 0xdf, 0xd4, 0xaa, 0xa5, 
0x7f, 0x6b, 0xa9, 0xc3, 0xcb, 0xff, 0x3b, 0xe2, 0xc6, 0x70, 0xe2, 0x0f, 0x77, 0x6a, 0xea, 0xe0, 0x54, 0xf6, 0x63, 0xcf, 0x10, 0x58, 0x1b, 0xf0, 0x73, 0x0a, 0xaa}, ValidationOpt: Enforce}, 11: {Expected: []byte{0xa2, 0x8e, 0x42, 0x0a, 0xca, 0x11, 0xe5, 0x22, 0x00, 0xb9, 0xa8, 0x5b, 0x05, 0x03, 0x81, 0x48, 0x02, 0xf5, 0xda, 0x0d, 0xf3, 0xda, 0xdc, 0x9d, 0xb6, 0x69, 0xb3, 0x7c, 0x88, 0x51, 0xf8, 0x40}, ValidationOpt: Enforce}, 12: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 13: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 14: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: WarnOnly}, 15: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}} - azure_AzureTDX = M{1: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 2: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 3: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 4: {Expected: []byte{0xee, 0x09, 0xc4, 0x03, 0x0c, 0xda, 0x6b, 0x15, 0x4c, 0x33, 0x34, 0xcc, 0x5f, 0xe8, 0xd6, 0x03, 0x66, 0xed, 0x08, 0x65, 0x37, 0x54, 0x6c, 0x89, 0xac, 0x30, 0xb7, 0x5c, 0x64, 0xdd, 0xad, 0x34}, ValidationOpt: Enforce}, 8: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 9: {Expected: []byte{0x2c, 0xfa, 0x71, 0x11, 0xa6, 0x8f, 0xb6, 0xe4, 0x77, 0xbd, 0xf9, 0xeb, 0xc5, 0x03, 0x42, 0x83, 0x2c, 0x02, 0x45, 0xb6, 0xb8, 0x43, 0x76, 0x9e, 0x94, 0x43, 0xc2, 0x11, 0xae, 0x44, 0x1c, 0x0c}, ValidationOpt: Enforce}, 11: {Expected: []byte{0x20, 0xc6, 0x2f, 0x57, 0x12, 0x6f, 0x01, 0xa9, 0xde, 0x29, 0xb1, 0x3a, 0x4b, 0x23, 0x73, 0x54, 0xee, 0x03, 0xf8, 0x5e, 0xf2, 0x92, 0xb6, 0x7c, 0x5a, 0x05, 0xaa, 0xd9, 0xeb, 0xd5, 0x0c, 0x6d}, ValidationOpt: Enforce}, 12: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 13: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, 
ValidationOpt: Enforce}, 14: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: WarnOnly}, 15: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}} + aws_AWSNitroTPM = M{0: {Expected: []byte{0x73, 0x7f, 0x76, 0x7a, 0x12, 0xf5, 0x4e, 0x70, 0xee, 0xcb, 0xc8, 0x68, 0x40, 0x11, 0x32, 0x3a, 0xe2, 0xfe, 0x2d, 0xd9, 0xf9, 0x07, 0x85, 0x57, 0x79, 0x69, 0xd7, 0xa2, 0x01, 0x3e, 0x8c, 0x12}, ValidationOpt: WarnOnly}, 2: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 3: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 4: {Expected: []byte{0xb6, 0x86, 0xbc, 0x7c, 0xe9, 0xee, 0x11, 0x3c, 0x4e, 0xe4, 0x66, 0xf2, 0xce, 0x24, 0xdd, 0xef, 0x11, 0x39, 0x75, 0x7e, 0xe2, 0xd3, 0xa7, 0xcc, 0xd3, 0x8c, 0x5e, 0x34, 0x15, 0xb3, 0x60, 0x4e}, ValidationOpt: Enforce}, 6: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 8: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 9: {Expected: []byte{0xe4, 0x84, 0x0d, 0x74, 0xe5, 0xaa, 0xaf, 0x7a, 0x75, 0x27, 0xce, 0xd7, 0x28, 0xc0, 0xa7, 0x51, 0x24, 0x32, 0x61, 0x06, 0x14, 0xb7, 0x6a, 0xee, 0xc3, 0x43, 0xa6, 0x56, 0x47, 0xc5, 0x41, 0xed}, ValidationOpt: Enforce}, 11: {Expected: []byte{0xfa, 0x60, 0x0e, 0xf1, 0x9b, 0xd8, 0x81, 0x70, 0xa7, 0xa0, 0x34, 0x86, 0x79, 0x35, 0xe0, 0x4d, 0x48, 0xa0, 0xc2, 0x8c, 0xda, 0x9c, 0xa8, 0xbb, 0xdc, 0xce, 0x9b, 0x51, 0x04, 0x7d, 0xca, 0x2c}, ValidationOpt: Enforce}, 12: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 13: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 14: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: WarnOnly}, 15: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}} + aws_AWSSEVSNP = M{0: {Expected: []byte{0x7b, 0x06, 0x8c, 0x0c, 0x3a, 0xc2, 0x9a, 0xfe, 0x26, 0x41, 0x34, 0x53, 0x6b, 0x9b, 0xe2, 0x6f, 0x1d, 0x4c, 
0xcd, 0x57, 0x5b, 0x88, 0xd3, 0xc3, 0xce, 0xab, 0xf3, 0x6a, 0xc9, 0x9c, 0x02, 0x78}, ValidationOpt: WarnOnly}, 2: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 3: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 4: {Expected: []byte{0x93, 0x6b, 0x2f, 0xf8, 0x60, 0xa3, 0xfa, 0x97, 0x05, 0x46, 0xe9, 0x8c, 0x43, 0xb6, 0xdd, 0x42, 0x39, 0x34, 0x9d, 0x53, 0xe9, 0x10, 0xba, 0x04, 0x6c, 0xe9, 0x5a, 0x2e, 0x85, 0x9b, 0xbb, 0x2e}, ValidationOpt: Enforce}, 6: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 8: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 9: {Expected: []byte{0xf3, 0x96, 0x93, 0x03, 0x13, 0xa3, 0x67, 0x8c, 0xf9, 0xc5, 0x1d, 0x89, 0x34, 0xbc, 0xd1, 0xcf, 0xd0, 0xf6, 0x15, 0x56, 0x6c, 0xac, 0x3a, 0xee, 0xba, 0xbd, 0xe9, 0x71, 0x2e, 0x8b, 0xc1, 0xa1}, ValidationOpt: Enforce}, 11: {Expected: []byte{0xc3, 0xa8, 0x62, 0xa2, 0x72, 0x2e, 0xa9, 0x0d, 0x73, 0xf0, 0x51, 0x14, 0x4c, 0x2d, 0x79, 0x76, 0x87, 0x40, 0xb8, 0x45, 0xf5, 0x39, 0xa6, 0xab, 0x0d, 0x62, 0xe2, 0x2c, 0x9f, 0x84, 0x1b, 0x03}, ValidationOpt: Enforce}, 12: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 13: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 14: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: WarnOnly}, 15: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}} + azure_AzureSEVSNP = M{1: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 2: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 3: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 4: {Expected: []byte{0xe7, 0x66, 0xf4, 0x6e, 0xd4, 0x3f, 
0x14, 0x56, 0x49, 0xee, 0xdb, 0x05, 0xd3, 0xcc, 0xfe, 0xbd, 0x62, 0xee, 0xb8, 0xff, 0x9a, 0xac, 0x93, 0xb0, 0x3a, 0x10, 0x5a, 0x33, 0x9c, 0x41, 0x93, 0xb9}, ValidationOpt: Enforce}, 8: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 9: {Expected: []byte{0xd3, 0x01, 0x66, 0x75, 0xb2, 0xf2, 0xf5, 0x48, 0x0b, 0xc4, 0x4a, 0x58, 0xfd, 0xe3, 0x3b, 0x61, 0x08, 0xe3, 0xb4, 0x6c, 0x3e, 0xac, 0x6c, 0x3e, 0x54, 0xd6, 0x6d, 0xb3, 0x50, 0x09, 0xcc, 0xad}, ValidationOpt: Enforce}, 11: {Expected: []byte{0x2b, 0xce, 0x7d, 0x09, 0xec, 0x08, 0xdb, 0xa5, 0x0b, 0xae, 0x74, 0x5d, 0x5a, 0x46, 0xea, 0x3e, 0x61, 0xd4, 0x8e, 0xdb, 0x80, 0x41, 0x63, 0xb4, 0xac, 0xff, 0xf3, 0x56, 0x79, 0xdb, 0x83, 0x6f}, ValidationOpt: Enforce}, 12: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 13: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 14: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: WarnOnly}, 15: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}} + azure_AzureTDX = M{1: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 2: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 3: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 4: {Expected: []byte{0x77, 0xe4, 0x8c, 0xa5, 0x0d, 0x52, 0x9a, 0x3b, 0x71, 0xfd, 0x73, 0x5a, 0x79, 0x33, 0xcf, 0x5a, 0xe6, 0x19, 0x45, 0x9b, 0x86, 0x80, 0x70, 0x92, 0x81, 0xf6, 0x2c, 0x8b, 0xb4, 0x48, 0xfb, 0x38}, ValidationOpt: Enforce}, 8: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 9: {Expected: []byte{0x83, 0x1d, 0x7d, 0x4f, 0xbd, 0x1f, 0x8b, 0xa5, 0xd1, 0x5e, 0x77, 0x58, 0xe7, 0x81, 0xa4, 0x8f, 0xcf, 0xc3, 0xdc, 0x7f, 0x0c, 0xdb, 0xf9, 0x3b, 0xa5, 0x08, 0x6e, 0x89, 0x11, 0xf9, 0xec, 0x4a}, ValidationOpt: Enforce}, 11: {Expected: []byte{0x06, 0xfb, 0x71, 0xdf, 0xc3, 0xba, 0x72, 0xbd, 0x7b, 0xed, 0x9f, 0xa6, 0xa7, 0x44, 0x96, 0x68, 0xcd, 0x5b, 0xee, 0x09, 0x1d, 0x9e, 0x3e, 0x3c, 0x4e, 0xd0, 0xfb, 0x71, 0x6d, 0x90, 0x4f, 0x1d}, ValidationOpt: 
Enforce}, 12: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 13: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 14: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: WarnOnly}, 15: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}} azure_AzureTrustedLaunch M - gcp_GCPSEVES = M{1: {Expected: []byte{0x36, 0x95, 0xdc, 0xc5, 0x5e, 0x3a, 0xa3, 0x40, 0x27, 0xc2, 0x77, 0x93, 0xc8, 0x5c, 0x72, 0x3c, 0x69, 0x7d, 0x70, 0x8c, 0x42, 0xd1, 0xf7, 0x3b, 0xd6, 0xfa, 0x4f, 0x26, 0x60, 0x8a, 0x5b, 0x24}, ValidationOpt: WarnOnly}, 2: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 3: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 4: {Expected: []byte{0x3a, 0xec, 0xda, 0x76, 0xbf, 0xa0, 0x07, 0xc7, 0x5b, 0x10, 0x35, 0x50, 0x2c, 0x5d, 0x7a, 0xe6, 0xcc, 0xe0, 0x80, 0xd7, 0xe2, 0xb8, 0x00, 0x9c, 0xd8, 0x90, 0x9a, 0x7d, 0x87, 0xeb, 0x1c, 0x5a}, ValidationOpt: Enforce}, 6: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 8: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 9: {Expected: []byte{0xcc, 0x06, 0x30, 0x20, 0x3c, 0x92, 0x65, 0x55, 0x77, 0x3f, 0x88, 0x2d, 0x32, 0x29, 0xf6, 0xb5, 0xb3, 0xdc, 0xd1, 0xa0, 0xd2, 0x93, 0xfd, 0x41, 0xdd, 0xf4, 0xfd, 0xf0, 0x5a, 0x89, 0x36, 0x39}, ValidationOpt: Enforce}, 11: {Expected: []byte{0xcb, 0xcb, 0xf6, 0x3b, 0xc5, 0xf0, 0xf7, 0x84, 0xba, 0x2a, 0xe2, 0xfc, 0x87, 0xde, 0xa5, 0x95, 0xb0, 0xdd, 0x85, 0xae, 0x45, 0x95, 0xb0, 0xb9, 0x67, 0x3a, 0x30, 0xff, 0xd4, 0x5b, 0xbe, 0x42}, ValidationOpt: Enforce}, 12: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 13: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 14: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: WarnOnly}, 15: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}} - gcp_GCPSEVSNP = M{1: {Expected: []byte{0x36, 0x95, 0xdc, 0xc5, 0x5e, 0x3a, 0xa3, 0x40, 0x27, 0xc2, 0x77, 0x93, 0xc8, 0x5c, 0x72, 0x3c, 0x69, 0x7d, 0x70, 0x8c, 0x42, 0xd1, 0xf7, 0x3b, 0xd6, 0xfa, 0x4f, 0x26, 0x60, 0x8a, 0x5b, 0x24}, ValidationOpt: WarnOnly}, 2: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 3: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 4: {Expected: []byte{0x1c, 0x21, 0x78, 0x82, 0x9b, 0x71, 0x04, 0x9a, 0xcf, 0x8b, 0x83, 0x6e, 0xe0, 0x35, 0x04, 0xa8, 0x00, 0x36, 0x56, 0xad, 0x6f, 0x2d, 0xf8, 0x4f, 0x2f, 0x5d, 0xde, 0x27, 0x03, 0xec, 0x10, 0x5b}, ValidationOpt: Enforce}, 6: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 8: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 9: {Expected: []byte{0xad, 0xd3, 0x60, 0x79, 0xcd, 0x57, 0x84, 0x15, 0xa5, 0x48, 0xd2, 0x78, 0x2a, 0x7c, 0x10, 0x0f, 0x6c, 0xe4, 0x7c, 0xde, 0x43, 0xd8, 0x37, 0xf9, 0x07, 0xb6, 0x92, 0xb3, 0xbf, 0xec, 0x24, 0xb4}, ValidationOpt: Enforce}, 11: {Expected: []byte{0x81, 0x8e, 0x84, 0xee, 0x6b, 0x1b, 0x0c, 0x1a, 0xea, 0xfe, 0xae, 0x21, 0x49, 0x21, 0x34, 0x43, 0x2d, 0xdf, 0x20, 0xbd, 0x08, 0xe0, 0x94, 0x68, 0xc3, 0xe0, 0xd7, 0xa3, 0x19, 0x9f, 0x27, 0x83}, ValidationOpt: Enforce}, 12: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 13: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 14: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: WarnOnly}, 15: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}} - openstack_QEMUVTPM = M{4: {Expected: []byte{0xb0, 0x36, 0xf1, 0x26, 0x58, 0x82, 0x19, 0x07, 0xc1, 0x41, 0x98, 0x3c, 0x00, 0x45, 0xe8, 0xee, 0xc0, 0xc7, 0x1a, 0x58, 0x3c, 0x1f, 0x9b, 0x0f, 0xfe, 0xd7, 0xfd, 0x30, 0xdc, 0x84, 0xaa, 0x46}, ValidationOpt: Enforce}, 8: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 9: {Expected: []byte{0x6a, 0x4a, 0xfe, 0x4f, 0x76, 0x1e, 0xef, 0xdd, 0x65, 0x3c, 0x3d, 0xc8, 0x62, 0x73, 0x17, 0xb5, 0xec, 0xda, 0xc2, 0x7f, 0x40, 0x3f, 0x1d, 0xfd, 0x54, 0xa1, 0x39, 0x4f, 0x39, 0x43, 0x70, 0xea}, ValidationOpt: Enforce}, 11: {Expected: []byte{0x8c, 0xf1, 0x78, 0x9a, 0xe6, 0xe8, 0xb6, 0x01, 0xc6, 0x07, 0x61, 0x49, 0x6b, 0x8d, 0xc8, 0xaf, 0xda, 0xcc, 0xa1, 0xa9, 0x3e, 0xf1, 0xfa, 0x6c, 0x77, 0x29, 0x35, 0x3c, 0x13, 0xf4, 0x25, 0x1e}, ValidationOpt: Enforce}, 12: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 13: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 14: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: WarnOnly}, 15: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}} + gcp_GCPSEVES = M{1: {Expected: []byte{0x36, 0x95, 0xdc, 0xc5, 0x5e, 0x3a, 0xa3, 0x40, 0x27, 0xc2, 0x77, 0x93, 0xc8, 0x5c, 0x72, 0x3c, 0x69, 0x7d, 0x70, 0x8c, 0x42, 0xd1, 0xf7, 0x3b, 0xd6, 0xfa, 0x4f, 0x26, 0x60, 0x8a, 0x5b, 0x24}, ValidationOpt: WarnOnly}, 2: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 3: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 4: {Expected: []byte{0xb3, 0xc3, 0x6a, 0x88, 0xf7, 0xa3, 0x51, 0x4d, 0x25, 0xc5, 0xcc, 0x2b, 0x2a, 0x05, 0x47, 0xb5, 0xda, 0x76, 0x66, 0x2e, 0xe5, 0x90, 0x11, 0xb5, 0x29, 0xbc, 0xfc, 0x07, 0x62, 0x4b, 0xb9, 0x3f}, ValidationOpt: Enforce}, 6: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 8: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 9: {Expected: []byte{0x8a, 0x97, 0x88, 0x17, 0x3f, 0x55, 0x40, 0x9d, 0x5f, 0x6e, 0x90, 0xee, 0x0f, 0x9a, 0x22, 0x7a, 0xa6, 0x2f, 0xf7, 0xbc, 0x78, 0xd6, 0xbc, 0x85, 0x28, 0xd9, 0x75, 0xe7, 0x94, 0x28, 0x95, 0x85}, ValidationOpt: Enforce}, 11: {Expected: []byte{0x52, 0xca, 0xc5, 0xa6, 0x44, 0xb9, 0xf0, 0xc7, 0x5b, 0x32, 0x42, 0x03, 0x1f, 0x7c, 0x80, 0x03, 0xdb, 0xdc, 0x3c, 0xc7, 0xc4, 0x0b, 0xd3, 0x83, 0x8a, 0xef, 0x0c, 0x85, 0x7b, 0xbf, 0xf1, 0x8d}, 
ValidationOpt: Enforce}, 12: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 13: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 14: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: WarnOnly}, 15: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}} + openstack_QEMUVTPM = M{4: {Expected: []byte{0xc5, 0x70, 0xa4, 0xff, 0xba, 0xc6, 0x7b, 0x93, 0x48, 0x88, 0x6b, 0x11, 0xe2, 0x80, 0xa5, 0xf0, 0x43, 0xc1, 0x2f, 0xba, 0x8e, 0xb3, 0xfb, 0x36, 0x6d, 0x71, 0x8f, 0x7c, 0x85, 0x97, 0x44, 0x98}, ValidationOpt: Enforce}, 8: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 9: {Expected: []byte{0x45, 0x06, 0xb3, 0xfb, 0xcb, 0xd3, 0x27, 0x21, 0x2a, 0xb6, 0x52, 0xf8, 0x68, 0x65, 0x69, 0x88, 0x6e, 0xb5, 0x83, 0xd3, 0x97, 0xe0, 0x6a, 0x77, 0xa8, 0xdf, 0xeb, 0xb8, 0xe0, 0xa4, 0x01, 0xe2}, ValidationOpt: Enforce}, 11: {Expected: []byte{0x7c, 0xd6, 0xa4, 0xe5, 0x2c, 0x00, 0x42, 0x46, 0x3f, 0xe4, 0xd6, 0x07, 0x21, 0xc0, 0xc2, 0xff, 0xb4, 0xcd, 0xc1, 0xf9, 0x3d, 0xad, 0xd8, 0x8d, 0x48, 0xc2, 0x71, 0xef, 0xcc, 0x5f, 0x13, 0x14}, ValidationOpt: Enforce}, 12: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 13: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 14: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: WarnOnly}, 15: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}} qemu_QEMUTDX M - qemu_QEMUVTPM = M{4: {Expected: []byte{0x8e, 0xce, 0xaf, 0x83, 0x1f, 0xed, 0x91, 0x03, 0x5a, 0x71, 0xca, 0xd9, 0x5a, 0x54, 0x78, 0x46, 0xbc, 0x91, 0xa4, 0x55, 0xde, 0xb4, 0x59, 0xaa, 0xd2, 0xe5, 0x53, 0x7b, 0x32, 0x53, 0x41, 0xbd}, ValidationOpt: Enforce}, 8: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 9: {Expected: []byte{0x61, 0x05, 0x4a, 0x64, 0x2e, 0x86, 0x25, 0x48, 0x4d, 0x22, 0xdd, 0x91, 0x50, 0x8b, 0x35, 
0x27, 0x7e, 0x82, 0x9f, 0x52, 0x1a, 0x50, 0x93, 0xe3, 0xa7, 0x33, 0x14, 0x82, 0x51, 0x31, 0x49, 0xaa}, ValidationOpt: Enforce}, 11: {Expected: []byte{0xbd, 0x32, 0x39, 0xb7, 0x43, 0x9e, 0x3c, 0x85, 0x8f, 0x5a, 0x94, 0x49, 0xe1, 0x10, 0x6b, 0x66, 0x85, 0xc7, 0xa5, 0x5a, 0x79, 0x4a, 0xaf, 0x11, 0xb6, 0x68, 0x93, 0xae, 0x29, 0x2c, 0xde, 0x04}, ValidationOpt: Enforce}, 12: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 13: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 15: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}} + qemu_QEMUVTPM = M{4: {Expected: []byte{0xd7, 0x4d, 0x99, 0xcd, 0x10, 0xb3, 0xf6, 0x43, 0xd2, 0x91, 0xff, 0x6d, 0x88, 0x88, 0xe2, 0xe4, 0xc5, 0x5e, 0x6d, 0x48, 0xc8, 0x7a, 0xde, 0xac, 0xfb, 0xd3, 0xf5, 0x51, 0x3e, 0xb8, 0x2b, 0x9e}, ValidationOpt: Enforce}, 8: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 9: {Expected: []byte{0xea, 0x05, 0xc1, 0x74, 0x83, 0x05, 0x0e, 0x73, 0x63, 0x73, 0x77, 0x6d, 0xa6, 0x2b, 0xec, 0x0b, 0x3c, 0x69, 0x03, 0x22, 0x10, 0xd3, 0xaa, 0x9f, 0xe3, 0x3a, 0xa4, 0x0d, 0x5f, 0x37, 0x2c, 0x21}, ValidationOpt: Enforce}, 11: {Expected: []byte{0x91, 0xe2, 0x79, 0xac, 0x19, 0x51, 0x61, 0x6d, 0x0d, 0xac, 0x0b, 0xe4, 0x05, 0x20, 0x66, 0x5b, 0xb0, 0xe5, 0x1d, 0xb5, 0x94, 0xd9, 0x47, 0x41, 0xcf, 0x3f, 0x0e, 0x2e, 0x1e, 0xf1, 0x30, 0x43}, ValidationOpt: Enforce}, 12: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 13: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 15: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}} ) diff --git a/internal/attestation/measurements/measurements_oss.go b/internal/attestation/measurements/measurements_oss.go index 895a5d258..552d6bd26 100644 --- a/internal/attestation/measurements/measurements_oss.go +++ b/internal/attestation/measurements/measurements_oss.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package measurements @@ -64,15 +64,6 @@ var ( 13: WithAllBytes(0x00, Enforce, PCRMeasurementLength), uint32(PCRIndexClusterID): WithAllBytes(0x00, Enforce, PCRMeasurementLength), } - gcp_GCPSEVSNP = M{ - 4: PlaceHolderMeasurement(PCRMeasurementLength), - 8: WithAllBytes(0x00, Enforce, PCRMeasurementLength), - 9: 
PlaceHolderMeasurement(PCRMeasurementLength), - 11: WithAllBytes(0x00, Enforce, PCRMeasurementLength), - 12: PlaceHolderMeasurement(PCRMeasurementLength), - 13: WithAllBytes(0x00, Enforce, PCRMeasurementLength), - uint32(PCRIndexClusterID): WithAllBytes(0x00, Enforce, PCRMeasurementLength), - } openstack_QEMUVTPM = M{ 4: PlaceHolderMeasurement(PCRMeasurementLength), 8: WithAllBytes(0x00, Enforce, PCRMeasurementLength), diff --git a/internal/attestation/measurements/measurements_test.go b/internal/attestation/measurements/measurements_test.go index f2cdf4d50..73cee7479 100644 --- a/internal/attestation/measurements/measurements_test.go +++ b/internal/attestation/measurements/measurements_test.go @@ -1,13 +1,14 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package measurements import ( "bytes" + "context" "encoding/json" "io" "net/http" @@ -457,7 +458,7 @@ func TestMeasurementsFetchAndVerify(t *testing.T) { require.NoError(err) hash, err := m.fetchAndVerify( - t.Context(), client, verifier, + context.Background(), client, verifier, measurementsURL, signatureURL, tc.imageVersion, tc.csp, diff --git a/internal/attestation/measurements/overrides.go b/internal/attestation/measurements/overrides.go index d81851449..4da550d1e 100644 --- a/internal/attestation/measurements/overrides.go +++ b/internal/attestation/measurements/overrides.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package measurements @@ -78,7 +78,7 @@ var measurementOverridesForAttestationVariant = map[string]measurementOverride{ }, variant.AWSSEVSNP{}.String(): { ValueOverrides: []valueOverride{ - {Index: 0, Value: []byte{0xd6, 0xdf, 0x85, 0x53, 0x58, 0xf5, 0xb1, 0x0f, 0x06, 0xf0, 0xfa, 0xb3, 0xf4, 0x08, 0xad, 0x26, 0xcd, 0x16, 0x5a, 0x29, 0x49, 0xba, 0xd6, 0x9e, 0x2c, 0xc7, 0x56, 0x92, 0x52, 0x9e, 0x66, 0x2a}}, + {Index: 0, Value: []byte{0x7b, 0x06, 0x8c, 0x0c, 0x3a, 0xc2, 0x9a, 0xfe, 0x26, 0x41, 0x34, 0x53, 0x6b, 0x9b, 0xe2, 0x6f, 0x1d, 0x4c, 0xcd, 0x57, 0x5b, 0x88, 0xd3, 0xc3, 0xce, 0xab, 0xf3, 0x6a, 0xc9, 0x9c, 0x02, 0x78}}, }, }, } diff --git a/internal/attestation/qemu/issuer.go b/internal/attestation/qemu/issuer.go index bbc9b6c58..8214e8a27 100644 --- a/internal/attestation/qemu/issuer.go +++ b/internal/attestation/qemu/issuer.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package qemu diff --git a/internal/attestation/qemu/qemu.go b/internal/attestation/qemu/qemu.go index 1856470d4..424215a6e 100644 --- a/internal/attestation/qemu/qemu.go +++ b/internal/attestation/qemu/qemu.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* diff --git a/internal/attestation/qemu/validator.go b/internal/attestation/qemu/validator.go index 001acb3d7..e2c172f3b 100644 --- a/internal/attestation/qemu/validator.go +++ b/internal/attestation/qemu/validator.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package qemu diff --git a/internal/attestation/simulator/simulator.go b/internal/attestation/simulator/simulator.go index 5e612445d..03baabdf5 100644 --- a/internal/attestation/simulator/simulator.go +++ b/internal/attestation/simulator/simulator.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH 
-SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // TPM2 simulator used for unit tests. diff --git a/internal/attestation/simulator/simulator_disabled.go b/internal/attestation/simulator/simulator_disabled.go index 61adff724..1470e20cd 100644 --- a/internal/attestation/simulator/simulator_disabled.go +++ b/internal/attestation/simulator/simulator_disabled.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package simulator diff --git a/internal/attestation/snp/BUILD.bazel b/internal/attestation/snp/BUILD.bazel index 5cca9028f..700a3aa86 100644 --- a/internal/attestation/snp/BUILD.bazel +++ b/internal/attestation/snp/BUILD.bazel @@ -8,12 +8,11 @@ go_library( visibility = ["//:__subpackages__"], deps = [ "//internal/attestation", + "//internal/constants", "@com_github_google_go_sev_guest//abi", - "@com_github_google_go_sev_guest//client", "@com_github_google_go_sev_guest//kds", "@com_github_google_go_sev_guest//proto/sevsnp", "@com_github_google_go_sev_guest//verify/trust", - "@com_github_google_go_tpm_tools//proto/attest", ], ) diff --git a/internal/attestation/snp/snp.go b/internal/attestation/snp/snp.go index a296d7721..95cba55bf 100644 --- a/internal/attestation/snp/snp.go +++ b/internal/attestation/snp/snp.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package SNP provides types shared by SNP-based attestation implementations. @@ -12,20 +12,16 @@ import ( "bytes" "crypto/x509" "encoding/pem" - "errors" "fmt" "github.com/edgelesssys/constellation/v2/internal/attestation" + "github.com/edgelesssys/constellation/v2/internal/constants" "github.com/google/go-sev-guest/abi" - "github.com/google/go-sev-guest/client" "github.com/google/go-sev-guest/kds" spb "github.com/google/go-sev-guest/proto/sevsnp" "github.com/google/go-sev-guest/verify/trust" - "github.com/google/go-tpm-tools/proto/attest" ) -var errNoPemBlocks = errors.New("no PEM blocks found") - // Product returns the SEV product info currently supported by Constellation's SNP attestation. func Product() *spb.SevProduct { // sevProduct is the product info of the SEV platform as reported through CPUID[EAX=1]. @@ -33,26 +29,6 @@ func Product() *spb.SevProduct { return &spb.SevProduct{Name: spb.SevProduct_SEV_PRODUCT_MILAN, Stepping: 0} // Milan-B0 } -// GetExtendedReport retrieves the extended SNP report from the CVM. -func GetExtendedReport(reportData [64]byte) (report, certChain []byte, err error) { - qp, err := client.GetLeveledQuoteProvider() - if err != nil { - return nil, nil, fmt.Errorf("getting quote provider: %w", err) - } - quote, err := qp.GetRawQuoteAtLevel(reportData, 0) - if err != nil { - return nil, nil, fmt.Errorf("getting extended report: %w", err) - } - - // Parse the report and certificate chain from the quote. - report = quote - if len(quote) > abi.ReportSize { - report = quote[:abi.ReportSize] - certChain = quote[abi.ReportSize:] - } - return report, certChain, nil -} - // InstanceInfo contains the necessary information to establish trust in a SNP CVM. type InstanceInfo struct { // ReportSigner is the PEM-encoded certificate used to validate the attestation report's signature. @@ -63,7 +39,6 @@ type InstanceInfo struct { // AttestationReport is the attestation report from the vTPM (NVRAM) of the CVM. 
AttestationReport []byte Azure *AzureInstanceInfo - GCP *attest.GCEInstanceInfo } // AzureInstanceInfo contains Azure specific information related to SNP attestation. @@ -120,7 +95,7 @@ func (a *InstanceInfo) addReportSigner(att *spb.Attestation, report *spb.Report, // AttestationWithCerts returns a formatted version of the attestation report and its certificates from the instanceInfo. // Certificates are retrieved in the following precedence: -// 1. ASK from issuer. On Azure: THIM. One AWS: not prefilled. (Go to option 2) On GCP: prefilled. +// 1. ASK or ARK from issuer. On Azure: THIM. On AWS: not prefilled. // 2. ASK or ARK from fallbackCerts. // 3. ASK or ARK from AMD KDS. func (a *InstanceInfo) AttestationWithCerts(getter trust.HTTPSGetter, @@ -131,7 +106,7 @@ func (a *InstanceInfo) AttestationWithCerts(getter trust.HTTPSGetter, return nil, fmt.Errorf("converting report to proto: %w", err) } - productName := kds.ProductLine(Product()) + productName := kds.ProductString(Product()) att := &spb.Attestation{ Report: report, @@ -145,28 +120,30 @@ func (a *InstanceInfo) AttestationWithCerts(getter trust.HTTPSGetter, return nil, fmt.Errorf("adding report signer: %w", err) } - // If a certificate chain was pre-fetched by the Issuer, parse it and format it. - // Make sure to only use the ask, since using an ark from the Issuer would invalidate security guarantees. - ask, _, err := a.ParseCertChain() - if err != nil && !errors.Is(err, errNoPemBlocks) { + // If the certificate chain from THIM is present, parse it and format it. + ask, ark, err := a.ParseCertChain() + if err != nil { logger.Warn(fmt.Sprintf("Error parsing certificate chain: %v", err)) } if ask != nil { - logger.Info("Using ASK certificate from pre-fetched certificate chain") + logger.Info("Using ASK certificate from Azure THIM") att.CertificateChain.AskCert = ask.Raw } + if ark != nil { + logger.Info("Using ARK certificate from Azure THIM") + att.CertificateChain.ArkCert = ark.Raw + } // If a cached ASK or an ARK from the Constellation config is present, use it. if att.CertificateChain.AskCert == nil && fallbackCerts.ask != nil { logger.Info("Using cached ASK certificate") att.CertificateChain.AskCert = fallbackCerts.ask.Raw } - if fallbackCerts.ark != nil { - logger.Info("Using cached ARK certificate") + if att.CertificateChain.ArkCert == nil && fallbackCerts.ark != nil { + logger.Info(fmt.Sprintf("Using ARK certificate from %s", constants.ConfigFilename)) att.CertificateChain.ArkCert = fallbackCerts.ark.Raw } - - // Otherwise, retrieve missing certificates from AMD KDS. + // Otherwise, retrieve it from AMD KDS. 
if att.CertificateChain.AskCert == nil || att.CertificateChain.ArkCert == nil { logger.Info(fmt.Sprintf( "Certificate chain not fully present (ARK present: %t, ASK present: %t), falling back to retrieving it from AMD KDS", @@ -246,7 +223,7 @@ func (a *InstanceInfo) ParseCertChain() (ask, ark *x509.Certificate, retErr erro switch { case i == 1: - retErr = errNoPemBlocks + retErr = fmt.Errorf("no PEM blocks found") case len(rest) != 0: retErr = fmt.Errorf("remaining PEM block is not a valid certificate: %s", rest) } diff --git a/internal/attestation/snp/snp_test.go b/internal/attestation/snp/snp_test.go index 19485e47d..0179ac05b 100644 --- a/internal/attestation/snp/snp_test.go +++ b/internal/attestation/snp/snp_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package snp @@ -9,7 +9,6 @@ package snp import ( "crypto/x509" "encoding/hex" - "errors" "fmt" "regexp" "strings" @@ -35,13 +34,16 @@ func TestParseCertChain(t *testing.T) { wantAsk bool wantArk bool wantErr bool - errTarget error }{ "success": { certChain: defaultCertChain, wantAsk: true, wantArk: true, }, + "empty cert chain": { + certChain: []byte{}, + wantErr: true, + }, "more than two certificates": { certChain: append(defaultCertChain, defaultCertChain...), wantErr: true, @@ -50,11 +52,6 @@ func TestParseCertChain(t *testing.T) { certChain: []byte("invalid"), wantErr: true, }, - "empty cert chain": { - certChain: []byte{}, - wantErr: true, - errTarget: errNoPemBlocks, - }, "ark missing": { certChain: []byte(askOnly), wantAsk: true, @@ -76,9 +73,6 @@ func TestParseCertChain(t *testing.T) { ask, ark, err := instanceInfo.ParseCertChain() if tc.wantErr { assert.Error(err) - if tc.errTarget != nil { - assert.True(errors.Is(err, tc.errTarget)) - } } else { assert.NoError(err) assert.Equal(tc.wantAsk, ask != nil) @@ -155,24 +149,12 @@ func TestAttestationWithCerts(t *testing.T) { wantErr bool }{ "success": { - report: defaultReport, - idkeydigest: "57e229e0ffe5fa92d0faddff6cae0e61c926fc9ef9afd20a8b8cfcf7129db9338cbe5bf3f6987733a2bf65d06dc38fc1", - reportSigner: testdata.AzureThimVCEK, - certChain: testdata.CertChain, - fallbackCerts: CertificateChain{ark: testdataArk}, - expectedArk: testdataArk, - expectedAsk: testdataAsk, - getter: newStubHTTPSGetter(&urlResponseMatcher{}, nil), - }, - "ark only in pre-fetched cert-chain": { report: defaultReport, idkeydigest: "57e229e0ffe5fa92d0faddff6cae0e61c926fc9ef9afd20a8b8cfcf7129db9338cbe5bf3f6987733a2bf65d06dc38fc1", reportSigner: testdata.AzureThimVCEK, certChain: testdata.CertChain, expectedArk: testdataArk, expectedAsk: testdataAsk, - getter: newStubHTTPSGetter(nil, assert.AnError), - wantErr: true, }, "vlek success": { report: vlekReport, @@ -191,10 +173,9 @@ func TestAttestationWithCerts(t *testing.T) { ), }, "retrieve vcek": { - report: defaultReport, - idkeydigest: "57e229e0ffe5fa92d0faddff6cae0e61c926fc9ef9afd20a8b8cfcf7129db9338cbe5bf3f6987733a2bf65d06dc38fc1", - certChain: testdata.CertChain, - fallbackCerts: CertificateChain{ark: testdataArk}, + report: defaultReport, + idkeydigest: "57e229e0ffe5fa92d0faddff6cae0e61c926fc9ef9afd20a8b8cfcf7129db9338cbe5bf3f6987733a2bf65d06dc38fc1", + certChain: testdata.CertChain, getter: newStubHTTPSGetter( &urlResponseMatcher{ vcekResponse: testdata.AmdKdsVCEK, @@ -224,9 +205,25 @@ func TestAttestationWithCerts(t *testing.T) { idkeydigest: "57e229e0ffe5fa92d0faddff6cae0e61c926fc9ef9afd20a8b8cfcf7129db9338cbe5bf3f6987733a2bf65d06dc38fc1", reportSigner: 
testdata.AzureThimVCEK, fallbackCerts: NewCertificateChain(exampleCert, exampleCert), - getter: newStubHTTPSGetter(&urlResponseMatcher{}, nil), - expectedArk: exampleCert, - expectedAsk: exampleCert, + getter: newStubHTTPSGetter( + &urlResponseMatcher{}, + nil, + ), + expectedArk: exampleCert, + expectedAsk: exampleCert, + }, + "use certchain with fallback certs": { + report: defaultReport, + idkeydigest: "57e229e0ffe5fa92d0faddff6cae0e61c926fc9ef9afd20a8b8cfcf7129db9338cbe5bf3f6987733a2bf65d06dc38fc1", + certChain: testdata.CertChain, + reportSigner: testdata.AzureThimVCEK, + fallbackCerts: NewCertificateChain(&x509.Certificate{}, &x509.Certificate{}), + getter: newStubHTTPSGetter( + &urlResponseMatcher{}, + nil, + ), + expectedArk: testdataArk, + expectedAsk: testdataAsk, }, "retrieve vcek and certchain": { report: defaultReport, @@ -245,12 +242,10 @@ func TestAttestationWithCerts(t *testing.T) { }, "report too short": { report: defaultReport[:len(defaultReport)-100], - getter: newStubHTTPSGetter(nil, assert.AnError), wantErr: true, }, "corrupted report": { report: defaultReport[10 : len(defaultReport)-10], - getter: newStubHTTPSGetter(nil, assert.AnError), wantErr: true, }, "certificate fetch error": { diff --git a/internal/attestation/snp/testdata/testdata.go b/internal/attestation/snp/testdata/testdata.go index ba93753bd..c749dd899 100644 --- a/internal/attestation/snp/testdata/testdata.go +++ b/internal/attestation/snp/testdata/testdata.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package testdata contains testing data for an attestation process. diff --git a/internal/attestation/tdx/issuer.go b/internal/attestation/tdx/issuer.go index 58be53de6..0dadb4b3c 100644 --- a/internal/attestation/tdx/issuer.go +++ b/internal/attestation/tdx/issuer.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package tdx diff --git a/internal/attestation/tdx/tdx.go b/internal/attestation/tdx/tdx.go index 25141b609..ea0cb67c4 100644 --- a/internal/attestation/tdx/tdx.go +++ b/internal/attestation/tdx/tdx.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package TDX implements attestation for Intel TDX. 
diff --git a/internal/attestation/tdx/validator.go b/internal/attestation/tdx/validator.go index 6a5bde48a..dcf92d742 100644 --- a/internal/attestation/tdx/validator.go +++ b/internal/attestation/tdx/validator.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package tdx diff --git a/internal/attestation/variant/variant.go b/internal/attestation/variant/variant.go index 97ed68244..43397a94b 100644 --- a/internal/attestation/variant/variant.go +++ b/internal/attestation/variant/variant.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* @@ -44,7 +44,6 @@ const ( awsNitroTPM = "aws-nitro-tpm" awsSEVSNP = "aws-sev-snp" gcpSEVES = "gcp-sev-es" - gcpSEVSNP = "gcp-sev-snp" azureTDX = "azure-tdx" azureSEVSNP = "azure-sev-snp" azureTrustedLaunch = "azure-trustedlaunch" @@ -55,7 +54,7 @@ const ( var providerAttestationMapping = map[cloudprovider.Provider][]Variant{ cloudprovider.AWS: {AWSSEVSNP{}, AWSNitroTPM{}}, cloudprovider.Azure: {AzureSEVSNP{}, AzureTDX{}, AzureTrustedLaunch{}}, - cloudprovider.GCP: {GCPSEVSNP{}, GCPSEVES{}}, + cloudprovider.GCP: {GCPSEVES{}}, cloudprovider.QEMU: {QEMUVTPM{}}, cloudprovider.OpenStack: {QEMUVTPM{}}, } @@ -111,8 +110,6 @@ func FromString(oid string) (Variant, error) { return AWSNitroTPM{}, nil case gcpSEVES: return GCPSEVES{}, nil - case gcpSEVSNP: - return GCPSEVSNP{}, nil case azureSEVSNP: return AzureSEVSNP{}, nil case azureTrustedLaunch: @@ -212,24 +209,6 @@ func (GCPSEVES) Equal(other Getter) bool { return other.OID().Equal(GCPSEVES{}.OID()) } -// GCPSEVSNP holds the GCP SEV-SNP OID. -type GCPSEVSNP struct{} - -// OID returns the struct's object identifier. -func (GCPSEVSNP) OID() asn1.ObjectIdentifier { - return asn1.ObjectIdentifier{1, 3, 9900, 3, 2} -} - -// String returns the string representation of the OID. -func (GCPSEVSNP) String() string { - return gcpSEVSNP -} - -// Equal returns true if the other variant is also GCPSEVSNP. -func (GCPSEVSNP) Equal(other Getter) bool { - return other.OID().Equal(GCPSEVSNP{}.OID()) -} - // AzureTDX holds the OID for Azure TDX CVMs. 
type AzureTDX struct{} diff --git a/internal/attestation/vtpm/attestation.go b/internal/attestation/vtpm/attestation.go index 98d3a9ad2..77c396b9a 100644 --- a/internal/attestation/vtpm/attestation.go +++ b/internal/attestation/vtpm/attestation.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package vtpm @@ -9,12 +9,10 @@ package vtpm import ( "context" "crypto" - "crypto/sha256" "encoding/json" "errors" "fmt" "io" - "slices" "github.com/google/go-sev-guest/proto/sevsnp" tpmClient "github.com/google/go-tpm-tools/client" @@ -125,7 +123,12 @@ func (i *Issuer) Issue(ctx context.Context, userData []byte, nonce []byte) (res } defer aK.Close() + // Create an attestation using the loaded key extraData := attestation.MakeExtraData(userData, nonce) + tpmAttestation, err := aK.Attest(tpmClient.AttestOpts{Nonce: extraData}) + if err != nil { + return nil, fmt.Errorf("creating attestation: %w", err) + } // Fetch instance info of the VM instanceInfo, err := i.getInstanceInfo(ctx, tpm, extraData) @@ -133,14 +136,6 @@ func (i *Issuer) Issue(ctx context.Context, userData []byte, nonce []byte) (res return nil, fmt.Errorf("fetching instance info: %w", err) } - tpmNonce := makeTpmNonce(instanceInfo, extraData) - - // Create an attestation using the loaded key - tpmAttestation, err := aK.Attest(tpmClient.AttestOpts{Nonce: tpmNonce[:]}) - if err != nil { - return nil, fmt.Errorf("creating attestation: %w", err) - } - attDoc := AttestationDocument{ Attestation: tpmAttestation, InstanceInfo: instanceInfo, @@ -213,13 +208,11 @@ func (v *Validator) Validate(ctx context.Context, attDocRaw []byte, nonce []byte return nil, fmt.Errorf("validating attestation public key: %w", err) } - tpmNonce := makeTpmNonce(attDoc.InstanceInfo, extraData) - // Verify the TPM attestation state, err := tpmServer.VerifyAttestation( attDoc.Attestation, tpmServer.VerifyOpts{ - Nonce: tpmNonce[:], + Nonce: extraData, TrustedAKs: []crypto.PublicKey{aKP}, AllowSHA1: false, }, @@ -294,9 +287,3 @@ func GetSelectedMeasurements(open TPMOpenFunc, selection tpm2.PCRSelection) (mea return m, nil } - -// makeTpmNonce creates a nonce for the TPM attestation and returns it in its marshaled form. -func makeTpmNonce(instanceInfo []byte, extraData []byte) [32]byte { - // Finding: GCP nonces cannot be larger than 32 bytes. 
- return sha256.Sum256(slices.Concat(instanceInfo, extraData)) -} diff --git a/internal/attestation/vtpm/attestation_test.go b/internal/attestation/vtpm/attestation_test.go index 4b1e58b75..311b9ebad 100644 --- a/internal/attestation/vtpm/attestation_test.go +++ b/internal/attestation/vtpm/attestation_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package vtpm @@ -90,7 +90,7 @@ func TestValidate(t *testing.T) { nonce := []byte{1, 2, 3, 4} challenge := []byte("Constellation") - ctx := t.Context() + ctx := context.Background() attDocRaw, err := issuer.Issue(ctx, challenge, nonce) require.NoError(err) @@ -347,7 +347,7 @@ func TestFailIssuer(t *testing.T) { tc.issuer.log = logger.NewTest(t) - _, err := tc.issuer.Issue(t.Context(), tc.userData, tc.nonce) + _, err := tc.issuer.Issue(context.Background(), tc.userData, tc.nonce) assert.Error(err) }) } diff --git a/internal/attestation/vtpm/vtpm.go b/internal/attestation/vtpm/vtpm.go index 26b568043..3a969eb2d 100644 --- a/internal/attestation/vtpm/vtpm.go +++ b/internal/attestation/vtpm/vtpm.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* diff --git a/internal/attestation/vtpm/vtpm_test.go b/internal/attestation/vtpm/vtpm_test.go index fece3a088..2a6e20668 100644 --- a/internal/attestation/vtpm/vtpm_test.go +++ b/internal/attestation/vtpm/vtpm_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package vtpm diff --git a/internal/cloud/aws/aws.go b/internal/cloud/aws/aws.go index 3d0c8b316..bfa1d0b54 100644 --- a/internal/cloud/aws/aws.go +++ b/internal/cloud/aws/aws.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* diff --git a/internal/cloud/aws/aws_test.go b/internal/cloud/aws/aws_test.go index d8b7541b7..e1b05ee88 100644 --- a/internal/cloud/aws/aws_test.go +++ b/internal/cloud/aws/aws_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package aws @@ -185,7 +185,7 @@ func TestSelf(t *testing.T) { ec2: tc.ec2API, } - self, err := m.Self(t.Context()) + self, err := m.Self(context.Background()) if tc.wantErr { assert.Error(err) return @@ -431,7 +431,7 @@ func TestList(t *testing.T) { ec2: tc.ec2, } - list, err := m.List(t.Context()) + list, err := m.List(context.Background()) if tc.wantErr { assert.Error(err) return @@ -694,7 +694,7 @@ func TestGetLoadBalancerEndpoint(t *testing.T) { ec2: successfulEC2, } - gotHost, gotPort, err := m.GetLoadBalancerEndpoint(t.Context()) + gotHost, gotPort, err := m.GetLoadBalancerEndpoint(context.Background()) if tc.wantErr { assert.Error(err) return diff --git a/internal/cloud/azure/BUILD.bazel b/internal/cloud/azure/BUILD.bazel index 29f4750d7..ff8fbea92 100644 --- a/internal/cloud/azure/BUILD.bazel +++ b/internal/cloud/azure/BUILD.bazel @@ -7,8 +7,6 @@ go_library( "azure.go", "imds.go", "interface.go", - "iptables_cross.go", - "iptables_linux.go", ], importpath = "github.com/edgelesssys/constellation/v2/internal/cloud/azure", visibility = ["//:__subpackages__"], @@ -20,17 +18,11 @@ go_library( "//internal/role", "@com_github_azure_azure_sdk_for_go_sdk_azcore//runtime", "@com_github_azure_azure_sdk_for_go_sdk_azidentity//:azidentity", - 
"@com_github_azure_azure_sdk_for_go_sdk_resourcemanager_compute_armcompute_v6//:armcompute", - "@com_github_azure_azure_sdk_for_go_sdk_resourcemanager_network_armnetwork_v6//:armnetwork", - ] + select({ - "@io_bazel_rules_go//go/platform:android": [ - "@io_k8s_kubernetes//pkg/util/iptables", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "@io_k8s_kubernetes//pkg/util/iptables", - ], - "//conditions:default": [], - }), + "@com_github_azure_azure_sdk_for_go_sdk_resourcemanager_compute_armcompute_v5//:armcompute", + "@com_github_azure_azure_sdk_for_go_sdk_resourcemanager_network_armnetwork_v5//:armnetwork", + "@io_k8s_kubernetes//pkg/util/iptables", + "@io_k8s_utils//exec", + ], ) go_test( @@ -46,8 +38,8 @@ go_test( "//internal/role", "@com_github_azure_azure_sdk_for_go_sdk_azcore//runtime", "@com_github_azure_azure_sdk_for_go_sdk_azcore//to", - "@com_github_azure_azure_sdk_for_go_sdk_resourcemanager_compute_armcompute_v6//:armcompute", - "@com_github_azure_azure_sdk_for_go_sdk_resourcemanager_network_armnetwork_v6//:armnetwork", + "@com_github_azure_azure_sdk_for_go_sdk_resourcemanager_compute_armcompute_v5//:armcompute", + "@com_github_azure_azure_sdk_for_go_sdk_resourcemanager_network_armnetwork_v5//:armnetwork", "@com_github_stretchr_testify//assert", "@com_github_stretchr_testify//require", "@org_golang_google_grpc//test/bufconn", diff --git a/internal/cloud/azure/azure.go b/internal/cloud/azure/azure.go index 02b78c9b2..52a468471 100644 --- a/internal/cloud/azure/azure.go +++ b/internal/cloud/azure/azure.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* @@ -19,17 +19,20 @@ import ( "context" "errors" "fmt" + "log/slog" "path" "strconv" "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v6" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v5" "github.com/edgelesssys/constellation/v2/internal/cloud" "github.com/edgelesssys/constellation/v2/internal/cloud/azureshared" "github.com/edgelesssys/constellation/v2/internal/cloud/metadata" "github.com/edgelesssys/constellation/v2/internal/constants" "github.com/edgelesssys/constellation/v2/internal/role" + "k8s.io/kubernetes/pkg/util/iptables" + "k8s.io/utils/exec" ) // Cloud provides Azure metadata and API access. @@ -392,8 +395,8 @@ func (c *Cloud) getLoadBalancerPublicIP(ctx context.Context) (string, error) { } /* -// TODO: uncomment and use as soon as we switch the primary endpoint to DNS. -// We have to think about how to handle DNS for internal load balancers +// TODO(malt3): uncomment and use as soon as we switch the primary endpoint to DNS. +// Addition from 3u13r: We have to think about how to handle DNS for internal load balancers // that only have a private IP address and therefore no DNS name by default. // // getLoadBalancerDNSName retrieves the dns name of the load balancer. @@ -436,6 +439,68 @@ func (c *Cloud) getLoadBalancerDNSName(ctx context.Context) (string, error) { } */ +// PrepareControlPlaneNode sets up iptables for the control plane node only +// if an internal load balancer is used. +// +// This is needed since during `kubeadm init` the API server must talk to the +// kubeAPIEndpoint, which is the load balancer IP address. 
During that time, the +// only healthy VM is the VM itself. Therefore, traffic is sent to the load balancer +// and the 5-tuple is (VM IP, , LB IP, 6443, TCP). +// Now the load balancer does not re-write the source IP address only the destination (DNAT). +// Therefore the 5-tuple is (VM IP, , VM IP, 6443, TCP). +// Now the VM responds to the SYN packet with a SYN-ACK packet, but the outgoing +// connection waits on a response from the load balancer and not the VM therefore +// dropping the packet. +// +// OpenShift also uses the same mechanism to redirect traffic to the API server: +// https://github.com/openshift/machine-config-operator/blob/e453bd20bac0e48afa74e9a27665abaf454d93cd/templates/master/00-master/azure/files/opt-libexec-openshift-azure-routes-sh.yaml +func (c *Cloud) PrepareControlPlaneNode(ctx context.Context, log *slog.Logger) error { + selfMetadata, err := c.Self(ctx) + if err != nil { + return fmt.Errorf("failed to get self metadata: %w", err) + } + + // skipping iptables setup for worker nodes + if selfMetadata.Role != role.ControlPlane { + log.Info("not a control plane node, skipping iptables setup") + return nil + } + + // skipping iptables setup if no internal LB exists e.g. + // for public LB architectures + loadbalancerIP, err := c.getLoadBalancerPrivateIP(ctx) + if err != nil { + log.With(slog.Any("error", err)).Warn("skipping iptables setup, failed to get load balancer private IP") + return nil + } + + log.Info(fmt.Sprintf("Setting up iptables for control plane node with load balancer IP %s", loadbalancerIP)) + + iptablesExec := iptables.New(exec.New(), iptables.ProtocolIPv4) + if err != nil { + return fmt.Errorf("failed to create iptables client: %w", err) + } + + const chainName = "azure-lb-nat" + if _, err := iptablesExec.EnsureChain(iptables.TableNAT, chainName); err != nil { + return fmt.Errorf("failed to create iptables chain: %w", err) + } + + if _, err := iptablesExec.EnsureRule(iptables.Append, iptables.TableNAT, "PREROUTING", "-j", chainName); err != nil { + return fmt.Errorf("failed to add rule to iptables chain: %w", err) + } + + if _, err := iptablesExec.EnsureRule(iptables.Append, iptables.TableNAT, "OUTPUT", "-j", chainName); err != nil { + return fmt.Errorf("failed to add rule to iptables chain: %w", err) + } + + if _, err := iptablesExec.EnsureRule(iptables.Append, iptables.TableNAT, chainName, "--dst", loadbalancerIP, "-p", "tcp", "--dport", "6443", "-j", "REDIRECT"); err != nil { + return fmt.Errorf("failed to add rule to iptables chain: %w", err) + } + + return nil +} + // convertToInstanceMetadata converts a armcomputev2.VirtualMachineScaleSetVM to a metadata.InstanceMetadata. 
func convertToInstanceMetadata(vm armcompute.VirtualMachineScaleSetVM, networkInterfaces []armnetwork.Interface, ) (metadata.InstanceMetadata, error) { diff --git a/internal/cloud/azure/azure_test.go b/internal/cloud/azure/azure_test.go index 84182f9a0..f364f3f2f 100644 --- a/internal/cloud/azure/azure_test.go +++ b/internal/cloud/azure/azure_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package azure @@ -13,8 +13,8 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v6" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v5" "github.com/edgelesssys/constellation/v2/internal/cloud" "github.com/edgelesssys/constellation/v2/internal/cloud/metadata" "github.com/edgelesssys/constellation/v2/internal/role" @@ -150,7 +150,7 @@ func TestGetInstance(t *testing.T) { scaleSetsVMAPI: tc.scaleSetsVMAPI, netIfacAPI: tc.networkInterfacesAPI, } - instance, err := metadata.getInstance(t.Context(), tc.providerID) + instance, err := metadata.getInstance(context.Background(), tc.providerID) if tc.wantErr { assert.Error(err) return @@ -186,7 +186,7 @@ func TestUID(t *testing.T) { cloud := &Cloud{ imds: tc.imdsAPI, } - uid, err := cloud.UID(t.Context()) + uid, err := cloud.UID(context.Background()) if tc.wantErr { assert.Error(err) return @@ -222,7 +222,7 @@ func TestInitSecretHash(t *testing.T) { cloud := &Cloud{ imds: tc.imdsAPI, } - initSecretHash, err := cloud.InitSecretHash(t.Context()) + initSecretHash, err := cloud.InitSecretHash(context.Background()) if tc.wantErr { assert.Error(err) return @@ -410,7 +410,7 @@ func TestList(t *testing.T) { scaleSetsAPI: tc.scaleSetsAPI, scaleSetsVMAPI: tc.scaleSetsVMAPI, } - instances, err := azureMetadata.List(t.Context()) + instances, err := azureMetadata.List(context.Background()) if tc.wantErr { assert.Error(err) @@ -473,7 +473,7 @@ func TestGetNetworkSecurityGroupName(t *testing.T) { metadata := Cloud{ secGroupAPI: tc.securityGroupsAPI, } - name, err := metadata.getNetworkSecurityGroupName(t.Context(), "resource-group", "uid") + name, err := metadata.getNetworkSecurityGroupName(context.Background(), "resource-group", "uid") if tc.wantErr { assert.Error(err) return @@ -547,7 +547,7 @@ func TestGetSubnetworkCIDR(t *testing.T) { imds: tc.imdsAPI, virtNetAPI: tc.virtualNetworksAPI, } - subnetworkCIDR, err := metadata.getSubnetworkCIDR(t.Context()) + subnetworkCIDR, err := metadata.getSubnetworkCIDR(context.Background()) if tc.wantErr { assert.Error(err) return @@ -708,7 +708,7 @@ func TestGetLoadBalancerEndpoint(t *testing.T) { loadBalancerAPI: tc.loadBalancerAPI, pubIPAPI: tc.publicIPAddressesAPI, } - gotHost, gotPort, err := metadata.GetLoadBalancerEndpoint(t.Context()) + gotHost, gotPort, err := metadata.GetLoadBalancerEndpoint(context.Background()) if tc.wantErr { assert.Error(err) return diff --git a/internal/cloud/azure/imds.go b/internal/cloud/azure/imds.go index 6dea3d667..21309b38a 100644 --- a/internal/cloud/azure/imds.go +++ b/internal/cloud/azure/imds.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package azure diff --git a/internal/cloud/azure/imds_test.go 
b/internal/cloud/azure/imds_test.go index bf119f7b3..242a052e7 100644 --- a/internal/cloud/azure/imds_test.go +++ b/internal/cloud/azure/imds_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package azure @@ -214,7 +214,7 @@ func TestIMDSClient(t *testing.T) { } iClient := IMDSClient{client: &hClient} - ctx := t.Context() + ctx := context.Background() id, err := iClient.providerID(ctx) if tc.wantProviderIDErr { diff --git a/internal/cloud/azure/interface.go b/internal/cloud/azure/interface.go index 63a5afba9..fc61e09b2 100644 --- a/internal/cloud/azure/interface.go +++ b/internal/cloud/azure/interface.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package azure @@ -10,8 +10,8 @@ import ( "context" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v6" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v5" ) type imdsAPI interface { diff --git a/internal/cloud/azure/iptables_cross.go b/internal/cloud/azure/iptables_cross.go deleted file mode 100644 index d64b09db2..000000000 --- a/internal/cloud/azure/iptables_cross.go +++ /dev/null @@ -1,19 +0,0 @@ -//go:build !linux - -/* -Copyright (c) Edgeless Systems GmbH - -SPDX-License-Identifier: BUSL-1.1 -*/ - -package azure - -import ( - "context" - "log/slog" -) - -// PrepareControlPlaneNode is only supported on Linux. -func (c *Cloud) PrepareControlPlaneNode(_ context.Context, _ *slog.Logger) error { - panic("azure.*Cloud.PrepareControlPlaneNode is only supported on Linux") -} diff --git a/internal/cloud/azure/iptables_linux.go b/internal/cloud/azure/iptables_linux.go deleted file mode 100644 index c2d42a843..000000000 --- a/internal/cloud/azure/iptables_linux.go +++ /dev/null @@ -1,76 +0,0 @@ -//go:build linux - -/* -Copyright (c) Edgeless Systems GmbH - -SPDX-License-Identifier: BUSL-1.1 -*/ - -package azure - -import ( - "context" - "fmt" - "log/slog" - - "github.com/edgelesssys/constellation/v2/internal/role" - "k8s.io/kubernetes/pkg/util/iptables" -) - -// PrepareControlPlaneNode sets up iptables for the control plane node only -// if an internal load balancer is used. -// -// This is needed since during `kubeadm init` the API server must talk to the -// kubeAPIEndpoint, which is the load balancer IP address. During that time, the -// only healthy VM is the VM itself. Therefore, traffic is sent to the load balancer -// and the 5-tuple is (VM IP, , LB IP, 6443, TCP). -// Now the load balancer does not re-write the source IP address only the destination (DNAT). -// Therefore the 5-tuple is (VM IP, , VM IP, 6443, TCP). -// Now the VM responds to the SYN packet with a SYN-ACK packet, but the outgoing -// connection waits on a response from the load balancer and not the VM therefore -// dropping the packet. 
-// -// OpenShift also uses the same mechanism to redirect traffic to the API server: -// https://github.com/openshift/machine-config-operator/blob/e453bd20bac0e48afa74e9a27665abaf454d93cd/templates/master/00-master/azure/files/opt-libexec-openshift-azure-routes-sh.yaml -func (c *Cloud) PrepareControlPlaneNode(ctx context.Context, log *slog.Logger) error { - selfMetadata, err := c.Self(ctx) - if err != nil { - return fmt.Errorf("failed to get self metadata: %w", err) - } - - // skipping iptables setup for worker nodes - if selfMetadata.Role != role.ControlPlane { - log.Info("not a control plane node, skipping iptables setup") - return nil - } - - // skipping iptables setup if no internal LB exists e.g. - // for public LB architectures - loadbalancerIP, err := c.getLoadBalancerPrivateIP(ctx) - if err != nil { - log.With(slog.Any("error", err)).Warn("skipping iptables setup, failed to get load balancer private IP") - return nil - } - - log.Info(fmt.Sprintf("Setting up iptables for control plane node with load balancer IP %s", loadbalancerIP)) - iptablesExec := iptables.New(iptables.ProtocolIPv4) - - const chainName = "azure-lb-nat" - if _, err := iptablesExec.EnsureChain(iptables.TableNAT, chainName); err != nil { - return fmt.Errorf("failed to create iptables chain: %w", err) - } - - if _, err := iptablesExec.EnsureRule(iptables.Append, iptables.TableNAT, "PREROUTING", "-j", chainName); err != nil { - return fmt.Errorf("failed to add rule to iptables chain: %w", err) - } - - if _, err := iptablesExec.EnsureRule(iptables.Append, iptables.TableNAT, "OUTPUT", "-j", chainName); err != nil { - return fmt.Errorf("failed to add rule to iptables chain: %w", err) - } - - if _, err := iptablesExec.EnsureRule(iptables.Append, iptables.TableNAT, chainName, "--dst", loadbalancerIP, "-p", "tcp", "--dport", "6443", "-j", "REDIRECT"); err != nil { - return fmt.Errorf("failed to add rule to iptables chain: %w", err) - } - - return nil -} diff --git a/internal/cloud/azureshared/appcredentials.go b/internal/cloud/azureshared/appcredentials.go index fe5c8b6d9..7c6c7ec65 100644 --- a/internal/cloud/azureshared/appcredentials.go +++ b/internal/cloud/azureshared/appcredentials.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package azureshared diff --git a/internal/cloud/azureshared/appcredentials_test.go b/internal/cloud/azureshared/appcredentials_test.go index 27cddf96f..0c7d65424 100644 --- a/internal/cloud/azureshared/appcredentials_test.go +++ b/internal/cloud/azureshared/appcredentials_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package azureshared diff --git a/internal/cloud/azureshared/azureshared.go b/internal/cloud/azureshared/azureshared.go index f8909e3b7..38cf82b27 100644 --- a/internal/cloud/azureshared/azureshared.go +++ b/internal/cloud/azureshared/azureshared.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* diff --git a/internal/cloud/azureshared/metadata.go b/internal/cloud/azureshared/metadata.go index 1289adc8d..b21ffd4c3 100644 --- a/internal/cloud/azureshared/metadata.go +++ b/internal/cloud/azureshared/metadata.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package azureshared diff --git a/internal/cloud/azureshared/metadata_test.go 
b/internal/cloud/azureshared/metadata_test.go index a7c562fe6..61d71a94d 100644 --- a/internal/cloud/azureshared/metadata_test.go +++ b/internal/cloud/azureshared/metadata_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package azureshared diff --git a/internal/cloud/cloud.go b/internal/cloud/cloud.go index 7d255bd19..e7e9a88ca 100644 --- a/internal/cloud/cloud.go +++ b/internal/cloud/cloud.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* diff --git a/internal/cloud/cloudprovider/cloudprovider.go b/internal/cloud/cloudprovider/cloudprovider.go index c25b18d69..47791f943 100644 --- a/internal/cloud/cloudprovider/cloudprovider.go +++ b/internal/cloud/cloudprovider/cloudprovider.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cloudprovider @@ -16,9 +16,6 @@ import ( // Provider is cloud provider used by the CLI. type Provider uint32 -// Tags is the type that holds additional tags for cloud resources. -type Tags map[string]string - const ( // Unknown is default value for Provider. Unknown Provider = iota diff --git a/internal/cloud/cloudprovider/cloudprovider_test.go b/internal/cloud/cloudprovider/cloudprovider_test.go index e7154195c..b8c7a4d40 100644 --- a/internal/cloud/cloudprovider/cloudprovider_test.go +++ b/internal/cloud/cloudprovider/cloudprovider_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cloudprovider diff --git a/internal/cloud/gcp/gcp.go b/internal/cloud/gcp/gcp.go index f38d848f6..ba689ef3a 100644 --- a/internal/cloud/gcp/gcp.go +++ b/internal/cloud/gcp/gcp.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* diff --git a/internal/cloud/gcp/gcp_test.go b/internal/cloud/gcp/gcp_test.go index 4066f3c5b..fa2179163 100644 --- a/internal/cloud/gcp/gcp_test.go +++ b/internal/cloud/gcp/gcp_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package gcp @@ -172,7 +172,7 @@ func TestGetInstance(t *testing.T) { instanceAPI: &tc.instanceAPI, subnetAPI: &tc.subnetAPI, } - instance, err := cloud.getInstance(t.Context(), tc.projectID, tc.zone, tc.instanceName) + instance, err := cloud.getInstance(context.Background(), tc.projectID, tc.zone, tc.instanceName) if tc.wantErr { assert.Error(err) @@ -474,7 +474,7 @@ func TestGetLoadbalancerEndpoint(t *testing.T) { regionalForwardingRulesAPI: &tc.regionalForwardingRulesAPI, } - gotHost, gotPort, err := cloud.GetLoadBalancerEndpoint(t.Context()) + gotHost, gotPort, err := cloud.GetLoadBalancerEndpoint(context.Background()) if tc.wantErr { assert.Error(err) return @@ -810,7 +810,7 @@ func TestList(t *testing.T) { zoneAPI: &tc.zoneAPI, } - instances, err := cloud.List(t.Context()) + instances, err := cloud.List(context.Background()) if tc.wantErr { assert.Error(err) return @@ -915,7 +915,7 @@ func TestZones(t *testing.T) { assert.Empty(cloud.zoneCache) - gotZones, err := cloud.zones(t.Context(), "someProject", "someregion-west3") + gotZones, err := cloud.zones(context.Background(), "someProject", "someregion-west3") if tc.wantErr { assert.Error(err) return @@ -1066,7 +1066,7 @@ func TestUID(t *testing.T) 
{ instanceAPI: &tc.instanceAPI, } - uid, err := cloud.UID(t.Context()) + uid, err := cloud.UID(context.Background()) if tc.wantErr { assert.Error(err) return @@ -1170,7 +1170,7 @@ func TestInitSecretHash(t *testing.T) { instanceAPI: &tc.instanceAPI, } - initSecretHash, err := cloud.InitSecretHash(t.Context()) + initSecretHash, err := cloud.InitSecretHash(context.Background()) if tc.wantErr { assert.Error(err) return diff --git a/internal/cloud/gcp/interface.go b/internal/cloud/gcp/interface.go index ad9f131f7..e78c9861b 100644 --- a/internal/cloud/gcp/interface.go +++ b/internal/cloud/gcp/interface.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package gcp diff --git a/internal/cloud/gcp/wrappers.go b/internal/cloud/gcp/wrappers.go index b8d60b92a..72b46399a 100644 --- a/internal/cloud/gcp/wrappers.go +++ b/internal/cloud/gcp/wrappers.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package gcp diff --git a/internal/cloud/gcpshared/gcpshared.go b/internal/cloud/gcpshared/gcpshared.go index 667ad4647..93b4a41fa 100644 --- a/internal/cloud/gcpshared/gcpshared.go +++ b/internal/cloud/gcpshared/gcpshared.go @@ -1,6 +1,6 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* diff --git a/internal/cloud/gcpshared/providerid.go b/internal/cloud/gcpshared/providerid.go index 0a32af728..70c26334c 100644 --- a/internal/cloud/gcpshared/providerid.go +++ b/internal/cloud/gcpshared/providerid.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package gcpshared diff --git a/internal/cloud/gcpshared/providerid_test.go b/internal/cloud/gcpshared/providerid_test.go index 1ebeb8e86..61fbdb2f5 100644 --- a/internal/cloud/gcpshared/providerid_test.go +++ b/internal/cloud/gcpshared/providerid_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package gcpshared diff --git a/internal/cloud/gcpshared/serviceaccountkey.go b/internal/cloud/gcpshared/serviceaccountkey.go index 6908b385d..bb90cdde2 100644 --- a/internal/cloud/gcpshared/serviceaccountkey.go +++ b/internal/cloud/gcpshared/serviceaccountkey.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package gcpshared diff --git a/internal/cloud/gcpshared/serviceaccountkey_test.go b/internal/cloud/gcpshared/serviceaccountkey_test.go index de77f6ff2..54cf7296b 100644 --- a/internal/cloud/gcpshared/serviceaccountkey_test.go +++ b/internal/cloud/gcpshared/serviceaccountkey_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package gcpshared diff --git a/internal/cloud/metadata/metadata.go b/internal/cloud/metadata/metadata.go index fe2b844c5..7b3aed893 100644 --- a/internal/cloud/metadata/metadata.go +++ b/internal/cloud/metadata/metadata.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package metadata diff --git a/internal/cloud/openstack/BUILD.bazel b/internal/cloud/openstack/BUILD.bazel index 719f3fe02..b42e5a6a5 100644 --- a/internal/cloud/openstack/BUILD.bazel +++ 
b/internal/cloud/openstack/BUILD.bazel @@ -18,12 +18,12 @@ go_library( "//internal/cloud/metadata", "//internal/constants", "//internal/role", - "@com_github_gophercloud_gophercloud_v2//:gophercloud", - "@com_github_gophercloud_gophercloud_v2//openstack/compute/v2/servers", - "@com_github_gophercloud_gophercloud_v2//openstack/networking/v2/networks", - "@com_github_gophercloud_gophercloud_v2//openstack/networking/v2/subnets", - "@com_github_gophercloud_gophercloud_v2//pagination", - "@com_github_gophercloud_utils_v2//openstack/clientconfig", + "@com_github_gophercloud_gophercloud//:gophercloud", + "@com_github_gophercloud_gophercloud//openstack/compute/v2/servers", + "@com_github_gophercloud_gophercloud//openstack/networking/v2/networks", + "@com_github_gophercloud_gophercloud//openstack/networking/v2/subnets", + "@com_github_gophercloud_gophercloud//pagination", + "@com_github_gophercloud_utils//openstack/clientconfig", ], ) @@ -40,11 +40,11 @@ go_test( deps = [ "//internal/cloud/metadata", "//internal/role", - "@com_github_gophercloud_gophercloud_v2//:gophercloud", - "@com_github_gophercloud_gophercloud_v2//openstack/compute/v2/servers", - "@com_github_gophercloud_gophercloud_v2//openstack/networking/v2/networks", - "@com_github_gophercloud_gophercloud_v2//openstack/networking/v2/subnets", - "@com_github_gophercloud_gophercloud_v2//pagination", + "@com_github_gophercloud_gophercloud//:gophercloud", + "@com_github_gophercloud_gophercloud//openstack/compute/v2/servers", + "@com_github_gophercloud_gophercloud//openstack/networking/v2/networks", + "@com_github_gophercloud_gophercloud//openstack/networking/v2/subnets", + "@com_github_gophercloud_gophercloud//pagination", "@com_github_stretchr_testify//assert", "@com_github_stretchr_testify//require", ], diff --git a/internal/cloud/openstack/accountkey.go b/internal/cloud/openstack/accountkey.go index 49d359e6f..d781091e9 100644 --- a/internal/cloud/openstack/accountkey.go +++ b/internal/cloud/openstack/accountkey.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package openstack diff --git a/internal/cloud/openstack/accountkey_test.go b/internal/cloud/openstack/accountkey_test.go index b15da3a81..e9805e1d9 100644 --- a/internal/cloud/openstack/accountkey_test.go +++ b/internal/cloud/openstack/accountkey_test.go @@ -1,6 +1,6 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package openstack diff --git a/internal/cloud/openstack/api.go b/internal/cloud/openstack/api.go index 839a47fbb..b133d2a47 100644 --- a/internal/cloud/openstack/api.go +++ b/internal/cloud/openstack/api.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package openstack @@ -10,10 +10,10 @@ import ( "context" "github.com/edgelesssys/constellation/v2/internal/role" - "github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers" - "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/networks" - "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/subnets" - "github.com/gophercloud/gophercloud/v2/pagination" + "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" + "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" + "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" + "github.com/gophercloud/gophercloud/pagination" ) type imdsAPI interface { @@ -34,5 +34,5 @@ 
type serversAPI interface { } type pagerAPI interface { - AllPages(context.Context) (pagination.Page, error) + AllPages() (pagination.Page, error) } diff --git a/internal/cloud/openstack/api_test.go b/internal/cloud/openstack/api_test.go index 9acb07494..44c26c4ba 100644 --- a/internal/cloud/openstack/api_test.go +++ b/internal/cloud/openstack/api_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package openstack @@ -10,10 +10,10 @@ import ( "context" "github.com/edgelesssys/constellation/v2/internal/role" - "github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers" - "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/networks" - "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/subnets" - "github.com/gophercloud/gophercloud/v2/pagination" + "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" + "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" + "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" + "github.com/gophercloud/gophercloud/pagination" ) type stubIMDSClient struct { @@ -90,6 +90,6 @@ type stubPager struct { allPagesErr error } -func (p *stubPager) AllPages(_ context.Context) (pagination.Page, error) { +func (p *stubPager) AllPages() (pagination.Page, error) { return p.page, p.allPagesErr } diff --git a/internal/cloud/openstack/clouds/read.go b/internal/cloud/openstack/clouds/read.go index 1ffa4976a..d4259c338 100644 --- a/internal/cloud/openstack/clouds/read.go +++ b/internal/cloud/openstack/clouds/read.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package clouds diff --git a/internal/cloud/openstack/imds.go b/internal/cloud/openstack/imds.go index 50d255903..792a0d881 100644 --- a/internal/cloud/openstack/imds.go +++ b/internal/cloud/openstack/imds.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package openstack @@ -172,20 +172,6 @@ func (c *imdsClient) userDomainName(ctx context.Context) (string, error) { return c.userDataCache.UserDomainName, nil } -func (c *imdsClient) regionName(ctx context.Context) (string, error) { - if c.timeForUpdate(c.cacheTime) || c.userDataCache.RegionName == "" { - if err := c.update(ctx); err != nil { - return "", err - } - } - - if c.userDataCache.RegionName == "" { - return "", errors.New("unable to get user domain name") - } - - return c.userDataCache.RegionName, nil -} - func (c *imdsClient) username(ctx context.Context) (string, error) { if c.timeForUpdate(c.cacheTime) || c.userDataCache.Username == "" { if err := c.update(ctx); err != nil { @@ -309,7 +295,6 @@ type metadataTags struct { type userDataResponse struct { AuthURL string `json:"openstack-auth-url,omitempty"` UserDomainName string `json:"openstack-user-domain-name,omitempty"` - RegionName string `json:"openstack-region-name,omitempty"` Username string `json:"openstack-username,omitempty"` Password string `json:"openstack-password,omitempty"` LoadBalancerEndpoint string `json:"openstack-load-balancer-endpoint,omitempty"` diff --git a/internal/cloud/openstack/imds_test.go b/internal/cloud/openstack/imds_test.go index f3a135bfa..ce45dbd3d 100644 --- a/internal/cloud/openstack/imds_test.go +++ b/internal/cloud/openstack/imds_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 
+SPDX-License-Identifier: AGPL-3.0-only */ package openstack @@ -176,7 +176,7 @@ func TestProviderID(t *testing.T) { cacheTime: tc.cacheTime, } - result, err := tu.method(imds, t.Context()) + result, err := tu.method(imds, context.Background()) if tc.wantErr { assert.Error(err) @@ -264,7 +264,7 @@ func TestRole(t *testing.T) { cacheTime: tc.cacheTime, } - result, err := imds.role(t.Context()) + result, err := imds.role(context.Background()) if tc.wantErr { assert.Error(err) @@ -336,7 +336,7 @@ func TestVPCIP(t *testing.T) { vpcIPCacheTime: tc.cacheTime, } - result, err := imds.vpcIP(t.Context()) + result, err := imds.vpcIP(context.Background()) if tc.wantErr { assert.Error(err) diff --git a/internal/cloud/openstack/openstack.go b/internal/cloud/openstack/openstack.go index 4fae03421..9472b3068 100644 --- a/internal/cloud/openstack/openstack.go +++ b/internal/cloud/openstack/openstack.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package openstack @@ -17,10 +17,10 @@ import ( "github.com/edgelesssys/constellation/v2/internal/cloud/metadata" "github.com/edgelesssys/constellation/v2/internal/constants" "github.com/edgelesssys/constellation/v2/internal/role" - "github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers" - "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/networks" - "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/subnets" - "github.com/gophercloud/utils/v2/openstack/clientconfig" + "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" + "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" + "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" + "github.com/gophercloud/utils/openstack/clientconfig" ) const ( @@ -54,10 +54,6 @@ func New(ctx context.Context) (*MetadataClient, error) { if err != nil { return nil, fmt.Errorf("getting user domain name: %w", err) } - regionName, err := imds.regionName(ctx) - if err != nil { - return nil, fmt.Errorf("getting region name: %w", err) - } clientOpts := &clientconfig.ClientOpts{ AuthType: clientconfig.AuthV3Password, @@ -67,16 +63,15 @@ func New(ctx context.Context) (*MetadataClient, error) { Username: username, Password: password, }, - RegionName: regionName, } - serversClient, err := clientconfig.NewServiceClient(ctx, "compute", clientOpts) + serversClient, err := clientconfig.NewServiceClient("compute", clientOpts) if err != nil { return nil, fmt.Errorf("creating compute client: %w", err) } serversClient.Microversion = microversion - networksClient, err := clientconfig.NewServiceClient(ctx, "network", clientOpts) + networksClient, err := clientconfig.NewServiceClient("network", clientOpts) if err != nil { return nil, fmt.Errorf("creating network client: %w", err) } @@ -127,12 +122,12 @@ func (c *MetadataClient) List(ctx context.Context) ([]metadata.InstanceMetadata, uidTag := fmt.Sprintf("constellation-uid-%s", uid) - subnet, err := c.getSubnetCIDR(ctx, uidTag) + subnet, err := c.getSubnetCIDR(uidTag) if err != nil { return nil, err } - srvs, err := c.getServers(ctx, uidTag) + srvs, err := c.getServers(uidTag) if err != nil { return nil, err } @@ -245,9 +240,9 @@ func (c *MetadataClient) GetLoadBalancerEndpoint(ctx context.Context) (host, por return host, strconv.FormatInt(constants.KubernetesPort, 10), nil } -func (c *MetadataClient) getSubnetCIDR(ctx context.Context, uidTag string) (netip.Prefix, error) { +func (c *MetadataClient) getSubnetCIDR(uidTag string) 
(netip.Prefix, error) { listNetworksOpts := networks.ListOpts{Tags: uidTag} - networksPage, err := c.api.ListNetworks(listNetworksOpts).AllPages(ctx) + networksPage, err := c.api.ListNetworks(listNetworksOpts).AllPages() if err != nil { return netip.Prefix{}, fmt.Errorf("listing networks: %w", err) } @@ -260,7 +255,7 @@ func (c *MetadataClient) getSubnetCIDR(ctx context.Context, uidTag string) (neti } listSubnetsOpts := subnets.ListOpts{Tags: uidTag} - subnetsPage, err := c.api.ListSubnets(listSubnetsOpts).AllPages(ctx) + subnetsPage, err := c.api.ListSubnets(listSubnetsOpts).AllPages() if err != nil { return netip.Prefix{}, fmt.Errorf("listing subnets: %w", err) } @@ -290,9 +285,9 @@ func (c *MetadataClient) getSubnetCIDR(ctx context.Context, uidTag string) (neti return cidr, nil } -func (c *MetadataClient) getServers(ctx context.Context, uidTag string) ([]servers.Server, error) { +func (c *MetadataClient) getServers(uidTag string) ([]servers.Server, error) { listServersOpts := servers.ListOpts{Tags: uidTag} - serversPage, err := c.api.ListServers(listServersOpts).AllPages(ctx) + serversPage, err := c.api.ListServers(listServersOpts).AllPages() if err != nil { return nil, fmt.Errorf("listing servers: %w", err) } diff --git a/internal/cloud/openstack/openstack_test.go b/internal/cloud/openstack/openstack_test.go index 0b9ecbbf8..da8ed9d6b 100644 --- a/internal/cloud/openstack/openstack_test.go +++ b/internal/cloud/openstack/openstack_test.go @@ -1,23 +1,24 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package openstack import ( + "context" "errors" "fmt" "testing" "github.com/edgelesssys/constellation/v2/internal/cloud/metadata" "github.com/edgelesssys/constellation/v2/internal/role" - "github.com/gophercloud/gophercloud/v2" - "github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers" - "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/networks" - "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/subnets" - "github.com/gophercloud/gophercloud/v2/pagination" + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" + "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" + "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" + "github.com/gophercloud/gophercloud/pagination" "github.com/stretchr/testify/assert" ) @@ -87,7 +88,7 @@ func TestSelf(t *testing.T) { c := &MetadataClient{imds: tc.imds} - got, err := c.Self(t.Context()) + got, err := c.Self(context.Background()) if tc.wantErr { assert.Error(err) @@ -383,7 +384,7 @@ func TestList(t *testing.T) { c := &MetadataClient{imds: tc.imds, api: tc.api} - got, err := c.List(t.Context()) + got, err := c.List(context.Background()) if tc.wantErr { assert.Error(err) @@ -417,7 +418,7 @@ func TestUID(t *testing.T) { c := &MetadataClient{imds: tc.imds} - got, err := c.UID(t.Context()) + got, err := c.UID(context.Background()) if tc.wantErr { assert.Error(err) @@ -451,7 +452,7 @@ func TestInitSecretHash(t *testing.T) { c := &MetadataClient{imds: tc.imds} - got, err := c.InitSecretHash(t.Context()) + got, err := c.InitSecretHash(context.Background()) if tc.wantErr { assert.Error(err) @@ -485,7 +486,7 @@ func TestGetLoadBalancerEndpoint(t *testing.T) { c := &MetadataClient{imds: tc.imds} - got, _, err := c.GetLoadBalancerEndpoint(t.Context()) + got, _, err := c.GetLoadBalancerEndpoint(context.Background()) if tc.wantErr { assert.Error(err) diff --git 
a/internal/cloud/openstack/plumbing.go b/internal/cloud/openstack/plumbing.go index f99bafe75..fa304994e 100644 --- a/internal/cloud/openstack/plumbing.go +++ b/internal/cloud/openstack/plumbing.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package openstack diff --git a/internal/cloud/openstack/plumbing_test.go b/internal/cloud/openstack/plumbing_test.go index 40354da33..5f13fd42c 100644 --- a/internal/cloud/openstack/plumbing_test.go +++ b/internal/cloud/openstack/plumbing_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package openstack diff --git a/internal/cloud/openstack/wrappers.go b/internal/cloud/openstack/wrappers.go index 002916541..c2c732698 100644 --- a/internal/cloud/openstack/wrappers.go +++ b/internal/cloud/openstack/wrappers.go @@ -1,16 +1,16 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package openstack import ( - "github.com/gophercloud/gophercloud/v2" - "github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers" - "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/networks" - "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/subnets" + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" + "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" + "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" ) type apiClient struct { diff --git a/internal/cloud/qemu/qemu.go b/internal/cloud/qemu/qemu.go index a68682501..5451342ab 100644 --- a/internal/cloud/qemu/qemu.go +++ b/internal/cloud/qemu/qemu.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* diff --git a/internal/compatibility/compatibility.go b/internal/compatibility/compatibility.go index c1a4ec08e..e089a7ddd 100644 --- a/internal/compatibility/compatibility.go +++ b/internal/compatibility/compatibility.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* diff --git a/internal/compatibility/compatibility_test.go b/internal/compatibility/compatibility_test.go index 48da2d4b8..35c4e7517 100644 --- a/internal/compatibility/compatibility_test.go +++ b/internal/compatibility/compatibility_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package compatibility diff --git a/internal/config/BUILD.bazel b/internal/config/BUILD.bazel index 8a96864c5..c653c489c 100644 --- a/internal/config/BUILD.bazel +++ b/internal/config/BUILD.bazel @@ -10,7 +10,6 @@ go_library( "azure.go", "config.go", "config_doc.go", - "gcp.go", # keep "image_enterprise.go", # keep @@ -63,7 +62,6 @@ go_test( "//internal/cloud/cloudprovider", "//internal/config/instancetypes", "//internal/constants", - "//internal/encoding", "//internal/file", "//internal/semver", "//internal/versions", diff --git a/internal/config/attestation.go b/internal/config/attestation.go index 08f980681..dc4d8fb83 100644 --- a/internal/config/attestation.go +++ b/internal/config/attestation.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package config @@ -52,8 +52,6 @@ 
func UnmarshalAttestationConfig(data []byte, attestVariant variant.Variant) (Att return unmarshalTypedConfig[*AzureTDX](data) case variant.GCPSEVES{}: return unmarshalTypedConfig[*GCPSEVES](data) - case variant.GCPSEVSNP{}: - return unmarshalTypedConfig[*GCPSEVSNP](data) case variant.QEMUVTPM{}: return unmarshalTypedConfig[*QEMUVTPM](data) case variant.QEMUTDX{}: diff --git a/internal/config/attestation_test.go b/internal/config/attestation_test.go index a13562ac1..e0e3492dc 100644 --- a/internal/config/attestation_test.go +++ b/internal/config/attestation_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package config @@ -41,9 +41,6 @@ func TestUnmarshalAttestationConfig(t *testing.T) { "GCPSEVES": { cfg: &GCPSEVES{Measurements: measurements.DefaultsFor(cloudprovider.GCP, variant.GCPSEVES{})}, }, - "GCPSEVSNP": { - cfg: DefaultForGCPSEVSNP(), - }, "QEMUVTPM": { cfg: &QEMUVTPM{Measurements: measurements.DefaultsFor(cloudprovider.QEMU, variant.QEMUVTPM{})}, }, diff --git a/internal/config/attestationversion.go b/internal/config/attestationversion.go index c64025ca0..a7949c5c3 100644 --- a/internal/config/attestationversion.go +++ b/internal/config/attestationversion.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package config @@ -9,49 +9,47 @@ package config import ( "encoding/json" "fmt" + "math" + "strconv" "strings" - - "github.com/edgelesssys/constellation/v2/internal/encoding" ) -type versionValue interface { - encoding.HexBytes | uint8 | uint16 -} - -func placeholderVersionValue[T versionValue]() T { - var placeholder T - return placeholder -} +const placeholderVersionValue = 0 // NewLatestPlaceholderVersion returns the latest version with a placeholder version value. -func NewLatestPlaceholderVersion[T versionValue]() AttestationVersion[T] { - return AttestationVersion[T]{ - Value: placeholderVersionValue[T](), +func NewLatestPlaceholderVersion() AttestationVersion { + return AttestationVersion{ + Value: placeholderVersionValue, WantLatest: true, } } -// AttestationVersion holds version information. -type AttestationVersion[T versionValue] struct { - Value T +// AttestationVersion is a type that represents a version of an SNP component. +type AttestationVersion struct { + Value uint8 WantLatest bool } -// MarshalYAML implements a custom marshaller to write "latest" as the type's value, if set. -func (v AttestationVersion[T]) MarshalYAML() (any, error) { +// MarshalYAML implements a custom marshaller to resolve "latest" values. +func (v AttestationVersion) MarshalYAML() (any, error) { if v.WantLatest { return "latest", nil } return v.Value, nil } -// UnmarshalYAML implements a custom unmarshaller to resolve "latest" values. -func (v *AttestationVersion[T]) UnmarshalYAML(unmarshal func(any) error) error { - return v.unmarshal(unmarshal) +// UnmarshalYAML implements a custom unmarshaller to resolve "latest" values. +func (v *AttestationVersion) UnmarshalYAML(unmarshal func(any) error) error { + var rawUnmarshal string + if err := unmarshal(&rawUnmarshal); err != nil { + return fmt.Errorf("raw unmarshal: %w", err) + } + + return v.parseRawUnmarshal(rawUnmarshal) } -// MarshalJSON implements a custom marshaller to write "latest" as the type's value, if set. -func (v AttestationVersion[T]) MarshalJSON() ([]byte, error) { +// MarshalJSON implements a custom marshaller to resolve "latest" values.
+func (v AttestationVersion) MarshalJSON() ([]byte, error) { if v.WantLatest { return json.Marshal("latest") } @@ -59,31 +57,39 @@ func (v AttestationVersion[T]) MarshalJSON() ([]byte, error) { } // UnmarshalJSON implements a custom unmarshaller to resolve "latest" values. -func (v *AttestationVersion[T]) UnmarshalJSON(data []byte) (err error) { - return v.unmarshal(func(a any) error { - return json.Unmarshal(data, a) - }) -} - -// unmarshal takes care of unmarshalling the value from YAML or JSON. -func (v *AttestationVersion[T]) unmarshal(unmarshal func(any) error) error { - // Start by trying to unmarshal to the distinct type - var distinctType T - if err := unmarshal(&distinctType); err == nil { - v.Value = distinctType - return nil - } - +func (v *AttestationVersion) UnmarshalJSON(data []byte) (err error) { + // JSON has two distinct ways to represent numbers and strings. + // This means we cannot simply unmarshal to string, like with YAML. + // Unmarshalling to `any` causes Go to unmarshal numbers to float64. + // Therefore, try to unmarshal to string, and then to int, instead of using type assertions. var unmarshalString string - if err := unmarshal(&unmarshalString); err != nil { - return fmt.Errorf("failed unmarshalling to %T or string: %w", distinctType, err) + if err := json.Unmarshal(data, &unmarshalString); err != nil { + var unmarshalInt int64 + if err := json.Unmarshal(data, &unmarshalInt); err != nil { + return fmt.Errorf("unable to unmarshal to string or int: %w", err) + } + unmarshalString = strconv.FormatInt(unmarshalInt, 10) } - if strings.ToLower(unmarshalString) == "latest" { - v.WantLatest = true - v.Value = placeholderVersionValue[T]() - return nil - } - - return fmt.Errorf("failed unmarshalling to %T or string: invalid value: %s", distinctType, unmarshalString) + return v.parseRawUnmarshal(unmarshalString) +} + +func (v *AttestationVersion) parseRawUnmarshal(str string) error { + if strings.HasPrefix(str, "0") && len(str) != 1 { + return fmt.Errorf("no format with prefixed 0 (octal, hexadecimal) allowed: %s", str) + } + if strings.ToLower(str) == "latest" { + v.WantLatest = true + v.Value = placeholderVersionValue + } else { + ui, err := strconv.ParseUint(str, 10, 8) + if err != nil { + return fmt.Errorf("invalid version value: %s", str) + } + if ui > math.MaxUint8 { + return fmt.Errorf("integer value is out of uint8 range: %d", ui) + } + v.Value = uint8(ui) + } + return nil } diff --git a/internal/config/attestationversion_test.go b/internal/config/attestationversion_test.go index c731831d2..52d68e2a8 100644 --- a/internal/config/attestationversion_test.go +++ b/internal/config/attestationversion_test.go @@ -1,313 +1,210 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package config import ( - "bytes" "encoding/json" "testing" - "github.com/edgelesssys/constellation/v2/internal/encoding" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "gopkg.in/yaml.v3" ) func TestVersionMarshalYAML(t *testing.T) { - testCasesUint8 := map[string]struct { - sut AttestationVersion[uint8] + tests := map[string]struct { + sut AttestationVersion want string }{ - "version with latest writes latest": { - sut: AttestationVersion[uint8]{ + "isLatest resolves to latest": { + sut: AttestationVersion{ Value: 1, WantLatest: true, }, want: "latest\n", }, - "value 5 writes 5": { - sut: AttestationVersion[uint8]{ + "value 5 resolves to 5": { + sut: AttestationVersion{ Value: 5, WantLatest:
false, }, want: "5\n", }, } - for name, tc := range testCasesUint8 { + for name, tc := range tests { t.Run(name, func(t *testing.T) { - assert := assert.New(t) + require := require.New(t) bt, err := yaml.Marshal(tc.sut) - assert.NoError(err) - assert.Equal(tc.want, string(bt)) - }) - } - - testCasesUint16 := map[string]struct { - sut AttestationVersion[uint16] - want string - }{ - "version with latest writes latest": { - sut: AttestationVersion[uint16]{ - Value: 1, - WantLatest: true, - }, - want: "latest\n", - }, - "value 5 writes 5": { - sut: AttestationVersion[uint16]{ - Value: 5, - WantLatest: false, - }, - want: "5\n", - }, - } - for name, tc := range testCasesUint16 { - t.Run(name, func(t *testing.T) { - assert := assert.New(t) - - bt, err := yaml.Marshal(tc.sut) - assert.NoError(err) - assert.Equal(tc.want, string(bt)) - }) - } - - testCasesHexBytes := map[string]struct { - sut AttestationVersion[encoding.HexBytes] - want string - }{ - "version with latest writes latest": { - sut: AttestationVersion[encoding.HexBytes]{ - Value: encoding.HexBytes(bytes.Repeat([]byte("0"), 16)), - WantLatest: true, - }, - want: "latest\n", - }, - "value 5 writes 5": { - sut: AttestationVersion[encoding.HexBytes]{ - Value: encoding.HexBytes(bytes.Repeat([]byte("A"), 16)), - WantLatest: false, - }, - want: "\"41414141414141414141414141414141\"\n", - }, - } - for name, tc := range testCasesHexBytes { - t.Run(name, func(t *testing.T) { - assert := assert.New(t) - - bt, err := yaml.Marshal(tc.sut) - assert.NoError(err) - assert.Equal(tc.want, string(bt)) + require.NoError(err) + require.Equal(tc.want, string(bt)) }) } } -func TestVersionUnmarshal(t *testing.T) { - testCasesUint8 := map[string]struct { - yamlData string - jsonData string - want AttestationVersion[uint8] - wantErr bool +func TestVersionUnmarshalYAML(t *testing.T) { + tests := map[string]struct { + sut string + want AttestationVersion + wantErr bool }{ "latest resolves to isLatest": { - yamlData: "latest", - jsonData: "\"latest\"", - want: AttestationVersion[uint8]{ + sut: "latest", + want: AttestationVersion{ Value: 0, WantLatest: true, }, wantErr: false, }, "1 resolves to value 1": { - yamlData: "1", - jsonData: "1", - want: AttestationVersion[uint8]{ + sut: "1", + want: AttestationVersion{ Value: 1, WantLatest: false, }, wantErr: false, }, "max uint8+1 errors": { - yamlData: "256", - jsonData: "256", - wantErr: true, + sut: "256", + wantErr: true, }, "-1 errors": { - yamlData: "-1", - jsonData: "-1", - wantErr: true, + sut: "-1", + wantErr: true, + }, + "2.6 errors": { + sut: "2.6", + wantErr: true, + }, + "2.0 errors": { + sut: "2.0", + wantErr: true, + }, + "hex format is invalid": { + sut: "0x10", + wantErr: true, + }, + "octal format is invalid": { + sut: "010", + wantErr: true, }, "0 resolves to value 0": { - yamlData: "0", - jsonData: "0", - want: AttestationVersion[uint8]{ + sut: "0", + want: AttestationVersion{ Value: 0, WantLatest: false, }, }, + "00 errors": { + sut: "00", + wantErr: true, + }, } - for name, tc := range testCasesUint8 { + for name, tc := range tests { t.Run(name, func(t *testing.T) { - assert := assert.New(t) + require := require.New(t) - { - var sut AttestationVersion[uint8] - err := yaml.Unmarshal([]byte(tc.yamlData), &sut) - if tc.wantErr { - assert.Error(err) - } else { - assert.NoError(err) - assert.Equal(tc.want, sut) - } - } - - { - var sut AttestationVersion[uint8] - err := json.Unmarshal([]byte(tc.jsonData), &sut) - if tc.wantErr { - assert.Error(err) - } else { - assert.NoError(err) - 
assert.Equal(tc.want, sut) - } - } - }) - } - - testCasesUint16 := map[string]struct { - yamlData string - jsonData string - want AttestationVersion[uint16] - wantErr bool - }{ - "latest resolves to isLatest": { - yamlData: "latest", - jsonData: "\"latest\"", - want: AttestationVersion[uint16]{ - Value: 0, - WantLatest: true, - }, - wantErr: false, - }, - "1 resolves to value 1": { - yamlData: "1", - jsonData: "1", - want: AttestationVersion[uint16]{ - Value: 1, - WantLatest: false, - }, - wantErr: false, - }, - "max uint16+1 errors": { - yamlData: "65536", - jsonData: "65536", - wantErr: true, - }, - "-1 errors": { - yamlData: "-1", - jsonData: "-1", - wantErr: true, - }, - "0 resolves to value 0": { - yamlData: "0", - jsonData: "0", - want: AttestationVersion[uint16]{ - Value: 0, - WantLatest: false, - }, - }, - } - for name, tc := range testCasesUint16 { - t.Run(name, func(t *testing.T) { - assert := assert.New(t) - - { - var sut AttestationVersion[uint16] - err := yaml.Unmarshal([]byte(tc.yamlData), &sut) - if tc.wantErr { - assert.Error(err) - } else { - assert.NoError(err) - assert.Equal(tc.want, sut) - } - } - - { - var sut AttestationVersion[uint16] - err := json.Unmarshal([]byte(tc.jsonData), &sut) - if tc.wantErr { - assert.Error(err) - } else { - assert.NoError(err) - assert.Equal(tc.want, sut) - } - } - }) - } - - testCasesHexBytes := map[string]struct { - yamlData string - jsonData string - want AttestationVersion[encoding.HexBytes] - wantErr bool - }{ - "latest resolves to isLatest": { - yamlData: "latest", - jsonData: "\"latest\"", - want: AttestationVersion[encoding.HexBytes]{ - Value: encoding.HexBytes(nil), - WantLatest: true, - }, - wantErr: false, - }, - "hex string resolves to correctly": { - yamlData: "41414141414141414141414141414141", - jsonData: "\"41414141414141414141414141414141\"", - want: AttestationVersion[encoding.HexBytes]{ - Value: encoding.HexBytes(bytes.Repeat([]byte("A"), 16)), - WantLatest: false, - }, - wantErr: false, - }, - "invalid hex string": { - yamlData: "GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG", - jsonData: "\"GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG\"", - wantErr: true, - }, - "non hex data": { - yamlData: "-15", - jsonData: "-15", - wantErr: true, - }, - } - for name, tc := range testCasesHexBytes { - t.Run(name, func(t *testing.T) { - assert := assert.New(t) - - { - var sut AttestationVersion[encoding.HexBytes] - err := yaml.Unmarshal([]byte(tc.yamlData), &sut) - if tc.wantErr { - assert.Error(err) - } else { - assert.NoError(err) - assert.Equal(tc.want, sut) - } - } - - { - var sut AttestationVersion[encoding.HexBytes] - err := json.Unmarshal([]byte(tc.jsonData), &sut) - if tc.wantErr { - assert.Error(err) - } else { - assert.NoError(err) - assert.Equal(tc.want, sut) - } + var sut AttestationVersion + err := yaml.Unmarshal([]byte(tc.sut), &sut) + if tc.wantErr { + require.Error(err) + return } + require.NoError(err) + require.Equal(tc.want, sut) + }) + } +} + +func TestVersionUnmarshalJSON(t *testing.T) { + tests := map[string]struct { + sut string + want AttestationVersion + wantErr bool + }{ + "latest resolves to isLatest": { + sut: `"latest"`, + want: AttestationVersion{ + Value: 0, + WantLatest: true, + }, + }, + "1 resolves to value 1": { + sut: "1", + want: AttestationVersion{ + Value: 1, + WantLatest: false, + }, + }, + "quoted number resolves to value": { + sut: `"1"`, + want: AttestationVersion{ + Value: 1, + WantLatest: false, + }, + }, + "quoted float errors": { + sut: `"1.0"`, + wantErr: true, + }, + "max uint8+1 errors": { + sut: "256", + 
wantErr: true, + }, + "-1 errors": { + sut: "-1", + wantErr: true, + }, + "2.6 errors": { + sut: "2.6", + wantErr: true, + }, + "2.0 errors": { + sut: "2.0", + wantErr: true, + }, + "hex format is invalid": { + sut: "0x10", + wantErr: true, + }, + "octal format is invalid": { + sut: "010", + wantErr: true, + }, + "0 resolves to value 0": { + sut: "0", + want: AttestationVersion{ + Value: 0, + WantLatest: false, + }, + }, + "quoted 0 resolves to value 0": { + sut: `"0"`, + want: AttestationVersion{ + Value: 0, + WantLatest: false, + }, + }, + "00 errors": { + sut: "00", + wantErr: true, + }, + } + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + require := require.New(t) + + var sut AttestationVersion + err := json.Unmarshal([]byte(tc.sut), &sut) + if tc.wantErr { + require.Error(err) + return + } + require.NoError(err) + require.Equal(tc.want, sut) }) } } diff --git a/internal/config/aws.go b/internal/config/aws.go index 43594342e..53b11e69f 100644 --- a/internal/config/aws.go +++ b/internal/config/aws.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package config @@ -22,10 +22,10 @@ var _ svnResolveMarshaller = &AWSSEVSNP{} func DefaultForAWSSEVSNP() *AWSSEVSNP { return &AWSSEVSNP{ Measurements: measurements.DefaultsFor(cloudprovider.AWS, variant.AWSSEVSNP{}), - BootloaderVersion: NewLatestPlaceholderVersion[uint8](), - TEEVersion: NewLatestPlaceholderVersion[uint8](), - SNPVersion: NewLatestPlaceholderVersion[uint8](), - MicrocodeVersion: NewLatestPlaceholderVersion[uint8](), + BootloaderVersion: NewLatestPlaceholderVersion(), + TEEVersion: NewLatestPlaceholderVersion(), + SNPVersion: NewLatestPlaceholderVersion(), + MicrocodeVersion: NewLatestPlaceholderVersion(), AMDRootKey: mustParsePEM(arkPEM), } } @@ -79,7 +79,7 @@ func (c *AWSSEVSNP) FetchAndSetLatestVersionNumbers(ctx context.Context, fetcher return nil } - versions, err := fetcher.FetchLatestVersion(ctx, variant.AWSSEVSNP{}) + versions, err := fetcher.FetchSEVSNPVersionLatest(ctx, variant.AWSSEVSNP{}) if err != nil { return fmt.Errorf("fetching latest TCB versions from configapi: %w", err) } diff --git a/internal/config/azure.go b/internal/config/azure.go index 3631b52c8..2b1f29a03 100644 --- a/internal/config/azure.go +++ b/internal/config/azure.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package config @@ -28,10 +28,10 @@ var ( func DefaultForAzureSEVSNP() *AzureSEVSNP { return &AzureSEVSNP{ Measurements: measurements.DefaultsFor(cloudprovider.Azure, variant.AzureSEVSNP{}), - BootloaderVersion: NewLatestPlaceholderVersion[uint8](), - TEEVersion: NewLatestPlaceholderVersion[uint8](), - SNPVersion: NewLatestPlaceholderVersion[uint8](), - MicrocodeVersion: NewLatestPlaceholderVersion[uint8](), + BootloaderVersion: NewLatestPlaceholderVersion(), + TEEVersion: NewLatestPlaceholderVersion(), + SNPVersion: NewLatestPlaceholderVersion(), + MicrocodeVersion: NewLatestPlaceholderVersion(), FirmwareSignerConfig: SNPFirmwareSignerConfig{ AcceptedKeyDigests: idkeydigest.DefaultList(), EnforcementPolicy: idkeydigest.MAAFallback, @@ -80,7 +80,7 @@ func (c *AzureSEVSNP) FetchAndSetLatestVersionNumbers(ctx context.Context, fetch return nil } - versions, err := fetcher.FetchLatestVersion(ctx, variant.AzureSEVSNP{}) + versions, err := fetcher.FetchSEVSNPVersionLatest(ctx, variant.AzureSEVSNP{}) if err != nil { return fmt.Errorf("fetching latest TCB 
versions from configapi: %w", err) } @@ -142,14 +142,12 @@ func DefaultForAzureTDX() *AzureTDX { return &AzureTDX{ Measurements: measurements.DefaultsFor(cloudprovider.Azure, variant.AzureTDX{}), // TODO(AB#3798): Enable latest versioning for Azure TDX - QESVN: NewLatestPlaceholderVersion[uint16](), - PCESVN: NewLatestPlaceholderVersion[uint16](), - TEETCBSVN: NewLatestPlaceholderVersion[encoding.HexBytes](), - QEVendorID: NewLatestPlaceholderVersion[encoding.HexBytes](), - // Don't set a default for MRSEAM as it effectively prevents upgrading the SEAM module - // Quote verification still makes sure the module comes from Intel (through MRSIGNERSEAM), and is not of a lower version than expected - // MRSeam: nil, - XFAM: NewLatestPlaceholderVersion[encoding.HexBytes](), + QESVN: 0, + PCESVN: 0, + TEETCBSVN: encoding.HexBytes{0x02, 0x01, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + QEVendorID: encoding.HexBytes{0x93, 0x9a, 0x72, 0x33, 0xf7, 0x9c, 0x4c, 0xa9, 0x94, 0x0a, 0x0d, 0xb3, 0x95, 0x7f, 0x06, 0x07}, + MRSeam: encoding.HexBytes{0x36, 0x03, 0x04, 0xd3, 0x4a, 0x16, 0xaa, 0xce, 0x0a, 0x18, 0xe0, 0x9a, 0xd2, 0xd0, 0x7d, 0x2b, 0x9f, 0xd3, 0xc1, 0x74, 0x37, 0x8e, 0x5b, 0xf1, 0x08, 0x38, 0x80, 0x79, 0x82, 0x7f, 0x89, 0xff, 0x62, 0xac, 0xc5, 0xf8, 0xc4, 0x73, 0xdd, 0x40, 0x70, 0x63, 0x24, 0x83, 0x4e, 0x20, 0x29, 0x46}, + XFAM: encoding.HexBytes{0xe7, 0x18, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00}, IntelRootKey: mustParsePEM(tdxRootPEM), } @@ -179,43 +177,9 @@ func (c AzureTDX) EqualTo(other AttestationCfg) (bool, error) { return c.Measurements.EqualTo(otherCfg.Measurements), nil } -// FetchAndSetLatestVersionNumbers fetches the latest version numbers from the configapi and sets them. -func (c *AzureTDX) FetchAndSetLatestVersionNumbers(ctx context.Context, fetcher attestationconfigapi.Fetcher) error { - // Only talk to the API if at least one version number is set to latest. - if !(c.PCESVN.WantLatest || c.QESVN.WantLatest || c.TEETCBSVN.WantLatest || c.QEVendorID.WantLatest || c.XFAM.WantLatest) { - return nil - } - - versions, err := fetcher.FetchLatestVersion(ctx, variant.AzureTDX{}) - if err != nil { - return fmt.Errorf("fetching latest TCB versions from configapi: %w", err) - } - - // set values and keep WantLatest flag - if c.PCESVN.WantLatest { - c.PCESVN.Value = versions.PCESVN - } - if c.QESVN.WantLatest { - c.QESVN.Value = versions.QESVN - } - if c.TEETCBSVN.WantLatest { - c.TEETCBSVN.Value = versions.TEETCBSVN[:] - } - if c.QEVendorID.WantLatest { - c.QEVendorID.Value = versions.QEVendorID[:] - } - if c.XFAM.WantLatest { - c.XFAM.Value = versions.XFAM[:] - } - return nil -} - func (c *AzureTDX) getToMarshallLatestWithResolvedVersions() AttestationCfg { cp := *c - cp.PCESVN.WantLatest = false - cp.QESVN.WantLatest = false - cp.TEETCBSVN.WantLatest = false - cp.QEVendorID.WantLatest = false - cp.XFAM.WantLatest = false + // TODO: We probably want to support "latest" pseudo versioning for Azure TDX + // But we should decide on which claims can be reliably used for attestation first return &cp } diff --git a/internal/config/config.go b/internal/config/config.go index 4397fa3fa..10ac013d1 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // This binary can be build from siderolabs/talos projects. 
Located at: @@ -89,17 +89,14 @@ type Config struct { // The Kubernetes Service CIDR to be used for the cluster. This value will only be used during the first initialization of the Constellation. ServiceCIDR string `yaml:"serviceCIDR" validate:"omitempty,cidrv4"` // description: | - // Additional tags that are applied to created resources. - Tags cloudprovider.Tags `yaml:"tags" validate:"omitempty"` - // description: | // Supported cloud providers and their specific configurations. - Provider ProviderConfig `yaml:"provider"` + Provider ProviderConfig `yaml:"provider" validate:"dive"` // description: | // Node groups to be created in the cluster. NodeGroups map[string]NodeGroup `yaml:"nodeGroups" validate:"required,dive"` // description: | // Configuration for attestation validation. This configuration provides sensible defaults for the Constellation version it was created for.\nSee the docs for an overview on attestation: https://docs.edgeless.systems/constellation/architecture/attestation - Attestation AttestationConfig `yaml:"attestation"` + Attestation AttestationConfig `yaml:"attestation" validate:"dive"` } // ProviderConfig are cloud-provider specific configuration values used by the CLI. @@ -108,19 +105,19 @@ type Config struct { type ProviderConfig struct { // description: | // Configuration for AWS as provider. - AWS *AWSConfig `yaml:"aws,omitempty" validate:"omitempty"` + AWS *AWSConfig `yaml:"aws,omitempty" validate:"omitempty,dive"` // description: | // Configuration for Azure as provider. - Azure *AzureConfig `yaml:"azure,omitempty" validate:"omitempty"` + Azure *AzureConfig `yaml:"azure,omitempty" validate:"omitempty,dive"` // description: | // Configuration for Google Cloud as provider. - GCP *GCPConfig `yaml:"gcp,omitempty" validate:"omitempty"` + GCP *GCPConfig `yaml:"gcp,omitempty" validate:"omitempty,dive"` // description: | // Configuration for OpenStack as provider. - OpenStack *OpenStackConfig `yaml:"openstack,omitempty" validate:"omitempty"` + OpenStack *OpenStackConfig `yaml:"openstack,omitempty" validate:"omitempty,dive"` // description: | // Configuration for QEMU as provider. - QEMU *QEMUConfig `yaml:"qemu,omitempty" validate:"omitempty"` + QEMU *QEMUConfig `yaml:"qemu,omitempty" validate:"omitempty,dive"` } // AWSConfig are AWS specific configuration values used by the CLI. @@ -188,9 +185,6 @@ type GCPConfig struct { // Path of service account key file. For required service account roles, see https://docs.edgeless.systems/constellation/getting-started/install#authorization ServiceAccountKeyPath string `yaml:"serviceAccountKeyPath" validate:"required"` // description: | - // GCP service account mail address. This is being attached to the VMs for authorization. - IAMServiceAccountVM string `yaml:"IAMServiceAccountVM"` - // description: | // Deploy Persistent Disk CSI driver with on-node encryption. For details see: https://docs.edgeless.systems/constellation/architecture/encrypted-storage DeployCSIDriver *bool `yaml:"deployCSIDriver" validate:"required"` // description: | @@ -267,31 +261,28 @@ type QEMUConfig struct { type AttestationConfig struct { // description: | // AWS SEV-SNP attestation. - AWSSEVSNP *AWSSEVSNP `yaml:"awsSEVSNP,omitempty" validate:"omitempty"` + AWSSEVSNP *AWSSEVSNP `yaml:"awsSEVSNP,omitempty" validate:"omitempty,dive"` // description: | // AWS Nitro TPM attestation. 
- AWSNitroTPM *AWSNitroTPM `yaml:"awsNitroTPM,omitempty" validate:"omitempty"` + AWSNitroTPM *AWSNitroTPM `yaml:"awsNitroTPM,omitempty" validate:"omitempty,dive"` // description: | // Azure SEV-SNP attestation.\nFor details see: https://docs.edgeless.systems/constellation/architecture/attestation#cvm-verification - AzureSEVSNP *AzureSEVSNP `yaml:"azureSEVSNP,omitempty" validate:"omitempty"` + AzureSEVSNP *AzureSEVSNP `yaml:"azureSEVSNP,omitempty" validate:"omitempty,dive"` // description: | // Azure TDX attestation. - AzureTDX *AzureTDX `yaml:"azureTDX,omitempty" validate:"omitempty"` + AzureTDX *AzureTDX `yaml:"azureTDX,omitempty" validate:"omitempty,dive"` // description: | // Azure TPM attestation (Trusted Launch). - AzureTrustedLaunch *AzureTrustedLaunch `yaml:"azureTrustedLaunch,omitempty" validate:"omitempty"` + AzureTrustedLaunch *AzureTrustedLaunch `yaml:"azureTrustedLaunch,omitempty" validate:"omitempty,dive"` // description: | // GCP SEV-ES attestation. - GCPSEVES *GCPSEVES `yaml:"gcpSEVES,omitempty" validate:"omitempty"` - // description: | - // GCP SEV-SNP attestation. - GCPSEVSNP *GCPSEVSNP `yaml:"gcpSEVSNP,omitempty" validate:"omitempty"` + GCPSEVES *GCPSEVES `yaml:"gcpSEVES,omitempty" validate:"omitempty,dive"` // description: | // QEMU tdx attestation. - QEMUTDX *QEMUTDX `yaml:"qemuTDX,omitempty" validate:"omitempty"` + QEMUTDX *QEMUTDX `yaml:"qemuTDX,omitempty" validate:"omitempty,dive"` // description: | // QEMU vTPM attestation. - QEMUVTPM *QEMUVTPM `yaml:"qemuVTPM,omitempty" validate:"omitempty"` + QEMUVTPM *QEMUVTPM `yaml:"qemuVTPM,omitempty" validate:"omitempty,dive"` } // NodeGroup defines a group of nodes with the same role and configuration. @@ -328,7 +319,6 @@ func Default() *Config { KubernetesVersion: versions.Default, DebugCluster: toPtr(false), ServiceCIDR: "10.96.0.0/12", - Tags: cloudprovider.Tags{}, Provider: ProviderConfig{ AWS: &AWSConfig{ Region: "", @@ -352,7 +342,6 @@ func Default() *Config { Region: "", Zone: "", ServiceAccountKeyPath: "", - IAMServiceAccountVM: "", DeployCSIDriver: toPtr(true), UseMarketplaceImage: toPtr(false), }, @@ -401,7 +390,6 @@ func Default() *Config { AzureTDX: DefaultForAzureTDX(), AzureTrustedLaunch: &AzureTrustedLaunch{Measurements: measurements.DefaultsFor(cloudprovider.Azure, variant.AzureTrustedLaunch{})}, GCPSEVES: &GCPSEVES{Measurements: measurements.DefaultsFor(cloudprovider.GCP, variant.GCPSEVES{})}, - GCPSEVSNP: DefaultForGCPSEVSNP(), QEMUVTPM: &QEMUVTPM{Measurements: measurements.DefaultsFor(cloudprovider.QEMU, variant.QEMUVTPM{})}, }, } @@ -472,27 +460,17 @@ func New(fileHandler file.Handler, name string, fetcher attestationconfigapi.Fet return nil, err } - // Replace "latest" placeholders for attestation version numbers with the actual latest version numbers from config API if azure := c.Attestation.AzureSEVSNP; azure != nil { if err := azure.FetchAndSetLatestVersionNumbers(context.Background(), fetcher); err != nil { return c, err } } - if azure := c.Attestation.AzureTDX; azure != nil { - if err := azure.FetchAndSetLatestVersionNumbers(context.Background(), fetcher); err != nil { - return c, err - } - } + if aws := c.Attestation.AWSSEVSNP; aws != nil { if err := aws.FetchAndSetLatestVersionNumbers(context.Background(), fetcher); err != nil { return c, err } } - if gcp := c.Attestation.GCPSEVSNP; gcp != nil { - if err := gcp.FetchAndSetLatestVersionNumbers(context.Background(), fetcher); err != nil { - return c, err - } - } // Read secrets from env-vars. 
clientSecretValue := os.Getenv(constants.EnvVarAzureClientSecretValue) @@ -540,9 +518,6 @@ func (c *Config) UpdateMeasurements(newMeasurements measurements.M) { if c.Attestation.GCPSEVES != nil { c.Attestation.GCPSEVES.Measurements.CopyFrom(newMeasurements) } - if c.Attestation.GCPSEVSNP != nil { - c.Attestation.GCPSEVSNP.Measurements.CopyFrom(newMeasurements) - } if c.Attestation.QEMUVTPM != nil { c.Attestation.QEMUVTPM.Measurements.CopyFrom(newMeasurements) } @@ -595,8 +570,6 @@ func (c *Config) SetAttestation(attestation variant.Variant) { c.Attestation = AttestationConfig{AzureTrustedLaunch: currentAttestationConfigs.AzureTrustedLaunch} case variant.GCPSEVES: c.Attestation = AttestationConfig{GCPSEVES: currentAttestationConfigs.GCPSEVES} - case variant.GCPSEVSNP: - c.Attestation = AttestationConfig{GCPSEVSNP: currentAttestationConfigs.GCPSEVSNP} case variant.QEMUVTPM: c.Attestation = AttestationConfig{QEMUVTPM: currentAttestationConfigs.QEMUVTPM} } @@ -664,9 +637,6 @@ func (c *Config) GetAttestationConfig() AttestationCfg { if c.Attestation.GCPSEVES != nil { return c.Attestation.GCPSEVES } - if c.Attestation.GCPSEVSNP != nil { - return c.Attestation.GCPSEVSNP - } if c.Attestation.QEMUVTPM != nil { return c.Attestation.QEMUVTPM } @@ -994,29 +964,28 @@ type GCPSEVES struct { Measurements measurements.M `json:"measurements" yaml:"measurements" validate:"required,no_placeholders"` } -// GCPSEVSNP is the configuration for GCP SEV-SNP attestation. -type GCPSEVSNP struct { - // description: | - // Expected TPM measurements. - Measurements measurements.M `json:"measurements" yaml:"measurements" validate:"required,no_placeholders"` - // description: | - // Lowest acceptable bootloader version. - BootloaderVersion AttestationVersion[uint8] `json:"bootloaderVersion" yaml:"bootloaderVersion"` - // description: | - // Lowest acceptable TEE version. - TEEVersion AttestationVersion[uint8] `json:"teeVersion" yaml:"teeVersion"` - // description: | - // Lowest acceptable SEV-SNP version. - SNPVersion AttestationVersion[uint8] `json:"snpVersion" yaml:"snpVersion"` - // description: | - // Lowest acceptable microcode version. - MicrocodeVersion AttestationVersion[uint8] `json:"microcodeVersion" yaml:"microcodeVersion"` - // description: | - // AMD Root Key certificate used to verify the SEV-SNP certificate chain. - AMDRootKey Certificate `json:"amdRootKey" yaml:"amdRootKey"` - // description: | - // AMD Signing Key certificate used to verify the SEV-SNP VCEK / VLEK certificate. - AMDSigningKey Certificate `json:"amdSigningKey,omitempty" yaml:"amdSigningKey,omitempty"` +// GetVariant returns gcp-sev-es as the variant. +func (GCPSEVES) GetVariant() variant.Variant { + return variant.GCPSEVES{} +} + +// GetMeasurements returns the measurements used for attestation. +func (c GCPSEVES) GetMeasurements() measurements.M { + return c.Measurements +} + +// SetMeasurements updates a config's measurements using the given measurements. +func (c *GCPSEVES) SetMeasurements(m measurements.M) { + c.Measurements = m +} + +// EqualTo returns true if the config is equal to the given config. +func (c GCPSEVES) EqualTo(other AttestationCfg) (bool, error) { + otherCfg, ok := other.(*GCPSEVES) + if !ok { + return false, fmt.Errorf("cannot compare %T with %T", c, other) + } + return c.Measurements.EqualTo(otherCfg.Measurements), nil } // QEMUVTPM is the configuration for QEMU vTPM attestation. 
@@ -1088,16 +1057,16 @@ type AWSSEVSNP struct { Measurements measurements.M `json:"measurements" yaml:"measurements" validate:"required,no_placeholders"` // description: | // Lowest acceptable bootloader version. - BootloaderVersion AttestationVersion[uint8] `json:"bootloaderVersion" yaml:"bootloaderVersion"` + BootloaderVersion AttestationVersion `json:"bootloaderVersion" yaml:"bootloaderVersion"` // description: | // Lowest acceptable TEE version. - TEEVersion AttestationVersion[uint8] `json:"teeVersion" yaml:"teeVersion"` + TEEVersion AttestationVersion `json:"teeVersion" yaml:"teeVersion"` // description: | // Lowest acceptable SEV-SNP version. - SNPVersion AttestationVersion[uint8] `json:"snpVersion" yaml:"snpVersion"` + SNPVersion AttestationVersion `json:"snpVersion" yaml:"snpVersion"` // description: | // Lowest acceptable microcode version. - MicrocodeVersion AttestationVersion[uint8] `json:"microcodeVersion" yaml:"microcodeVersion"` + MicrocodeVersion AttestationVersion `json:"microcodeVersion" yaml:"microcodeVersion"` // description: | // AMD Root Key certificate used to verify the SEV-SNP certificate chain. AMDRootKey Certificate `json:"amdRootKey" yaml:"amdRootKey"` @@ -1120,16 +1089,16 @@ type AzureSEVSNP struct { Measurements measurements.M `json:"measurements" yaml:"measurements" validate:"required,no_placeholders"` // description: | // Lowest acceptable bootloader version. - BootloaderVersion AttestationVersion[uint8] `json:"bootloaderVersion" yaml:"bootloaderVersion"` + BootloaderVersion AttestationVersion `json:"bootloaderVersion" yaml:"bootloaderVersion"` // description: | // Lowest acceptable TEE version. - TEEVersion AttestationVersion[uint8] `json:"teeVersion" yaml:"teeVersion"` + TEEVersion AttestationVersion `json:"teeVersion" yaml:"teeVersion"` // description: | // Lowest acceptable SEV-SNP version. - SNPVersion AttestationVersion[uint8] `json:"snpVersion" yaml:"snpVersion"` + SNPVersion AttestationVersion `json:"snpVersion" yaml:"snpVersion"` // description: | // Lowest acceptable microcode version. - MicrocodeVersion AttestationVersion[uint8] `json:"microcodeVersion" yaml:"microcodeVersion"` + MicrocodeVersion AttestationVersion `json:"microcodeVersion" yaml:"microcodeVersion"` // description: | // Configuration for validating the firmware signature. FirmwareSignerConfig SNPFirmwareSignerConfig `json:"firmwareSignerConfig" yaml:"firmwareSignerConfig"` @@ -1138,7 +1107,7 @@ type AzureSEVSNP struct { AMDRootKey Certificate `json:"amdRootKey" yaml:"amdRootKey"` // description: | // AMD Signing Key certificate used to verify the SEV-SNP VCEK / VLEK certificate. - AMDSigningKey Certificate `json:"amdSigningKey,omitempty" yaml:"amdSigningKey,omitempty"` + AMDSigningKey Certificate `json:"amdSigningKey,omitempty" yaml:"amdSigningKey,omitempty" validate:"len=0"` } // AzureTrustedLaunch is the configuration for Azure Trusted Launch attestation. @@ -1155,22 +1124,22 @@ type AzureTDX struct { Measurements measurements.M `json:"measurements" yaml:"measurements" validate:"required,no_placeholders"` // description: | // Minimum required QE security version number (SVN). - QESVN AttestationVersion[uint16] `json:"qeSVN" yaml:"qeSVN"` + QESVN uint16 `json:"qeSVN" yaml:"qeSVN"` // description: | // Minimum required PCE security version number (SVN). - PCESVN AttestationVersion[uint16] `json:"pceSVN" yaml:"pceSVN"` + PCESVN uint16 `json:"pceSVN" yaml:"pceSVN"` // description: | // Component-wise minimum required 16 byte hex-encoded TEE_TCB security version number (SVN). 
- TEETCBSVN AttestationVersion[encoding.HexBytes] `json:"teeTCBSVN" yaml:"teeTCBSVN"` + TEETCBSVN encoding.HexBytes `json:"teeTCBSVN" yaml:"teeTCBSVN"` // description: | // Expected 16 byte hex-encoded QE_VENDOR_ID field. - QEVendorID AttestationVersion[encoding.HexBytes] `json:"qeVendorID" yaml:"qeVendorID"` + QEVendorID encoding.HexBytes `json:"qeVendorID" yaml:"qeVendorID"` // description: | // Expected 48 byte hex-encoded MR_SEAM value. - MRSeam encoding.HexBytes `json:"mrSeam,omitempty" yaml:"mrSeam,omitempty"` + MRSeam encoding.HexBytes `json:"mrSeam" yaml:"mrSeam"` // description: | - // Expected 8 byte hex-encoded eXtended Features Available Mask (XFAM) field. Defaults to the latest available XFAM on Azure VMs. Unset to disable validation. - XFAM AttestationVersion[encoding.HexBytes] `json:"xfam" yaml:"xfam"` + // Expected 8 byte hex-encoded XFAM field. + XFAM encoding.HexBytes `json:"xfam" yaml:"xfam"` // description: | // Intel Root Key certificate used to verify the TDX certificate chain. IntelRootKey Certificate `json:"intelRootKey" yaml:"intelRootKey"` diff --git a/internal/config/config_doc.go b/internal/config/config_doc.go index b87db6b86..2168b7f98 100644 --- a/internal/config/config_doc.go +++ b/internal/config/config_doc.go @@ -23,7 +23,6 @@ var ( UnsupportedAppRegistrationErrorDoc encoder.Doc SNPFirmwareSignerConfigDoc encoder.Doc GCPSEVESDoc encoder.Doc - GCPSEVSNPDoc encoder.Doc QEMUVTPMDoc encoder.Doc QEMUTDXDoc encoder.Doc AWSSEVSNPDoc encoder.Doc @@ -37,7 +36,7 @@ func init() { ConfigDoc.Type = "Config" ConfigDoc.Comments[encoder.LineComment] = "Config defines configuration used by CLI." ConfigDoc.Description = "Config defines configuration used by CLI." - ConfigDoc.Fields = make([]encoder.Doc, 13) + ConfigDoc.Fields = make([]encoder.Doc, 12) ConfigDoc.Fields[0].Name = "version" ConfigDoc.Fields[0].Type = "string" ConfigDoc.Fields[0].Note = "" @@ -83,26 +82,21 @@ func init() { ConfigDoc.Fields[8].Note = "" ConfigDoc.Fields[8].Description = "The Kubernetes Service CIDR to be used for the cluster. This value will only be used during the first initialization of the Constellation." ConfigDoc.Fields[8].Comments[encoder.LineComment] = "The Kubernetes Service CIDR to be used for the cluster. This value will only be used during the first initialization of the Constellation." - ConfigDoc.Fields[9].Name = "tags" - ConfigDoc.Fields[9].Type = "Tags" + ConfigDoc.Fields[9].Name = "provider" + ConfigDoc.Fields[9].Type = "ProviderConfig" ConfigDoc.Fields[9].Note = "" - ConfigDoc.Fields[9].Description = "Additional tags that are applied to created resources." - ConfigDoc.Fields[9].Comments[encoder.LineComment] = "Additional tags that are applied to created resources." - ConfigDoc.Fields[10].Name = "provider" - ConfigDoc.Fields[10].Type = "ProviderConfig" + ConfigDoc.Fields[9].Description = "Supported cloud providers and their specific configurations." + ConfigDoc.Fields[9].Comments[encoder.LineComment] = "Supported cloud providers and their specific configurations." + ConfigDoc.Fields[10].Name = "nodeGroups" + ConfigDoc.Fields[10].Type = "map[string]NodeGroup" ConfigDoc.Fields[10].Note = "" - ConfigDoc.Fields[10].Description = "Supported cloud providers and their specific configurations." - ConfigDoc.Fields[10].Comments[encoder.LineComment] = "Supported cloud providers and their specific configurations." - ConfigDoc.Fields[11].Name = "nodeGroups" - ConfigDoc.Fields[11].Type = "map[string]NodeGroup" + ConfigDoc.Fields[10].Description = "Node groups to be created in the cluster." 
+ ConfigDoc.Fields[10].Comments[encoder.LineComment] = "Node groups to be created in the cluster." + ConfigDoc.Fields[11].Name = "attestation" + ConfigDoc.Fields[11].Type = "AttestationConfig" ConfigDoc.Fields[11].Note = "" - ConfigDoc.Fields[11].Description = "Node groups to be created in the cluster." - ConfigDoc.Fields[11].Comments[encoder.LineComment] = "Node groups to be created in the cluster." - ConfigDoc.Fields[12].Name = "attestation" - ConfigDoc.Fields[12].Type = "AttestationConfig" - ConfigDoc.Fields[12].Note = "" - ConfigDoc.Fields[12].Description = "Configuration for attestation validation. This configuration provides sensible defaults for the Constellation version it was created for.\nSee the docs for an overview on attestation: https://docs.edgeless.systems/constellation/architecture/attestation" - ConfigDoc.Fields[12].Comments[encoder.LineComment] = "Configuration for attestation validation. This configuration provides sensible defaults for the Constellation version it was created for.\nSee the docs for an overview on attestation: https://docs.edgeless.systems/constellation/architecture/attestation" + ConfigDoc.Fields[11].Description = "Configuration for attestation validation. This configuration provides sensible defaults for the Constellation version it was created for.\nSee the docs for an overview on attestation: https://docs.edgeless.systems/constellation/architecture/attestation" + ConfigDoc.Fields[11].Comments[encoder.LineComment] = "Configuration for attestation validation. This configuration provides sensible defaults for the Constellation version it was created for.\nSee the docs for an overview on attestation: https://docs.edgeless.systems/constellation/architecture/attestation" ProviderConfigDoc.Type = "ProviderConfig" ProviderConfigDoc.Comments[encoder.LineComment] = "ProviderConfig are cloud-provider specific configuration values used by the CLI." @@ -241,7 +235,7 @@ func init() { FieldName: "gcp", }, } - GCPConfigDoc.Fields = make([]encoder.Doc, 7) + GCPConfigDoc.Fields = make([]encoder.Doc, 6) GCPConfigDoc.Fields[0].Name = "project" GCPConfigDoc.Fields[0].Type = "string" GCPConfigDoc.Fields[0].Note = "" @@ -262,21 +256,16 @@ func init() { GCPConfigDoc.Fields[3].Note = "" GCPConfigDoc.Fields[3].Description = "Path of service account key file. For required service account roles, see https://docs.edgeless.systems/constellation/getting-started/install#authorization" GCPConfigDoc.Fields[3].Comments[encoder.LineComment] = "Path of service account key file. For required service account roles, see https://docs.edgeless.systems/constellation/getting-started/install#authorization" - GCPConfigDoc.Fields[4].Name = "IAMServiceAccountVM" - GCPConfigDoc.Fields[4].Type = "string" + GCPConfigDoc.Fields[4].Name = "deployCSIDriver" + GCPConfigDoc.Fields[4].Type = "bool" GCPConfigDoc.Fields[4].Note = "" - GCPConfigDoc.Fields[4].Description = "GCP service account mail address. This is being attached to the VMs for authorization." - GCPConfigDoc.Fields[4].Comments[encoder.LineComment] = "GCP service account mail address. This is being attached to the VMs for authorization." - GCPConfigDoc.Fields[5].Name = "deployCSIDriver" + GCPConfigDoc.Fields[4].Description = "Deploy Persistent Disk CSI driver with on-node encryption. For details see: https://docs.edgeless.systems/constellation/architecture/encrypted-storage" + GCPConfigDoc.Fields[4].Comments[encoder.LineComment] = "Deploy Persistent Disk CSI driver with on-node encryption. 
For details see: https://docs.edgeless.systems/constellation/architecture/encrypted-storage" + GCPConfigDoc.Fields[5].Name = "useMarketplaceImage" GCPConfigDoc.Fields[5].Type = "bool" GCPConfigDoc.Fields[5].Note = "" - GCPConfigDoc.Fields[5].Description = "Deploy Persistent Disk CSI driver with on-node encryption. For details see: https://docs.edgeless.systems/constellation/architecture/encrypted-storage" - GCPConfigDoc.Fields[5].Comments[encoder.LineComment] = "Deploy Persistent Disk CSI driver with on-node encryption. For details see: https://docs.edgeless.systems/constellation/architecture/encrypted-storage" - GCPConfigDoc.Fields[6].Name = "useMarketplaceImage" - GCPConfigDoc.Fields[6].Type = "bool" - GCPConfigDoc.Fields[6].Note = "" - GCPConfigDoc.Fields[6].Description = "Use the specified GCP Marketplace image offering." - GCPConfigDoc.Fields[6].Comments[encoder.LineComment] = "Use the specified GCP Marketplace image offering." + GCPConfigDoc.Fields[5].Description = "Use the specified GCP Marketplace image offering." + GCPConfigDoc.Fields[5].Comments[encoder.LineComment] = "Use the specified GCP Marketplace image offering." OpenStackConfigDoc.Type = "OpenStackConfig" OpenStackConfigDoc.Comments[encoder.LineComment] = "OpenStackConfig holds config information for OpenStack based Constellation deployments." @@ -399,7 +388,7 @@ func init() { FieldName: "attestation", }, } - AttestationConfigDoc.Fields = make([]encoder.Doc, 9) + AttestationConfigDoc.Fields = make([]encoder.Doc, 8) AttestationConfigDoc.Fields[0].Name = "awsSEVSNP" AttestationConfigDoc.Fields[0].Type = "AWSSEVSNP" AttestationConfigDoc.Fields[0].Note = "" @@ -430,21 +419,16 @@ func init() { AttestationConfigDoc.Fields[5].Note = "" AttestationConfigDoc.Fields[5].Description = "GCP SEV-ES attestation." AttestationConfigDoc.Fields[5].Comments[encoder.LineComment] = "GCP SEV-ES attestation." - AttestationConfigDoc.Fields[6].Name = "gcpSEVSNP" - AttestationConfigDoc.Fields[6].Type = "GCPSEVSNP" + AttestationConfigDoc.Fields[6].Name = "qemuTDX" + AttestationConfigDoc.Fields[6].Type = "QEMUTDX" AttestationConfigDoc.Fields[6].Note = "" - AttestationConfigDoc.Fields[6].Description = "GCP SEV-SNP attestation." - AttestationConfigDoc.Fields[6].Comments[encoder.LineComment] = "GCP SEV-SNP attestation." - AttestationConfigDoc.Fields[7].Name = "qemuTDX" - AttestationConfigDoc.Fields[7].Type = "QEMUTDX" + AttestationConfigDoc.Fields[6].Description = "QEMU tdx attestation." + AttestationConfigDoc.Fields[6].Comments[encoder.LineComment] = "QEMU tdx attestation." + AttestationConfigDoc.Fields[7].Name = "qemuVTPM" + AttestationConfigDoc.Fields[7].Type = "QEMUVTPM" AttestationConfigDoc.Fields[7].Note = "" - AttestationConfigDoc.Fields[7].Description = "QEMU tdx attestation." - AttestationConfigDoc.Fields[7].Comments[encoder.LineComment] = "QEMU tdx attestation." - AttestationConfigDoc.Fields[8].Name = "qemuVTPM" - AttestationConfigDoc.Fields[8].Type = "QEMUVTPM" - AttestationConfigDoc.Fields[8].Note = "" - AttestationConfigDoc.Fields[8].Description = "QEMU vTPM attestation." - AttestationConfigDoc.Fields[8].Comments[encoder.LineComment] = "QEMU vTPM attestation." + AttestationConfigDoc.Fields[7].Description = "QEMU vTPM attestation." + AttestationConfigDoc.Fields[7].Comments[encoder.LineComment] = "QEMU vTPM attestation." NodeGroupDoc.Type = "NodeGroup" NodeGroupDoc.Comments[encoder.LineComment] = "NodeGroup defines a group of nodes with the same role and configuration." 
@@ -534,52 +518,6 @@ func init() { GCPSEVESDoc.Fields[0].Description = "Expected TPM measurements." GCPSEVESDoc.Fields[0].Comments[encoder.LineComment] = "Expected TPM measurements." - GCPSEVSNPDoc.Type = "GCPSEVSNP" - GCPSEVSNPDoc.Comments[encoder.LineComment] = "GCPSEVSNP is the configuration for GCP SEV-SNP attestation." - GCPSEVSNPDoc.Description = "GCPSEVSNP is the configuration for GCP SEV-SNP attestation." - GCPSEVSNPDoc.AppearsIn = []encoder.Appearance{ - { - TypeName: "AttestationConfig", - FieldName: "gcpSEVSNP", - }, - } - GCPSEVSNPDoc.Fields = make([]encoder.Doc, 7) - GCPSEVSNPDoc.Fields[0].Name = "measurements" - GCPSEVSNPDoc.Fields[0].Type = "M" - GCPSEVSNPDoc.Fields[0].Note = "" - GCPSEVSNPDoc.Fields[0].Description = "Expected TPM measurements." - GCPSEVSNPDoc.Fields[0].Comments[encoder.LineComment] = "Expected TPM measurements." - GCPSEVSNPDoc.Fields[1].Name = "bootloaderVersion" - GCPSEVSNPDoc.Fields[1].Type = "" - GCPSEVSNPDoc.Fields[1].Note = "" - GCPSEVSNPDoc.Fields[1].Description = "Lowest acceptable bootloader version." - GCPSEVSNPDoc.Fields[1].Comments[encoder.LineComment] = "Lowest acceptable bootloader version." - GCPSEVSNPDoc.Fields[2].Name = "teeVersion" - GCPSEVSNPDoc.Fields[2].Type = "" - GCPSEVSNPDoc.Fields[2].Note = "" - GCPSEVSNPDoc.Fields[2].Description = "Lowest acceptable TEE version." - GCPSEVSNPDoc.Fields[2].Comments[encoder.LineComment] = "Lowest acceptable TEE version." - GCPSEVSNPDoc.Fields[3].Name = "snpVersion" - GCPSEVSNPDoc.Fields[3].Type = "" - GCPSEVSNPDoc.Fields[3].Note = "" - GCPSEVSNPDoc.Fields[3].Description = "Lowest acceptable SEV-SNP version." - GCPSEVSNPDoc.Fields[3].Comments[encoder.LineComment] = "Lowest acceptable SEV-SNP version." - GCPSEVSNPDoc.Fields[4].Name = "microcodeVersion" - GCPSEVSNPDoc.Fields[4].Type = "" - GCPSEVSNPDoc.Fields[4].Note = "" - GCPSEVSNPDoc.Fields[4].Description = "Lowest acceptable microcode version." - GCPSEVSNPDoc.Fields[4].Comments[encoder.LineComment] = "Lowest acceptable microcode version." - GCPSEVSNPDoc.Fields[5].Name = "amdRootKey" - GCPSEVSNPDoc.Fields[5].Type = "Certificate" - GCPSEVSNPDoc.Fields[5].Note = "" - GCPSEVSNPDoc.Fields[5].Description = "AMD Root Key certificate used to verify the SEV-SNP certificate chain." - GCPSEVSNPDoc.Fields[5].Comments[encoder.LineComment] = "AMD Root Key certificate used to verify the SEV-SNP certificate chain." - GCPSEVSNPDoc.Fields[6].Name = "amdSigningKey" - GCPSEVSNPDoc.Fields[6].Type = "Certificate" - GCPSEVSNPDoc.Fields[6].Note = "" - GCPSEVSNPDoc.Fields[6].Description = "AMD Signing Key certificate used to verify the SEV-SNP VCEK / VLEK certificate." - GCPSEVSNPDoc.Fields[6].Comments[encoder.LineComment] = "AMD Signing Key certificate used to verify the SEV-SNP VCEK / VLEK certificate." - QEMUVTPMDoc.Type = "QEMUVTPM" QEMUVTPMDoc.Comments[encoder.LineComment] = "QEMUVTPM is the configuration for QEMU vTPM attestation." QEMUVTPMDoc.Description = "QEMUVTPM is the configuration for QEMU vTPM attestation." @@ -628,22 +566,22 @@ func init() { AWSSEVSNPDoc.Fields[0].Description = "Expected TPM measurements." AWSSEVSNPDoc.Fields[0].Comments[encoder.LineComment] = "Expected TPM measurements." AWSSEVSNPDoc.Fields[1].Name = "bootloaderVersion" - AWSSEVSNPDoc.Fields[1].Type = "" + AWSSEVSNPDoc.Fields[1].Type = "AttestationVersion" AWSSEVSNPDoc.Fields[1].Note = "" AWSSEVSNPDoc.Fields[1].Description = "Lowest acceptable bootloader version." AWSSEVSNPDoc.Fields[1].Comments[encoder.LineComment] = "Lowest acceptable bootloader version." 
AWSSEVSNPDoc.Fields[2].Name = "teeVersion" - AWSSEVSNPDoc.Fields[2].Type = "" + AWSSEVSNPDoc.Fields[2].Type = "AttestationVersion" AWSSEVSNPDoc.Fields[2].Note = "" AWSSEVSNPDoc.Fields[2].Description = "Lowest acceptable TEE version." AWSSEVSNPDoc.Fields[2].Comments[encoder.LineComment] = "Lowest acceptable TEE version." AWSSEVSNPDoc.Fields[3].Name = "snpVersion" - AWSSEVSNPDoc.Fields[3].Type = "" + AWSSEVSNPDoc.Fields[3].Type = "AttestationVersion" AWSSEVSNPDoc.Fields[3].Note = "" AWSSEVSNPDoc.Fields[3].Description = "Lowest acceptable SEV-SNP version." AWSSEVSNPDoc.Fields[3].Comments[encoder.LineComment] = "Lowest acceptable SEV-SNP version." AWSSEVSNPDoc.Fields[4].Name = "microcodeVersion" - AWSSEVSNPDoc.Fields[4].Type = "" + AWSSEVSNPDoc.Fields[4].Type = "AttestationVersion" AWSSEVSNPDoc.Fields[4].Note = "" AWSSEVSNPDoc.Fields[4].Description = "Lowest acceptable microcode version." AWSSEVSNPDoc.Fields[4].Comments[encoder.LineComment] = "Lowest acceptable microcode version." @@ -690,22 +628,22 @@ func init() { AzureSEVSNPDoc.Fields[0].Description = "Expected TPM measurements." AzureSEVSNPDoc.Fields[0].Comments[encoder.LineComment] = "Expected TPM measurements." AzureSEVSNPDoc.Fields[1].Name = "bootloaderVersion" - AzureSEVSNPDoc.Fields[1].Type = "" + AzureSEVSNPDoc.Fields[1].Type = "AttestationVersion" AzureSEVSNPDoc.Fields[1].Note = "" AzureSEVSNPDoc.Fields[1].Description = "Lowest acceptable bootloader version." AzureSEVSNPDoc.Fields[1].Comments[encoder.LineComment] = "Lowest acceptable bootloader version." AzureSEVSNPDoc.Fields[2].Name = "teeVersion" - AzureSEVSNPDoc.Fields[2].Type = "" + AzureSEVSNPDoc.Fields[2].Type = "AttestationVersion" AzureSEVSNPDoc.Fields[2].Note = "" AzureSEVSNPDoc.Fields[2].Description = "Lowest acceptable TEE version." AzureSEVSNPDoc.Fields[2].Comments[encoder.LineComment] = "Lowest acceptable TEE version." AzureSEVSNPDoc.Fields[3].Name = "snpVersion" - AzureSEVSNPDoc.Fields[3].Type = "" + AzureSEVSNPDoc.Fields[3].Type = "AttestationVersion" AzureSEVSNPDoc.Fields[3].Note = "" AzureSEVSNPDoc.Fields[3].Description = "Lowest acceptable SEV-SNP version." AzureSEVSNPDoc.Fields[3].Comments[encoder.LineComment] = "Lowest acceptable SEV-SNP version." AzureSEVSNPDoc.Fields[4].Name = "microcodeVersion" - AzureSEVSNPDoc.Fields[4].Type = "" + AzureSEVSNPDoc.Fields[4].Type = "AttestationVersion" AzureSEVSNPDoc.Fields[4].Note = "" AzureSEVSNPDoc.Fields[4].Description = "Lowest acceptable microcode version." AzureSEVSNPDoc.Fields[4].Comments[encoder.LineComment] = "Lowest acceptable microcode version." @@ -757,22 +695,22 @@ func init() { AzureTDXDoc.Fields[0].Description = "Expected TPM measurements." AzureTDXDoc.Fields[0].Comments[encoder.LineComment] = "Expected TPM measurements." AzureTDXDoc.Fields[1].Name = "qeSVN" - AzureTDXDoc.Fields[1].Type = "" + AzureTDXDoc.Fields[1].Type = "uint16" AzureTDXDoc.Fields[1].Note = "" AzureTDXDoc.Fields[1].Description = "Minimum required QE security version number (SVN)." AzureTDXDoc.Fields[1].Comments[encoder.LineComment] = "Minimum required QE security version number (SVN)." AzureTDXDoc.Fields[2].Name = "pceSVN" - AzureTDXDoc.Fields[2].Type = "" + AzureTDXDoc.Fields[2].Type = "uint16" AzureTDXDoc.Fields[2].Note = "" AzureTDXDoc.Fields[2].Description = "Minimum required PCE security version number (SVN)." AzureTDXDoc.Fields[2].Comments[encoder.LineComment] = "Minimum required PCE security version number (SVN)." 
AzureTDXDoc.Fields[3].Name = "teeTCBSVN" - AzureTDXDoc.Fields[3].Type = "" + AzureTDXDoc.Fields[3].Type = "HexBytes" AzureTDXDoc.Fields[3].Note = "" AzureTDXDoc.Fields[3].Description = "Component-wise minimum required 16 byte hex-encoded TEE_TCB security version number (SVN)." AzureTDXDoc.Fields[3].Comments[encoder.LineComment] = "Component-wise minimum required 16 byte hex-encoded TEE_TCB security version number (SVN)." AzureTDXDoc.Fields[4].Name = "qeVendorID" - AzureTDXDoc.Fields[4].Type = "" + AzureTDXDoc.Fields[4].Type = "HexBytes" AzureTDXDoc.Fields[4].Note = "" AzureTDXDoc.Fields[4].Description = "Expected 16 byte hex-encoded QE_VENDOR_ID field." AzureTDXDoc.Fields[4].Comments[encoder.LineComment] = "Expected 16 byte hex-encoded QE_VENDOR_ID field." @@ -782,10 +720,10 @@ func init() { AzureTDXDoc.Fields[5].Description = "Expected 48 byte hex-encoded MR_SEAM value." AzureTDXDoc.Fields[5].Comments[encoder.LineComment] = "Expected 48 byte hex-encoded MR_SEAM value." AzureTDXDoc.Fields[6].Name = "xfam" - AzureTDXDoc.Fields[6].Type = "" + AzureTDXDoc.Fields[6].Type = "HexBytes" AzureTDXDoc.Fields[6].Note = "" - AzureTDXDoc.Fields[6].Description = "Expected 8 byte hex-encoded eXtended Features Available Mask (XFAM) field. Defaults to the latest available XFAM on Azure VMs. Unset to disable validation." - AzureTDXDoc.Fields[6].Comments[encoder.LineComment] = "Expected 8 byte hex-encoded eXtended Features Available Mask (XFAM) field. Defaults to the latest available XFAM on Azure VMs. Unset to disable validation." + AzureTDXDoc.Fields[6].Description = "Expected 8 byte hex-encoded XFAM field." + AzureTDXDoc.Fields[6].Comments[encoder.LineComment] = "Expected 8 byte hex-encoded XFAM field." AzureTDXDoc.Fields[7].Name = "intelRootKey" AzureTDXDoc.Fields[7].Type = "Certificate" AzureTDXDoc.Fields[7].Note = "" @@ -841,10 +779,6 @@ func (_ GCPSEVES) Doc() *encoder.Doc { return &GCPSEVESDoc } -func (_ GCPSEVSNP) Doc() *encoder.Doc { - return &GCPSEVSNPDoc -} - func (_ QEMUVTPM) Doc() *encoder.Doc { return &QEMUVTPMDoc } @@ -891,7 +825,6 @@ func GetConfigurationDoc() *encoder.FileDoc { &UnsupportedAppRegistrationErrorDoc, &SNPFirmwareSignerConfigDoc, &GCPSEVESDoc, - &GCPSEVSNPDoc, &QEMUVTPMDoc, &QEMUTDXDoc, &AWSSEVSNPDoc, diff --git a/internal/config/config_test.go b/internal/config/config_test.go index 1c9fbe50f..013c50edc 100644 --- a/internal/config/config_test.go +++ b/internal/config/config_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package config @@ -70,10 +70,10 @@ func TestGetAttestationConfigMarshalsNumericalVersion(t *testing.T) { var mp map[string]interface{} require.NoError(yaml.Unmarshal(bt, &mp)) assert := assert.New(t) - assert.EqualValues(placeholderVersionValue[uint8](), mp["microcodeVersion"]) - assert.EqualValues(placeholderVersionValue[uint8](), mp["teeVersion"]) - assert.EqualValues(placeholderVersionValue[uint8](), mp["snpVersion"]) - assert.EqualValues(placeholderVersionValue[uint8](), mp["bootloaderVersion"]) + assert.Equal(placeholderVersionValue, mp["microcodeVersion"]) + assert.Equal(placeholderVersionValue, mp["teeVersion"]) + assert.Equal(placeholderVersionValue, mp["snpVersion"]) + assert.Equal(placeholderVersionValue, mp["bootloaderVersion"]) } func TestNew(t *testing.T) { @@ -99,19 +99,19 @@ func TestNew(t *testing.T) { wantResult: func() *Config { conf := Default() modifyConfigForAzureToPassValidate(conf) - conf.Attestation.AzureSEVSNP.MicrocodeVersion = 
AttestationVersion[uint8]{ + conf.Attestation.AzureSEVSNP.MicrocodeVersion = AttestationVersion{ Value: testCfg.Microcode, WantLatest: true, } - conf.Attestation.AzureSEVSNP.TEEVersion = AttestationVersion[uint8]{ + conf.Attestation.AzureSEVSNP.TEEVersion = AttestationVersion{ Value: 2, WantLatest: false, } - conf.Attestation.AzureSEVSNP.BootloaderVersion = AttestationVersion[uint8]{ + conf.Attestation.AzureSEVSNP.BootloaderVersion = AttestationVersion{ Value: 1, WantLatest: false, } - conf.Attestation.AzureSEVSNP.SNPVersion = AttestationVersion[uint8]{ + conf.Attestation.AzureSEVSNP.SNPVersion = AttestationVersion{ Value: testCfg.SNP, WantLatest: true, } @@ -328,12 +328,12 @@ func TestFromFile(t *testing.T) { } func TestValidate(t *testing.T) { - const defaultErrCount = 33 // expect this number of error messages by default because user-specific values are not set and multiple providers are defined by default + const defaultErrCount = 32 // expect this number of error messages by default because user-specific values are not set and multiple providers are defined by default const azErrCount = 7 const awsErrCount = 8 const gcpErrCount = 8 - // TODO(AB#3132): refactor config validation tests + // TODO(AB#3132,3u13r): refactor config validation tests // Note that the `cnf.Image = ""` is a hack to align `bazel test` with `go test` behavior // since first does version stamping. testCases := map[string]struct { @@ -464,10 +464,9 @@ func TestValidate(t *testing.T) { gcp.Project = "test-project" gcp.Zone = "test-zone" gcp.ServiceAccountKeyPath = "test-key-path" - gcp.IAMServiceAccountVM = "example@example.com" cnf.Provider = ProviderConfig{} cnf.Provider.GCP = gcp - cnf.Attestation.GCPSEVSNP.Measurements = measurements.M{ + cnf.Attestation.GCPSEVES.Measurements = measurements.M{ 0: measurements.WithAllBytes(0x00, measurements.Enforce, measurements.PCRMeasurementLength), } cnf.NodeGroups = map[string]NodeGroup{ @@ -625,11 +624,11 @@ func TestConfig_UpdateMeasurements(t *testing.T) { { // GCP conf := Default() conf.RemoveProviderAndAttestationExcept(cloudprovider.GCP) - for k := range conf.Attestation.GCPSEVSNP.Measurements { - delete(conf.Attestation.GCPSEVSNP.Measurements, k) + for k := range conf.Attestation.GCPSEVES.Measurements { + delete(conf.Attestation.GCPSEVES.Measurements, k) } conf.UpdateMeasurements(newMeasurements) - assert.Equal(newMeasurements, conf.Attestation.GCPSEVSNP.Measurements) + assert.Equal(newMeasurements, conf.Attestation.GCPSEVES.Measurements) } { // QEMU conf := Default() @@ -689,80 +688,62 @@ func TestValidInstanceTypeForProvider(t *testing.T) { testCases := map[string]struct { variant variant.Variant instanceTypes []string - providerConfig ProviderConfig expectedResult bool }{ "empty all": { variant: variant.Dummy{}, instanceTypes: []string{}, expectedResult: false, - providerConfig: ProviderConfig{}, }, "empty aws": { variant: variant.AWSSEVSNP{}, instanceTypes: []string{}, expectedResult: false, - providerConfig: ProviderConfig{}, }, "empty azure only CVMs": { variant: variant.AzureSEVSNP{}, instanceTypes: []string{}, expectedResult: false, - providerConfig: ProviderConfig{}, }, "empty azure with non-CVMs": { variant: variant.AzureTrustedLaunch{}, instanceTypes: []string{}, expectedResult: false, - providerConfig: ProviderConfig{}, }, "empty gcp": { variant: variant.GCPSEVES{}, instanceTypes: []string{}, expectedResult: false, - providerConfig: ProviderConfig{}, }, "azure only CVMs (SNP)": { variant: variant.AzureSEVSNP{}, instanceTypes: 
instancetypes.AzureSNPInstanceTypes, expectedResult: true, - providerConfig: ProviderConfig{}, }, "azure only CVMs (TDX)": { variant: variant.AzureTDX{}, instanceTypes: instancetypes.AzureTDXInstanceTypes, expectedResult: true, - providerConfig: ProviderConfig{}, }, "azure trusted launch VMs": { variant: variant.AzureTrustedLaunch{}, instanceTypes: instancetypes.AzureTrustedLaunchInstanceTypes, expectedResult: true, - providerConfig: ProviderConfig{}, }, "gcp": { variant: variant.GCPSEVES{}, instanceTypes: instancetypes.GCPInstanceTypes, expectedResult: true, - providerConfig: ProviderConfig{}, - }, - "gcp sev-snp": { - variant: variant.GCPSEVSNP{}, - instanceTypes: instancetypes.GCPInstanceTypes, - expectedResult: true, - providerConfig: ProviderConfig{}, }, "put gcp when azure is set": { variant: variant.AzureSEVSNP{}, instanceTypes: instancetypes.GCPInstanceTypes, expectedResult: false, - providerConfig: ProviderConfig{}, }, "put azure when gcp is set": { variant: variant.GCPSEVES{}, instanceTypes: instancetypes.AzureSNPInstanceTypes, expectedResult: false, - providerConfig: ProviderConfig{}, }, // Testing every possible instance type for AWS is not feasible, so we just test a few based on known supported / unsupported families // Also serves as a test for checkIfInstanceInValidAWSFamilys @@ -770,79 +751,31 @@ func TestValidInstanceTypeForProvider(t *testing.T) { variant: variant.AWSSEVSNP{}, instanceTypes: []string{"c5.xlarge", "c5a.2xlarge", "c5a.16xlarge", "u-12tb1.112xlarge"}, expectedResult: false, // False because 2 two of the instances are not valid - providerConfig: ProviderConfig{}, }, "aws one valid instance one with too little vCPUs": { variant: variant.AWSSEVSNP{}, instanceTypes: []string{"c5.medium"}, expectedResult: false, - providerConfig: ProviderConfig{}, }, "aws graviton sub-family unsupported": { variant: variant.AWSSEVSNP{}, instanceTypes: []string{"m6g.xlarge", "r6g.2xlarge", "x2gd.xlarge", "g5g.8xlarge"}, expectedResult: false, - providerConfig: ProviderConfig{}, }, "aws combined two valid instances as one string": { variant: variant.AWSSEVSNP{}, instanceTypes: []string{"c5.xlarge, c5a.2xlarge"}, expectedResult: false, - providerConfig: ProviderConfig{}, }, "aws only CVMs": { variant: variant.AWSSEVSNP{}, instanceTypes: []string{"c6a.xlarge", "m6a.xlarge", "r6a.xlarge"}, expectedResult: true, - providerConfig: ProviderConfig{}, }, "aws nitroTPM VMs": { variant: variant.AWSNitroTPM{}, instanceTypes: []string{"c5.xlarge", "c5a.2xlarge", "c5a.16xlarge", "u-12tb1.112xlarge"}, expectedResult: true, - providerConfig: ProviderConfig{}, - }, - "stackit valid flavors": { - variant: variant.QEMUVTPM{}, - instanceTypes: []string{ - "m1a.2cd", - "m1a.4cd", - "m1a.8cd", - "m1a.16cd", - "m1a.30cd", - }, - expectedResult: true, - providerConfig: ProviderConfig{OpenStack: &OpenStackConfig{Cloud: "stackit"}}, - }, - "stackit not valid flavors": { - variant: variant.QEMUVTPM{}, - instanceTypes: []string{ - // removed the c which indicates a confidential flavor - "m1a.2d", - "m1a.4d", - "m1a.8d", - "m1a.16d", - "m1a.30d", - }, - expectedResult: false, - providerConfig: ProviderConfig{OpenStack: &OpenStackConfig{Cloud: "stackit"}}, - }, - "openstack cloud named test": { - variant: variant.QEMUVTPM{}, - instanceTypes: []string{ - "foo.bar", - "foo.bar1", - }, - expectedResult: true, - providerConfig: ProviderConfig{OpenStack: &OpenStackConfig{Cloud: "test"}}, - }, - "Qemutdx valid instance type": { - variant: variant.QEMUTDX{}, - instanceTypes: []string{ - "foo.bar", - }, - 
expectedResult: true, - providerConfig: ProviderConfig{QEMU: &QEMUConfig{}}, }, } for name, tc := range testCases { @@ -850,7 +783,7 @@ func TestValidInstanceTypeForProvider(t *testing.T) { assert := assert.New(t) for _, instanceType := range tc.instanceTypes { assert.Equal( - tc.expectedResult, validInstanceTypeForProvider(instanceType, tc.variant, tc.providerConfig), + tc.expectedResult, validInstanceTypeForProvider(instanceType, tc.variant), instanceType, ) } @@ -1113,8 +1046,18 @@ func getConfigAsMap(conf *Config, t *testing.T) (res configMap) { type stubAttestationFetcher struct{} -func (f stubAttestationFetcher) FetchLatestVersion(_ context.Context, _ variant.Variant) (attestationconfigapi.Entry, error) { - return attestationconfigapi.Entry{ +func (f stubAttestationFetcher) FetchSEVSNPVersionList(_ context.Context, _ attestationconfigapi.SEVSNPVersionList) (attestationconfigapi.SEVSNPVersionList, error) { + return attestationconfigapi.SEVSNPVersionList{}, nil +} + +func (f stubAttestationFetcher) FetchSEVSNPVersion(_ context.Context, _ attestationconfigapi.SEVSNPVersionAPI) (attestationconfigapi.SEVSNPVersionAPI, error) { + return attestationconfigapi.SEVSNPVersionAPI{ + SEVSNPVersion: testCfg, + }, nil +} + +func (f stubAttestationFetcher) FetchSEVSNPVersionLatest(_ context.Context, _ variant.Variant) (attestationconfigapi.SEVSNPVersionAPI, error) { + return attestationconfigapi.SEVSNPVersionAPI{ SEVSNPVersion: testCfg, }, nil } diff --git a/internal/config/disktypes/aws.go b/internal/config/disktypes/aws.go index bb0e0586b..8fcdc354a 100644 --- a/internal/config/disktypes/aws.go +++ b/internal/config/disktypes/aws.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package disktypes diff --git a/internal/config/disktypes/azure.go b/internal/config/disktypes/azure.go index 94078b07f..8903a45d2 100644 --- a/internal/config/disktypes/azure.go +++ b/internal/config/disktypes/azure.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package disktypes diff --git a/internal/config/disktypes/gcp.go b/internal/config/disktypes/gcp.go index cfb9315c2..3880b9a2f 100644 --- a/internal/config/disktypes/gcp.go +++ b/internal/config/disktypes/gcp.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package disktypes diff --git a/internal/config/gcp.go b/internal/config/gcp.go deleted file mode 100644 index 79fe43399..000000000 --- a/internal/config/gcp.go +++ /dev/null @@ -1,128 +0,0 @@ -/* -Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 -*/ - -package config - -import ( - "bytes" - "context" - "fmt" - - "github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi" - "github.com/edgelesssys/constellation/v2/internal/attestation/measurements" - "github.com/edgelesssys/constellation/v2/internal/attestation/variant" - "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider" -) - -var _ svnResolveMarshaller = &GCPSEVSNP{} - -// DefaultForGCPSEVSNP provides a valid default configuration for GCP SEV-SNP attestation. 
-func DefaultForGCPSEVSNP() *GCPSEVSNP { - return &GCPSEVSNP{ - Measurements: measurements.DefaultsFor(cloudprovider.GCP, variant.GCPSEVSNP{}), - BootloaderVersion: NewLatestPlaceholderVersion[uint8](), - TEEVersion: NewLatestPlaceholderVersion[uint8](), - SNPVersion: NewLatestPlaceholderVersion[uint8](), - MicrocodeVersion: NewLatestPlaceholderVersion[uint8](), - AMDRootKey: mustParsePEM(arkPEM), - } -} - -// GetVariant returns gcp-sev-snp as the variant. -func (GCPSEVSNP) GetVariant() variant.Variant { - return variant.GCPSEVSNP{} -} - -// GetMeasurements returns the measurements used for attestation. -func (c GCPSEVSNP) GetMeasurements() measurements.M { - return c.Measurements -} - -// SetMeasurements updates a config's measurements using the given measurements. -func (c *GCPSEVSNP) SetMeasurements(m measurements.M) { - c.Measurements = m -} - -// EqualTo returns true if the config is equal to the given config. -func (c GCPSEVSNP) EqualTo(other AttestationCfg) (bool, error) { - otherCfg, ok := other.(*GCPSEVSNP) - if !ok { - return false, fmt.Errorf("cannot compare %T with %T", c, other) - } - - measurementsEqual := c.Measurements.EqualTo(otherCfg.Measurements) - bootloaderEqual := c.BootloaderVersion == otherCfg.BootloaderVersion - teeEqual := c.TEEVersion == otherCfg.TEEVersion - snpEqual := c.SNPVersion == otherCfg.SNPVersion - microcodeEqual := c.MicrocodeVersion == otherCfg.MicrocodeVersion - rootKeyEqual := bytes.Equal(c.AMDRootKey.Raw, otherCfg.AMDRootKey.Raw) - signingKeyEqual := bytes.Equal(c.AMDSigningKey.Raw, otherCfg.AMDSigningKey.Raw) - - return measurementsEqual && bootloaderEqual && teeEqual && snpEqual && microcodeEqual && rootKeyEqual && signingKeyEqual, nil -} - -func (c *GCPSEVSNP) getToMarshallLatestWithResolvedVersions() AttestationCfg { - cp := *c - cp.BootloaderVersion.WantLatest = false - cp.TEEVersion.WantLatest = false - cp.SNPVersion.WantLatest = false - cp.MicrocodeVersion.WantLatest = false - return &cp -} - -// FetchAndSetLatestVersionNumbers fetches the latest version numbers from the configapi and sets them. -func (c *GCPSEVSNP) FetchAndSetLatestVersionNumbers(ctx context.Context, fetcher attestationconfigapi.Fetcher) error { - // Only talk to the API if at least one version number is set to latest. - if !(c.BootloaderVersion.WantLatest || c.TEEVersion.WantLatest || c.SNPVersion.WantLatest || c.MicrocodeVersion.WantLatest) { - return nil - } - - versions, err := fetcher.FetchLatestVersion(ctx, variant.GCPSEVSNP{}) - if err != nil { - return fmt.Errorf("fetching latest TCB versions from configapi: %w", err) - } - // set number and keep isLatest flag - c.mergeWithLatestVersion(versions.SEVSNPVersion) - return nil -} - -func (c *GCPSEVSNP) mergeWithLatestVersion(latest attestationconfigapi.SEVSNPVersion) { - if c.BootloaderVersion.WantLatest { - c.BootloaderVersion.Value = latest.Bootloader - } - if c.TEEVersion.WantLatest { - c.TEEVersion.Value = latest.TEE - } - if c.SNPVersion.WantLatest { - c.SNPVersion.Value = latest.SNP - } - if c.MicrocodeVersion.WantLatest { - c.MicrocodeVersion.Value = latest.Microcode - } -} - -// GetVariant returns gcp-sev-es as the variant. -func (GCPSEVES) GetVariant() variant.Variant { - return variant.GCPSEVES{} -} - -// GetMeasurements returns the measurements used for attestation. -func (c GCPSEVES) GetMeasurements() measurements.M { - return c.Measurements -} - -// SetMeasurements updates a config's measurements using the given measurements. 
-func (c *GCPSEVES) SetMeasurements(m measurements.M) { - c.Measurements = m -} - -// EqualTo returns true if the config is equal to the given config. -func (c GCPSEVES) EqualTo(other AttestationCfg) (bool, error) { - otherCfg, ok := other.(*GCPSEVES) - if !ok { - return false, fmt.Errorf("cannot compare %T with %T", c, other) - } - return c.Measurements.EqualTo(otherCfg.Measurements), nil -} diff --git a/internal/config/image_enterprise.go b/internal/config/image_enterprise.go index 85fef6c52..ae2dbca8f 100644 --- a/internal/config/image_enterprise.go +++ b/internal/config/image_enterprise.go @@ -3,12 +3,12 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package config const ( // defaultImage is the default image to use. - defaultImage = "ref/main/stream/nightly/v2.24.0-pre.0.20250731155507-57874454f72c" + defaultImage = "v2.16.3" ) diff --git a/internal/config/image_oss.go b/internal/config/image_oss.go index dc9141a8e..939c16995 100644 --- a/internal/config/image_oss.go +++ b/internal/config/image_oss.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package config diff --git a/internal/config/imageversion/imageversion.go b/internal/config/imageversion/imageversion.go index 9e968715a..c295c40f4 100644 --- a/internal/config/imageversion/imageversion.go +++ b/internal/config/imageversion/imageversion.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package imageversion contains the pinned container images for the config. diff --git a/internal/config/imageversion/placeholder.go b/internal/config/imageversion/placeholder.go index a0273b96f..f5ba25ea4 100644 --- a/internal/config/imageversion/placeholder.go +++ b/internal/config/imageversion/placeholder.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package imageversion diff --git a/internal/config/instancetypes/aws.go b/internal/config/instancetypes/aws.go index edafba99d..712cc4f86 100644 --- a/internal/config/instancetypes/aws.go +++ b/internal/config/instancetypes/aws.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package instancetypes diff --git a/internal/config/instancetypes/azure.go b/internal/config/instancetypes/azure.go index 483e950bd..c07609710 100644 --- a/internal/config/instancetypes/azure.go +++ b/internal/config/instancetypes/azure.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package instancetypes diff --git a/internal/config/instancetypes/gcp.go b/internal/config/instancetypes/gcp.go index 5d85a9a60..c9d02a345 100644 --- a/internal/config/instancetypes/gcp.go +++ b/internal/config/instancetypes/gcp.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package instancetypes diff --git a/internal/config/instancetypes/stackit.go b/internal/config/instancetypes/stackit.go index 83ab851c1..68ea21d94 100644 --- a/internal/config/instancetypes/stackit.go +++ b/internal/config/instancetypes/stackit.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package 
instancetypes diff --git a/internal/config/migration/migration.go b/internal/config/migration/migration.go index 4799f162f..54ca54335 100644 --- a/internal/config/migration/migration.go +++ b/internal/config/migration/migration.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package migration contains outdated configuration formats and their migration functions. @@ -140,6 +140,10 @@ type AWSSEVSNP struct { // description: | // Expected TPM measurements. Measurements measurements.M `json:"measurements" yaml:"measurements" validate:"required,no_placeholders"` + // TODO(derpsteb): reenable launchMeasurement once SNP is fixed on AWS. + // description: | + // Expected launch measurement in SNP report. + // LaunchMeasurement measurements.Measurement `json:"launchMeasurement" yaml:"launchMeasurement" validate:"required"` } // AWSNitroTPM is the configuration for AWS Nitro TPM attestation. @@ -411,19 +415,19 @@ func V3ToV4(path string, fileHandler file.Handler) error { case cfgV3.Attestation.AzureSEVSNP != nil: cfgV4.Attestation.AzureSEVSNP = &config.AzureSEVSNP{ Measurements: cfgV3.Attestation.AzureSEVSNP.Measurements, - BootloaderVersion: config.AttestationVersion[uint8]{ + BootloaderVersion: config.AttestationVersion{ Value: cfgV3.Attestation.AzureSEVSNP.BootloaderVersion.Value, WantLatest: cfgV3.Attestation.AzureSEVSNP.BootloaderVersion.WantLatest, }, - TEEVersion: config.AttestationVersion[uint8]{ + TEEVersion: config.AttestationVersion{ Value: cfgV3.Attestation.AzureSEVSNP.TEEVersion.Value, WantLatest: cfgV3.Attestation.AzureSEVSNP.TEEVersion.WantLatest, }, - SNPVersion: config.AttestationVersion[uint8]{ + SNPVersion: config.AttestationVersion{ Value: cfgV3.Attestation.AzureSEVSNP.SNPVersion.Value, WantLatest: cfgV3.Attestation.AzureSEVSNP.SNPVersion.WantLatest, }, - MicrocodeVersion: config.AttestationVersion[uint8]{ + MicrocodeVersion: config.AttestationVersion{ Value: cfgV3.Attestation.AzureSEVSNP.MicrocodeVersion.Value, WantLatest: cfgV3.Attestation.AzureSEVSNP.MicrocodeVersion.WantLatest, }, diff --git a/internal/config/validation.go b/internal/config/validation.go index 68a7bf821..5e0ef59ee 100644 --- a/internal/config/validation.go +++ b/internal/config/validation.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package config @@ -202,9 +202,6 @@ func validateAttestation(sl validator.StructLevel) { if attestation.GCPSEVES != nil { attestationCount++ } - if attestation.GCPSEVSNP != nil { - attestationCount++ - } if attestation.QEMUVTPM != nil { attestationCount++ } @@ -250,7 +247,7 @@ func translateNoAttestationError(ut ut.Translator, fe validator.FieldError) stri } func registerNoAttestationError(ut ut.Translator) error { - return ut.Add("no_attestation", "{0}: No attestation has been defined (requires either awsSEVSNP, awsNitroTPM, azureSEVSNP, azureTDX, azureTrustedLaunch, gcpSEVES, gcpSEVSNP, or qemuVTPM)", true) + return ut.Add("no_attestation", "{0}: No attestation has been defined (requires either awsSEVSNP, awsNitroTPM, azureSEVSNP, azureTDX, azureTrustedLaunch, gcpSEVES, or qemuVTPM)", true) } func translateNoDefaultControlPlaneGroupError(ut ut.Translator, fe validator.FieldError) string { @@ -367,9 +364,6 @@ func (c *Config) translateMoreThanOneAttestationError(ut ut.Translator, fe valid if c.Attestation.GCPSEVES != nil { definedAttestations = append(definedAttestations, "GCPSEVES") } - if 
c.Attestation.GCPSEVSNP != nil { - definedAttestations = append(definedAttestations, "GCPSEVSNP") - } if c.Attestation.QEMUVTPM != nil { definedAttestations = append(definedAttestations, "QEMUVTPM") } @@ -520,7 +514,7 @@ func (c *Config) translateMoreThanOneProviderError(ut ut.Translator, fe validato return t } -func validInstanceTypeForProvider(insType string, attestation variant.Variant, provider ProviderConfig) bool { +func validInstanceTypeForProvider(insType string, attestation variant.Variant) bool { switch attestation { case variant.AWSSEVSNP{}, variant.AWSNitroTPM{}: return isSupportedAWSInstanceType(insType, attestation.Equal(variant.AWSNitroTPM{})) @@ -542,24 +536,13 @@ func validInstanceTypeForProvider(insType string, attestation variant.Variant, p return true } } - case variant.GCPSEVES{}, variant.GCPSEVSNP{}: + case variant.GCPSEVES{}: for _, instanceType := range instancetypes.GCPInstanceTypes { if insType == instanceType { return true } } case variant.QEMUVTPM{}, variant.QEMUTDX{}: - // only allow confidential instances on stackit cloud using QEMU vTPM - if provider.OpenStack != nil { - if cloud := provider.OpenStack.Cloud; strings.ToLower(cloud) == "stackit" { - for _, instanceType := range instancetypes.STACKITInstanceTypes { - if insType == instanceType { - return true - } - } - return false - } - } return true } return false @@ -800,7 +783,7 @@ func (c *Config) validateNodeGroupZoneField(fl validator.FieldLevel) bool { } func (c *Config) validateInstanceType(fl validator.FieldLevel) bool { - return validInstanceTypeForProvider(fl.Field().String(), c.GetAttestationConfig().GetVariant(), c.Provider) + return validInstanceTypeForProvider(fl.Field().String(), c.GetAttestationConfig().GetVariant()) } func (c *Config) validateStateDiskTypeField(fl validator.FieldLevel) bool { diff --git a/internal/config/validation_test.go b/internal/config/validation_test.go index 2cf9dbc8a..0a996580e 100644 --- a/internal/config/validation_test.go +++ b/internal/config/validation_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package config diff --git a/internal/constants/constants.go b/internal/constants/constants.go index 56fca9ef7..c313b74a6 100644 --- a/internal/constants/constants.go +++ b/internal/constants/constants.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* @@ -42,14 +42,6 @@ const ( DefaultWorkerGroupName = "worker_default" // CLIDebugLogFile is the name of the debug log file for constellation init/constellation apply. CLIDebugLogFile = "constellation-debug.log" - // SSHCAKeySuffix is the suffix used together with the DEKPrefix to derive an SSH CA key for emergency ssh access. - SSHCAKeySuffix = "ca_emergency_ssh" - // SSHCAKeyPath is the path to the emergency SSH CA key on the node. - SSHCAKeyPath = "/var/run/state/ssh/ssh_ca.pub" - // SSHHostKeyPath is the path to the SSH host key of the node. - SSHHostKeyPath = "/var/run/state/ssh/ssh_host_ed25519_key" - // SSHHostCertificatePath is the path to the SSH host certificate. - SSHHostCertificatePath = "/var/run/state/ssh/ssh_host_cert.pub" // // Ports. 
diff --git a/internal/constants/enterprise.go b/internal/constants/enterprise.go index 9c4d7421c..7e219eb95 100644 --- a/internal/constants/enterprise.go +++ b/internal/constants/enterprise.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package constants diff --git a/internal/constants/oss.go b/internal/constants/oss.go index 122d3e8f8..505084c59 100644 --- a/internal/constants/oss.go +++ b/internal/constants/oss.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package constants diff --git a/internal/constellation/BUILD.bazel b/internal/constellation/BUILD.bazel index 02afff0e9..58bd5d235 100644 --- a/internal/constellation/BUILD.bazel +++ b/internal/constellation/BUILD.bazel @@ -37,13 +37,8 @@ go_library( "//internal/semver", "//internal/versions", "@io_k8s_apiextensions_apiserver//pkg/apis/apiextensions/v1:apiextensions", - "@io_k8s_apimachinery//pkg/api/errors", - "@io_k8s_apimachinery//pkg/apis/meta/v1:meta", - "@io_k8s_apimachinery//pkg/runtime/schema", - "@io_k8s_apimachinery//pkg/types", - "@io_k8s_client_go//dynamic", "@io_k8s_client_go//tools/clientcmd", - "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//:go_default_library", ], ) @@ -74,6 +69,6 @@ go_test( "@com_github_stretchr_testify//require", "@io_k8s_client_go//tools/clientcmd", "@io_k8s_client_go//tools/clientcmd/api", - "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//:go_default_library", ], ) diff --git a/internal/constellation/apply.go b/internal/constellation/apply.go index 6c541cb3b..bbd61cf8c 100644 --- a/internal/constellation/apply.go +++ b/internal/constellation/apply.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package constellation @@ -18,8 +18,6 @@ import ( "github.com/edgelesssys/constellation/v2/internal/grpc/dialer" "github.com/edgelesssys/constellation/v2/internal/kms/uri" "github.com/edgelesssys/constellation/v2/internal/license" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/tools/clientcmd" ) // ApplyContext denotes the context in which the apply command is run. @@ -46,7 +44,6 @@ type Applier struct { newDialer func(validator atls.Validator) *dialer.Dialer kubecmdClient kubecmdClient helmClient helmApplier - dynamicClient dynamic.Interface } type licenseChecker interface { @@ -82,24 +79,15 @@ func (a *Applier) SetKubeConfig(kubeConfig []byte) error { if err != nil { return err } - restConfig, err := clientcmd.RESTConfigFromKubeConfig(kubeConfig) - if err != nil { - return err - } - dynamicClient, err := dynamic.NewForConfig(restConfig) - if err != nil { - return err - } a.kubecmdClient = kubecmdClient a.helmClient = helmClient - a.dynamicClient = dynamicClient return nil } // CheckLicense checks the given Constellation license with the license server // and returns the allowed quota for the license. 
func (a *Applier) CheckLicense(ctx context.Context, csp cloudprovider.Provider, initRequest bool, licenseID string) (int, error) { - a.log.Debug(fmt.Sprintf("Contacting license server for license %q", licenseID)) + a.log.Debug(fmt.Sprintf("Contacting license server for license '%s'", licenseID)) var action license.Action if initRequest { @@ -115,7 +103,7 @@ func (a *Applier) CheckLicense(ctx context.Context, csp cloudprovider.Provider, if err != nil { return 0, fmt.Errorf("checking license: %w", err) } - a.log.Debug(fmt.Sprintf("Got response from license server for license %q", licenseID)) + a.log.Debug(fmt.Sprintf("Got response from license server for license '%s'", licenseID)) return quota, nil } diff --git a/internal/constellation/apply_test.go b/internal/constellation/apply_test.go index c7864a7b2..54e845033 100644 --- a/internal/constellation/apply_test.go +++ b/internal/constellation/apply_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package constellation @@ -38,7 +38,7 @@ func TestCheckLicense(t *testing.T) { require := require.New(t) a := &Applier{licenseChecker: tc.licenseChecker, log: logger.NewTest(t)} - _, err := a.CheckLicense(t.Context(), cloudprovider.Unknown, true, license.CommunityLicense) + _, err := a.CheckLicense(context.Background(), cloudprovider.Unknown, true, license.CommunityLicense) if tc.wantErr { require.Error(err) } else { diff --git a/internal/constellation/applyinit.go b/internal/constellation/applyinit.go index 05a9b1e39..f02c9e8cc 100644 --- a/internal/constellation/applyinit.go +++ b/internal/constellation/applyinit.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package constellation @@ -42,7 +42,7 @@ type InitPayload struct { // GrpcDialer dials a gRPC server. type GrpcDialer interface { - Dial(target string) (*grpc.ClientConn, error) + Dial(ctx context.Context, target string) (*grpc.ClientConn, error) } // Init performs the init RPC. @@ -85,12 +85,12 @@ func (a *Applier) Init( // Create a wrapper function that allows logging any returned error from the retrier before checking if it's the expected retriable one. 
serviceIsUnavailable := func(err error) bool { isServiceUnavailable := grpcRetry.ServiceIsUnavailable(err) - a.log.Debug(fmt.Sprintf("Encountered error (retriable: %t): %q", isServiceUnavailable, err)) + a.log.Debug(fmt.Sprintf("Encountered error (retriable: %t): %s", isServiceUnavailable, err)) return isServiceUnavailable } // Perform the RPC - a.log.Debug("Initialization call", "endpoint", doer.endpoint) + a.log.Debug(fmt.Sprintf("Making initialization call, doer is %+v", doer)) a.spinner.Start("Connecting ", false) retrier := retry.NewIntervalRetrier(doer, 30*time.Second, serviceIsUnavailable) if err := retrier.Do(ctx); err != nil { @@ -99,7 +99,7 @@ func (a *Applier) Init( a.spinner.Stop() a.log.Debug("Initialization request finished") - a.log.Debug(fmt.Sprintf("Rewriting cluster server address in kubeconfig to %q", state.Infrastructure.ClusterEndpoint)) + a.log.Debug(fmt.Sprintf("Rewriting cluster server address in kubeconfig to %s", state.Infrastructure.ClusterEndpoint)) kubeconfig, err := clientcmd.Load(doer.resp.Kubeconfig) if err != nil { return InitOutput{}, fmt.Errorf("loading kubeconfig: %w", err) @@ -173,9 +173,9 @@ func (d *initDoer) Do(ctx context.Context) error { } } - conn, err := d.dialer.Dial(d.endpoint) + conn, err := d.dialer.Dial(ctx, d.endpoint) if err != nil { - d.log.Debug(fmt.Sprintf("Dialing init server failed: %q. Retrying...", err)) + d.log.Debug(fmt.Sprintf("Dialing init server failed: %s. Retrying...", err)) return fmt.Errorf("dialing init server: %w", err) } defer conn.Close() @@ -200,7 +200,7 @@ func (d *initDoer) Do(ctx context.Context) error { res, err := resp.Recv() // get first response, either success or failure if err != nil { if e := d.getLogs(resp); e != nil { - d.log.Debug(fmt.Sprintf("Failed to collect logs: %q", e)) + d.log.Debug(fmt.Sprintf("Failed to collect logs: %s", e)) return &NonRetriableInitError{ LogCollectionErr: e, Err: err, @@ -214,7 +214,7 @@ func (d *initDoer) Do(ctx context.Context) error { d.resp = res.GetInitSuccess() case *initproto.InitResponse_InitFailure: if e := d.getLogs(resp); e != nil { - d.log.Debug(fmt.Sprintf("Failed to get logs from cluster: %q", e)) + d.log.Debug(fmt.Sprintf("Failed to get logs from cluster: %s", e)) return &NonRetriableInitError{ LogCollectionErr: e, Err: errors.New(res.GetInitFailure().GetError()), @@ -225,7 +225,7 @@ func (d *initDoer) Do(ctx context.Context) error { d.log.Debug("Cluster returned nil response type") err = errors.New("empty response from cluster") if e := d.getLogs(resp); e != nil { - d.log.Debug(fmt.Sprintf("Failed to collect logs: %q", e)) + d.log.Debug(fmt.Sprintf("Failed to collect logs: %s", e)) return &NonRetriableInitError{ LogCollectionErr: e, Err: err, @@ -236,7 +236,7 @@ func (d *initDoer) Do(ctx context.Context) error { d.log.Debug("Cluster returned unknown response type") err = errors.New("unknown response from cluster") if e := d.getLogs(resp); e != nil { - d.log.Debug(fmt.Sprintf("Failed to collect logs: %q", e)) + d.log.Debug(fmt.Sprintf("Failed to collect logs: %s", e)) return &NonRetriableInitError{ LogCollectionErr: e, Err: err, diff --git a/internal/constellation/applyinit_test.go b/internal/constellation/applyinit_test.go index 59025bad2..7d16d5fe7 100644 --- a/internal/constellation/applyinit_test.go +++ b/internal/constellation/applyinit_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package constellation @@ -214,7 +214,7 @@ func TestInit(t *testing.T) { } clusterLogs 
:= &bytes.Buffer{} - ctx, cancel := context.WithTimeout(t.Context(), time.Second*4) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*4) defer cancel() _, err := a.Init(ctx, nil, tc.state, clusterLogs, InitPayload{ MasterSecret: uri.MasterSecret{}, @@ -280,7 +280,7 @@ func TestAttestation(t *testing.T) { } state := &state.State{Version: state.Version1, Infrastructure: state.Infrastructure{ClusterEndpoint: "192.0.2.4"}} - ctx := t.Context() + ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 4*time.Second) defer cancel() diff --git a/internal/constellation/constellation.go b/internal/constellation/constellation.go index 27202159e..af0fb6b4c 100644 --- a/internal/constellation/constellation.go +++ b/internal/constellation/constellation.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* diff --git a/internal/constellation/featureset/featureset.go b/internal/constellation/featureset/featureset.go index c20f9c361..b2ebb9cf7 100644 --- a/internal/constellation/featureset/featureset.go +++ b/internal/constellation/featureset/featureset.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // package featureset provides a way to check whether a feature is enabled in the current build. diff --git a/internal/constellation/featureset/featureset_enterprise.go b/internal/constellation/featureset/featureset_enterprise.go index d79ccc006..3cd69c785 100644 --- a/internal/constellation/featureset/featureset_enterprise.go +++ b/internal/constellation/featureset/featureset_enterprise.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package featureset diff --git a/internal/constellation/featureset/featureset_oss.go b/internal/constellation/featureset/featureset_oss.go index 82f95317d..2072641d3 100644 --- a/internal/constellation/featureset/featureset_oss.go +++ b/internal/constellation/featureset/featureset_oss.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package featureset diff --git a/internal/constellation/helm.go b/internal/constellation/helm.go index 7d9cca7de..1378ce3a0 100644 --- a/internal/constellation/helm.go +++ b/internal/constellation/helm.go @@ -1,80 +1,19 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package constellation import ( - "context" "errors" - "fmt" "github.com/edgelesssys/constellation/v2/internal/constellation/helm" "github.com/edgelesssys/constellation/v2/internal/constellation/state" "github.com/edgelesssys/constellation/v2/internal/kms/uri" - k8serrors "k8s.io/apimachinery/pkg/api/errors" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" ) -var patch = []byte(fmt.Sprintf(`{"metadata": {"labels": {%q: %q}, "annotations": {%q: %q, %q: %q}}}`, - "app.kubernetes.io/managed-by", "Helm", - "meta.helm.sh/release-name", "coredns", - "meta.helm.sh/release-namespace", "kube-system")) - -var namespacedCoreDNSResources = map[schema.GroupVersionResource]string{ - {Group: "apps", Version: "v1", Resource: "deployments"}: "coredns", - {Group: "", Version: "v1", Resource: "services"}: "kube-dns", - {Group: "", Version: "v1", Resource: "configmaps"}: "coredns", - {Group: "", 
Version: "v1", Resource: "serviceaccounts"}: "coredns", - {Group: "apps", Version: "v1", Resource: "statefulsets"}: "foobarbax", -} - -var coreDNSResources = map[schema.GroupVersionResource]string{ - {Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "clusterroles"}: "system:coredns", - {Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "clusterrolebindings"}: "system:coredns", -} - -// AnnotateCoreDNSResources imports existing CoreDNS resources into the Helm release. -// -// This is only required when CoreDNS was installed by kubeadm directly. -// TODO(burgerdev): remove after v2.19 is released. -func (a *Applier) AnnotateCoreDNSResources(ctx context.Context) error { - for gvk, name := range coreDNSResources { - _, err := a.dynamicClient.Resource(gvk).Patch(ctx, name, types.StrategicMergePatchType, patch, v1.PatchOptions{}) - if err != nil && !k8serrors.IsNotFound(err) { - return err - } - } - - for gvk, name := range namespacedCoreDNSResources { - _, err := a.dynamicClient.Resource(gvk).Namespace("kube-system").Patch(ctx, name, types.StrategicMergePatchType, patch, v1.PatchOptions{}) - if err != nil && !k8serrors.IsNotFound(err) { - return err - } - } - - return nil -} - -// CleanupCoreDNSResources removes CoreDNS resources that are not managed by Helm. -// -// This is only required when CoreDNS was installed by kubeadm directly. -// TODO(burgerdev): remove after v2.19 is released. -func (a *Applier) CleanupCoreDNSResources(ctx context.Context) error { - err := a.dynamicClient. - Resource(schema.GroupVersionResource{Group: "", Version: "v1", Resource: "configmaps"}). - Namespace("kube-system"). - Delete(ctx, "coredns", v1.DeleteOptions{}) - if !k8serrors.IsNotFound(err) { - return err - } - return nil -} - // PrepareHelmCharts loads Helm charts for Constellation and returns an executor to apply them. 
func (a *Applier) PrepareHelmCharts( flags helm.Options, state *state.State, serviceAccURI string, masterSecret uri.MasterSecret, diff --git a/internal/constellation/helm/BUILD.bazel b/internal/constellation/helm/BUILD.bazel index a36717668..6e3c5eee7 100644 --- a/internal/constellation/helm/BUILD.bazel +++ b/internal/constellation/helm/BUILD.bazel @@ -457,24 +457,6 @@ go_library( "charts/yawol/charts/yawol-controller/values.yaml", "charts/yawol/templates/.gitkeep", "charts/yawol/values.yaml", - "charts/edgeless/csi/charts/aws-csi-driver/templates/_node-windows.tpl", - "charts/edgeless/csi/charts/aws-csi-driver/templates/_node.tpl", - "charts/edgeless/csi/charts/aws-csi-driver/templates/ebs-csi-default-sc.yaml", - "charts/edgeless/csi/charts/aws-csi-driver/templates/role-leases.yaml", - "charts/edgeless/csi/charts/aws-csi-driver/templates/rolebinding-leases.yaml", - "charts/cert-manager/templates/cainjector-config.yaml", - "charts/cert-manager/templates/extras-objects.yaml", - "charts/cert-manager/templates/podmonitor.yaml", - "charts/coredns/Chart.yaml", - "charts/coredns/values.yaml", - "charts/coredns/templates/clusterrole.yaml", - "charts/coredns/templates/clusterrolebinding.yaml", - "charts/coredns/templates/configmap.yaml", - "charts/coredns/templates/deployment.yaml", - "charts/coredns/templates/service.yaml", - "charts/coredns/templates/serviceaccount.yaml", - "charts/aws-load-balancer-controller/templates/hpa.yaml", - "charts/cilium/files/cilium-envoy/configmap/bootstrap-config.yaml", ], importpath = "github.com/edgelesssys/constellation/v2/internal/constellation/helm", visibility = ["//:__subpackages__"], @@ -502,12 +484,11 @@ go_library( "@io_k8s_client_go//restmapper", "@io_k8s_client_go//tools/clientcmd", "@io_k8s_client_go//util/retry", - "@io_k8s_kubernetes//cmd/kubeadm/app/constants", + "@sh_helm_helm//pkg/ignore", "@sh_helm_helm_v3//pkg/action", "@sh_helm_helm_v3//pkg/chart", "@sh_helm_helm_v3//pkg/chart/loader", "@sh_helm_helm_v3//pkg/chartutil", - "@sh_helm_helm_v3//pkg/ignore", "@sh_helm_helm_v3//pkg/release", ], ) diff --git a/internal/constellation/helm/action.go b/internal/constellation/helm/action.go index f405beb29..30c1c312d 100644 --- a/internal/constellation/helm/action.go +++ b/internal/constellation/helm/action.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package helm diff --git a/internal/constellation/helm/actionfactory.go b/internal/constellation/helm/actionfactory.go index 059ebe712..f1a069399 100644 --- a/internal/constellation/helm/actionfactory.go +++ b/internal/constellation/helm/actionfactory.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package helm @@ -90,15 +90,15 @@ func (a actionFactory) appendNewAction( ) } - a.log.Debug(fmt.Sprintf("release %q not found, adding to new releases...", release.releaseName)) + a.log.Debug(fmt.Sprintf("release %s not found, adding to new releases...", release.releaseName)) *actions = append(*actions, a.newInstall(release, timeout)) return nil } if err != nil { return fmt.Errorf("getting version for %s: %w", release.releaseName, err) } - a.log.Debug(fmt.Sprintf("Current %q version: %q", release.releaseName, currentVersion)) - a.log.Debug(fmt.Sprintf("New %q version: %q", release.releaseName, newVersion)) + a.log.Debug(fmt.Sprintf("Current %s version: %s", release.releaseName, currentVersion)) + a.log.Debug(fmt.Sprintf("New %s version: %s", 
release.releaseName, newVersion)) if !force { // For charts we package ourselves, the version is equal to the CLI version (charts are embedded in the binary). @@ -132,7 +132,7 @@ func (a actionFactory) appendNewAction( release.releaseName == certManagerInfo.releaseName { return ErrConfirmationMissing } - a.log.Debug(fmt.Sprintf("Upgrading %q from %q to %q", release.releaseName, currentVersion, newVersion)) + a.log.Debug(fmt.Sprintf("Upgrading %s from %s to %s", release.releaseName, currentVersion, newVersion)) *actions = append(*actions, a.newUpgrade(release, timeout)) return nil } @@ -165,7 +165,7 @@ func (a actionFactory) updateCRDs(ctx context.Context, chart *chart.Chart) error for _, dep := range chart.Dependencies() { for _, crdFile := range dep.Files { if strings.HasPrefix(crdFile.Name, "crds/") { - a.log.Debug(fmt.Sprintf("Updating crd: %q", crdFile.Name)) + a.log.Debug(fmt.Sprintf("Updating crd: %s", crdFile.Name)) err := a.kubeClient.ApplyCRD(ctx, crdFile.Data) if err != nil { return err diff --git a/internal/constellation/helm/actionfactory_test.go b/internal/constellation/helm/actionfactory_test.go index 93ec54dc8..960ea5a52 100644 --- a/internal/constellation/helm/actionfactory_test.go +++ b/internal/constellation/helm/actionfactory_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package helm diff --git a/internal/constellation/helm/charts/aws-load-balancer-controller/Chart.yaml b/internal/constellation/helm/charts/aws-load-balancer-controller/Chart.yaml index 1f1b4e9ba..363fff854 100644 --- a/internal/constellation/helm/charts/aws-load-balancer-controller/Chart.yaml +++ b/internal/constellation/helm/charts/aws-load-balancer-controller/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 name: aws-load-balancer-controller description: AWS Load Balancer Controller Helm chart for Kubernetes -version: 1.11.0 -appVersion: v2.11.0 +version: 1.5.4 +appVersion: v2.5.3 home: https://github.com/aws/eks-charts icon: https://raw.githubusercontent.com/aws/eks-charts/master/docs/logo/aws.png sources: diff --git a/internal/constellation/helm/charts/aws-load-balancer-controller/README.md b/internal/constellation/helm/charts/aws-load-balancer-controller/README.md index 180e50c08..ee4be9aad 100644 --- a/internal/constellation/helm/charts/aws-load-balancer-controller/README.md +++ b/internal/constellation/helm/charts/aws-load-balancer-controller/README.md @@ -22,11 +22,7 @@ AWS Load Balancer controller manages the following AWS resources As a security best practice, we recommend isolating the controller deployment pods to specific node groups which run critical components. The helm chart provides parameters ```nodeSelector```, ```tolerations``` and ```affinity``` to configure node isolation. For more information, please refer to the guidance [here](https://aws.github.io/aws-eks-best-practices/security/docs/multitenancy/#isolating-tenant-workloads-to-specific-nodes). 
## Prerequisites -- Supported Kubernetes Versions - - Chart version v1.5.0+ requires Kubernetes 1.22+ - - Chart version v1.4.0+ requires Kubernetes 1.19+ - - Chart version v1.2.0 - v1.3.3 supports Kubernetes 1.16-1.21 - - Chart version v1.1.6 and before supports Kubernetes 1.15 +- Kubernetes >= 1.19 - IAM permissions - Helm v3 - Optional dependencies @@ -78,7 +74,7 @@ If migrating from ALB ingress controller, grant [additional IAM permissions](htt - Additional IAM permissions required, ensure you have granted the [required IAM permissions](https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/main/docs/install/iam_policy.json). - CRDs need to be updated as follows ```shell script -kubectl apply -k "github.com/aws/eks-charts/stable/aws-load-balancer-controller/crds?ref=master" +kubectl apply -k "github.com/aws/eks-charts/stable/aws-load-balancer-controller//crds?ref=master" ``` - you can run helm upgrade without uninstalling the old chart completely @@ -96,11 +92,8 @@ If you are setting `serviceMonitor.enabled: true` you need to have installed the ## Installing the Chart **Note**: You need to uninstall aws-alb-ingress-controller. Please refer to the [upgrade](#Upgrade) section below before you proceed. - **Note**: Starting chart version 1.4.1, you need to explicitly set `clusterSecretsPermissions.allowAllSecrets` to true to grant the controller permission to access all secrets for OIDC feature. We recommend configuring access to individual secrets resource separately [[link](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.4/examples/secrets_access/)]. -**Note**: To ensure compatibility, we recommend installing the AWS Load Balancer controller image version with its compatible Helm chart version. Use the ```helm search repo eks/aws-load-balancer-controller --versions``` command to find the compatible versions. - Add the EKS repository to Helm: ```shell script helm repo add eks https://aws.github.io/eks-charts @@ -109,7 +102,7 @@ helm repo add eks https://aws.github.io/eks-charts Install the TargetGroupBinding CRDs: ```shell script -kubectl apply -k "github.com/aws/eks-charts/stable/aws-load-balancer-controller/crds?ref=master" +kubectl apply -k "github.com/aws/eks-charts/stable/aws-load-balancer-controller//crds?ref=master" ``` Install the AWS Load Balancer controller, if using iamserviceaccount @@ -178,106 +171,88 @@ Chart release v1.2.0 and later enables high availability configuration by defaul The following tables lists the configurable parameters of the chart and their default values. The default values set by the application itself can be confirmed [here](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.4/deploy/configurations/#controller-configuration-options). 
- -| Parameter | Description | Default | -| ---------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------- | -| `image.repository` | image repository | `public.ecr.aws/eks/aws-load-balancer-controller` | -| `image.tag` | image tag | `` | -| `image.pullPolicy` | image pull policy | `IfNotPresent` | -| `clusterName` | Kubernetes cluster name | None | -| `cluster.dnsDomain` | DNS domain of the Kubernetes cluster, included in TLS certificate requests | `cluster.local` | -| `securityContext` | Set to security context for pod | `{}` | -| `resources` | Controller pod resource requests & limits | `{}` | -| `priorityClassName` | Controller pod priority class | system-cluster-critical | -| `nodeSelector` | Node labels for controller pod assignment | `{}` | -| `tolerations` | Controller pod toleration for taints | `{}` | -| `affinity` | Affinity for pod assignment | `{}` | -| `configureDefaultAffinity` | Configure soft pod anti-affinity if custom affinity is not configured | `true` | -| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment | `{}` | -| `deploymentAnnotations` | Annotations to add to deployment | `{}` | -| `podAnnotations` | Annotations to add to each pod | `{}` | -| `podLabels` | Labels to add to each pod | `{}` | -| `additionalLabels` | Labels to add to all components | `{}` | -| `rbac.create` | if `true`, create and use RBAC resources | `true` | -| `serviceAccount.annotations` | optional annotations to add to service account | None | -| `serviceAccount.automountServiceAccountToken` | Automount API credentials for a Service Account | `true` | -| `serviceAccount.imagePullSecrets` | List of image pull secrets to add to the Service Account | `[]` | -| `serviceAccount.create` | If `true`, create a new service account | `true` | -| `serviceAccount.name` | Service account to be used | None | -| `terminationGracePeriodSeconds` | Time period for controller pod to do a graceful shutdown | 10 | -| `ingressClass` | The ingress class to satisfy | alb | -| `createIngressClassResource` | Create ingressClass resource | true | -| `ingressClassParams.name` | IngressClassParams resource's name, default to the aws load balancer controller's name | None | -| `ingressClassParams.create` | If `true`, create a new ingressClassParams | true | -| `ingressClassParams.spec` | IngressClassParams defined ingress specifications | {} | -| `region` | The AWS region for the kubernetes cluster | None | -| `vpcId` | The VPC ID for the Kubernetes cluster | None | -| `awsApiEndpoints` | Custom AWS API Endpoints | None | -| `awsApiThrottle` | Custom AWS API throttle settings | None | -| `awsMaxRetries` | Maximum retries for AWS APIs | None | -| `defaultTargetType` | Default target type. Used as the default value of the `alb.ingress.kubernetes.io/target-type` and `service.beta.kubernetes.io/aws-load-balancer-nlb-target-type" annotations.`Possible values are `ip` and `instance`. 
| `instance` | -| `enablePodReadinessGateInject` | If enabled, targetHealth readiness gate will get injected to the pod spec for the matching endpoint pods | None | -| `enableShield` | Enable Shield addon for ALB | None | -| `enableWaf` | Enable WAF addon for ALB | None | -| `enableWafv2` | Enable WAF V2 addon for ALB | None | -| `ingressMaxConcurrentReconciles` | Maximum number of concurrently running reconcile loops for ingress | None | -| `logLevel` | Set the controller log level - info, debug | None | -| `metricsBindAddr` | The address the metric endpoint binds to | "" | -| `webhookConfig.disableIngressValidation` | Disables the validation of resources of kind Ingress | None | -| `webhookBindPort` | The TCP port the Webhook server binds to | None | -| `webhookTLS.caCert` | TLS CA certificate for webhook (auto-generated if not provided) | "" | -| `webhookTLS.cert` | TLS certificate for webhook (auto-generated if not provided) | "" | -| `webhookTLS.key` | TLS private key for webhook (auto-generated if not provided) | "" | -| `webhookNamespaceSelectors` | Namespace selectors for the wekbook | None | -| `keepTLSSecret` | Reuse existing TLS Secret during chart upgrade | `true` | -| `serviceAnnotations` | Annotations to be added to the provisioned webhook service resource | `{}` | -| `serviceMaxConcurrentReconciles` | Maximum number of concurrently running reconcile loops for service | None | -| `targetgroupbindingMaxConcurrentReconciles` | Maximum number of concurrently running reconcile loops for targetGroupBinding | None | -| `targetgroupbindingMaxExponentialBackoffDelay` | Maximum duration of exponential backoff for targetGroupBinding reconcile failures | None | -| `syncPeriod` | Period at which the controller forces the repopulation of its local object stores | None | -| `watchNamespace` | Namespace the controller watches for updates to Kubernetes objects, If empty, all namespaces are watched | None | -| `disableIngressClassAnnotation` | Disables the usage of kubernetes.io/ingress.class annotation | None | -| `disableIngressGroupNameAnnotation` | Disables the usage of alb.ingress.kubernetes.io/group.name annotation | None | -| `tolerateNonExistentBackendService` | whether to allow rules that reference a backend service that does not exist. (When enabled, it will return 503 error if backend service not exist) | `true` | -| `tolerateNonExistentBackendAction` | whether to allow rules that reference a backend action that does not exist. (When enabled, it will return 503 error if backend action not exist) | `true` | -| `defaultSSLPolicy` | Specifies the default SSL policy to use for HTTPS or TLS listeners | None | -| `externalManagedTags` | Specifies the list of tag keys on AWS resources that are managed externally | `[]` | -| `livenessProbe` | Liveness probe settings for the controller | (see `values.yaml`) | -| `env` | Environment variables to set for aws-load-balancer-controller pod | None | -| `envFrom` | Environment variables to set for aws-load-balancer-controller pod from configMap or Secret | None | -| `envSecretName` | AWS credentials as environment variables from Secret (Secret keys `key_id` and `access_key`). 
| None | -| `hostNetwork` | If `true`, use hostNetwork | `false` | -| `dnsPolicy` | Set dnsPolicy if required | `ClusterFirst` | -| `extraVolumeMounts` | Extra volume mounts for the pod | `[]` | -| `extraVolumes` | Extra volumes for the pod | `[]` | -| `defaultTags` | Default tags to apply to all AWS resources managed by this controller | `{}` | -| `replicaCount` | Number of controller pods to run, only one will be active due to leader election | `2` | -| `revisionHistoryLimit` | Number of revisions to keep | `10` | -| `podDisruptionBudget` | Limit the disruption for controller pods. Require at least 2 controller replicas and 3 worker nodes | `{}` | -| `updateStrategy` | Defines the update strategy for the deployment | `{}` | -| `enableCertManager` | If enabled, cert-manager issues the webhook certificates instead of the helm template, requires cert-manager and it's CRDs to be installed | `false` | -| `enableEndpointSlices` | If enabled, controller uses k8s EndpointSlices instead of Endpoints for IP targets | `false` | -| `enableBackendSecurityGroup` | If enabled, controller uses shared security group for backend traffic | `true` | -| `backendSecurityGroup` | Backend security group to use instead of auto created one if the feature is enabled | `` | -| `disableRestrictedSecurityGroupRules` | If disabled, controller will not specify port range restriction in the backend security group rules | `false` | -| `objectSelector.matchExpressions` | Webhook configuration to select specific pods by specifying the expression to be matched | None | -| `objectSelector.matchLabels` | Webhook configuration to select specific pods by specifying the key value label pair to be matched | None | -| `serviceMonitor.enabled` | Specifies whether a service monitor should be created, requires the ServiceMonitor CRD to be installed | `false` | -| `serviceMonitor.namespace` | Namespace in which to create the service monitor | None | -| `serviceMonitor.additionalLabels` | Labels to add to the service monitor | `{}` | -| `serviceMonitor.interval` | Prometheus scrape interval | `1m` | -| `serviceMonitor.scrapeTimeout` | Prometheus scrape timeout | `1m` | -| `serviceMonitor.relabelings` | Relabelings to apply to samples before ingestion | `1m` | -| `serviceMonitor.metricRelabelings` | Metric relabelings to apply to samples before ingestion | `1m` | -| `clusterSecretsPermissions.allowAllSecrets` | If `true`, controller has access to all secrets in the cluster. | `false` | -| `controllerConfig.featureGates` | set of `key: value` pairs that describe AWS load balance controller features | `{}` | -| `ingressClassConfig.default` | If `true`, the ingressclass will be the default class of the cluster. | `false` | -| `enableServiceMutatorWebhook` | If `false`, disable the Service Mutator webhook which makes all new services of type LoadBalancer reconciled by the lb controller | `true` | -| `serviceMutatorWebhookConfig.failurePolicy` | Failure policy for the Service Mutator webhook | `Fail` | -| `serviceMutatorWebhookConfig.objectSelector` | Object selector(s) to limit which objects will be mutated by the Service Mutator webhook | `[]` | -| `serviceMutatorWebhookConfig.operations` | List of operations that will trigger the the Service Mutator webhook | `[ CREATE ]` | -| `autoscaling` | If `autoscaling.enabled=true`, enable the HPA on the controller mainly to survive load induced failure by the calls to the `aws-load-balancer-webhook-service`. 
Please keep in mind that the controller pods have `priorityClassName: system-cluster-critical`, enabling HPA may lead to the eviction of other low-priority pods in the node | `false` | -| `serviceTargetENISGTags` | set of `key=value` pairs of AWS tags in addition to cluster name for finding the target ENI security group to which to add inbound rules from NLBs | None | -| `loadBalancerClass` | Sets the AWS load balancer type to be used when the Kubernetes service requests an external load balancer | `service.k8s.aws/nlb` | -| `creator` | if set to a `value!=helm`, it will disable the addition of default helm labels | `helm` | -| `runtimeClassName` | Runtime class name for the controller pods , such as `gvisor` or `kata`. An unspecified `nil` or empty `""` RuntimeClassName is equivalent to the backwards-compatible default behavior as if the RuntimeClass feature is disabled. | "" | +| Parameter | Description | Default | +|------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------| +| `image.repository` | image repository | `public.ecr.aws/eks/aws-load-balancer-controller` | +| `image.tag` | image tag | `` | +| `image.pullPolicy` | image pull policy | `IfNotPresent` | +| `clusterName` | Kubernetes cluster name | None | +| `cluster.dnsDomain` | DNS domain of the Kubernetes cluster, included in TLS certificate requests | `cluster.local` | +| `securityContext` | Set to security context for pod | `{}` | +| `resources` | Controller pod resource requests & limits | `{}` | +| `priorityClassName` | Controller pod priority class | system-cluster-critical | +| `nodeSelector` | Node labels for controller pod assignment | `{}` | +| `tolerations` | Controller pod toleration for taints | `{}` | +| `affinity` | Affinity for pod assignment | `{}` | +| `configureDefaultAffinity` | Configure soft pod anti-affinity if custom affinity is not configured | `true` | +| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment | `{}` | +| `deploymentAnnotations` | Annotations to add to deployment | `{}` | +| `podAnnotations` | Annotations to add to each pod | `{}` | +| `podLabels` | Labels to add to each pod | `{}` | +| `additionalLabels` | Labels to add to all components | `{}` | +| `rbac.create` | if `true`, create and use RBAC resources | `true` | +| `serviceAccount.annotations` | optional annotations to add to service account | None | +| `serviceAccount.automountServiceAccountToken` | Automount API credentials for a Service Account | `true` | +| `serviceAccount.imagePullSecrets` | List of image pull secrets to add to the Service Account | `[]` | +| `serviceAccount.create` | If `true`, create a new service account | `true` | +| `serviceAccount.name` | Service account to be used | None | +| `terminationGracePeriodSeconds` | Time period for controller pod to do a graceful shutdown | 10 | +| `ingressClass` | The ingress class to satisfy | alb | +| `createIngressClassResource` | Create ingressClass resource | true | +| `ingressClassParams.name` | IngressClassParams resource's name, default to the aws load balancer controller's name | None | +| `ingressClassParams.create` | If `true`, create a new ingressClassParams | true | +| `ingressClassParams.spec` | IngressClassParams defined ingress specifications | {} | +| `region` | The AWS region for 
the kubernetes cluster | None | +| `vpcId` | The VPC ID for the Kubernetes cluster | None | +| `awsApiEndpoints` | Custom AWS API Endpoints | None | +| `awsApiThrottle` | Custom AWS API throttle settings | None | +| `awsMaxRetries` | Maximum retries for AWS APIs | None | +| `defaultTargetType` | Default target type. Used as the default value of the `alb.ingress.kubernetes.io/target-type` and `service.beta.kubernetes.io/aws-load-balancer-nlb-target-type" annotations.`Possible values are `ip` and `instance`. | `instance` | +| `enablePodReadinessGateInject` | If enabled, targetHealth readiness gate will get injected to the pod spec for the matching endpoint pods | None | +| `enableShield` | Enable Shield addon for ALB | None | +| `enableWaf` | Enable WAF addon for ALB | None | +| `enableWafv2` | Enable WAF V2 addon for ALB | None | +| `ingressMaxConcurrentReconciles` | Maximum number of concurrently running reconcile loops for ingress | None | +| `logLevel` | Set the controller log level - info, debug | None | +| `metricsBindAddr` | The address the metric endpoint binds to | "" | +| `webhookBindPort` | The TCP port the Webhook server binds to | None | +| `webhookTLS.caCert` | TLS CA certificate for webhook (auto-generated if not provided) | "" | +| `webhookTLS.cert` | TLS certificate for webhook (auto-generated if not provided) | "" | +| `webhookTLS.key` | TLS private key for webhook (auto-generated if not provided) | "" | +| `webhookNamespaceSelectors` | Namespace selectors for the wekbook | None | +| `keepTLSSecret` | Reuse existing TLS Secret during chart upgrade | `true` | +| `serviceAnnotations` | Annotations to be added to the provisioned webhook service resource | `{}` | +| `serviceMaxConcurrentReconciles` | Maximum number of concurrently running reconcile loops for service | None | +| `targetgroupbindingMaxConcurrentReconciles` | Maximum number of concurrently running reconcile loops for targetGroupBinding | None | +| `targetgroupbindingMaxExponentialBackoffDelay` | Maximum duration of exponential backoff for targetGroupBinding reconcile failures | None | +| `syncPeriod` | Period at which the controller forces the repopulation of its local object stores | None | +| `watchNamespace` | Namespace the controller watches for updates to Kubernetes objects, If empty, all namespaces are watched | None | +| `disableIngressClassAnnotation` | Disables the usage of kubernetes.io/ingress.class annotation | None | +| `disableIngressGroupNameAnnotation` | Disables the usage of alb.ingress.kubernetes.io/group.name annotation | None | +| `defaultSSLPolicy` | Specifies the default SSL policy to use for HTTPS or TLS listeners | None | +| `externalManagedTags` | Specifies the list of tag keys on AWS resources that are managed externally | `[]` | +| `livenessProbe` | Liveness probe settings for the controller | (see `values.yaml`) | +| `env` | Environment variables to set for aws-load-balancer-controller pod | None | +| `hostNetwork` | If `true`, use hostNetwork | `false` | +| `dnsPolicy` | Set dnsPolicy if required | `ClusterFirst` | +| `extraVolumeMounts` | Extra volume mounts for the pod | `[]` | +| `extraVolumes` | Extra volumes for the pod | `[]` | +| `defaultTags` | Default tags to apply to all AWS resources managed by this controller | `{}` | +| `replicaCount` | Number of controller pods to run, only one will be active due to leader election | `2` | +| `podDisruptionBudget` | Limit the disruption for controller pods. 
Require at least 2 controller replicas and 3 worker nodes | `{}` | +| `updateStrategy` | Defines the update strategy for the deployment | `{}` | +| `enableCertManager` | If enabled, cert-manager issues the webhook certificates instead of the helm template, requires cert-manager and it's CRDs to be installed | `false` | +| `enableEndpointSlices` | If enabled, controller uses k8s EndpointSlices instead of Endpoints for IP targets | `false` | +| `enableBackendSecurityGroup` | If enabled, controller uses shared security group for backend traffic | `true` | +| `backendSecurityGroup` | Backend security group to use instead of auto created one if the feature is enabled | `` | +| `disableRestrictedSecurityGroupRules` | If disabled, controller will not specify port range restriction in the backend security group rules | `false` | +| `objectSelector.matchExpressions` | Webhook configuration to select specific pods by specifying the expression to be matched | None | +| `objectSelector.matchLabels` | Webhook configuration to select specific pods by specifying the key value label pair to be matched | None | +| `serviceMonitor.enabled` | Specifies whether a service monitor should be created, requires the ServiceMonitor CRD to be installed | `false` | +| `serviceMonitor.additionalLabels` | Labels to add to the service account | `{}` | +| `serviceMonitor.interval` | Prometheus scrape interval | `1m` | +| `serviceMonitor.namespace` | Namespace in which Prometheus is running | None | +| `clusterSecretsPermissions.allowAllSecrets` | If `true`, controller has access to all secrets in the cluster. | `false` | +| `controllerConfig.featureGates` | set of `key: value` pairs that describe AWS load balance controller features | `{}` | +| `ingressClassConfig.default` | If `true`, the ingressclass will be the default class of the cluster. | `false` | +| `enableServiceMutatorWebhook` | If `false`, disable the Service Mutator webhook which makes all new services of type LoadBalancer reconciled by the lb controller | `true` | diff --git a/internal/constellation/helm/charts/aws-load-balancer-controller/crds/crds.yaml b/internal/constellation/helm/charts/aws-load-balancer-controller/crds/crds.yaml index b72e68789..78c226660 100644 --- a/internal/constellation/helm/charts/aws-load-balancer-controller/crds/crds.yaml +++ b/internal/constellation/helm/charts/aws-load-balancer-controller/crds/crds.yaml @@ -2,7 +2,8 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.11.1 + creationTimestamp: null name: ingressclassparams.elbv2.k8s.aws spec: group: elbv2.k8s.aws @@ -35,31 +36,20 @@ spec: description: IngressClassParams is the Schema for the IngressClassParams API properties: apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: description: IngressClassParamsSpec defines the desired state of IngressClassParams properties: - certificateArn: - description: CertificateArn specifies the ARN of the certificates - for all Ingresses that belong to IngressClass with this IngressClassParams. - items: - type: string - type: array group: description: Group defines the IngressGroup for all Ingresses that belong to IngressClass with this IngressClassParams. @@ -82,38 +72,7 @@ spec: enum: - ipv4 - dualstack - - dualstack-without-public-ipv4 type: string - listeners: - description: Listeners define a list of listeners with their protocol, - port and attributes. - items: - properties: - listenerAttributes: - description: The attributes of the listener - items: - description: Attributes defines custom attributes on resources. - properties: - key: - description: The key of the attribute. - type: string - value: - description: The value of the attribute. - type: string - required: - - key - - value - type: object - type: array - port: - description: The port of the listener - format: int32 - type: integer - protocol: - description: The protocol of the listener - type: string - type: object - type: array loadBalancerAttributes: description: LoadBalancerAttributes define the custom attributes to LoadBalancers for all Ingress that that belong to IngressClass with @@ -132,63 +91,50 @@ spec: - value type: object type: array - minimumLoadBalancerCapacity: - description: MinimumLoadBalancerCapacity define the capacity reservation - for LoadBalancers for all Ingress that belong to IngressClass with - this IngressClassParams. - properties: - capacityUnits: - description: The Capacity Units Value. - format: int32 - type: integer - required: - - capacityUnits - type: object namespaceSelector: - description: |- - NamespaceSelector restrict the namespaces of Ingresses that are allowed to specify the IngressClass with this IngressClassParams. + description: NamespaceSelector restrict the namespaces of Ingresses + that are allowed to specify the IngressClass with this IngressClassParams. * if absent or present but empty, it selects all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. 
- Valid operators are In, NotIn, Exists and DoesNotExist. + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array - x-kubernetes-list-type: atomic required: - key - operator type: object type: array - x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. type: object type: object x-kubernetes-map-type: atomic @@ -221,11 +167,10 @@ spec: items: type: string type: array - description: |- - Tags specifies subnets in the load balancer's VPC where each - tag specified in the map key contains one of the values in the corresponding - value list. - Exactly one of this or `ids` must be specified. + description: Tags specifies subnets in the load balancer's VPC + where each tag specified in the map key contains one of the + values in the corresponding value list. Exactly one of this + or `ids` must be specified. type: object type: object tags: @@ -255,7 +200,8 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.11.1 + creationTimestamp: null name: targetgroupbindings.elbv2.k8s.aws spec: group: elbv2.k8s.aws @@ -284,11 +230,6 @@ spec: name: ARN priority: 1 type: string - - description: The AWS TargetGroup's Name - jsonPath: .spec.targetGroupName - name: NAME - priority: 2 - type: string - jsonPath: .metadata.creationTimestamp name: AGE type: date @@ -298,29 +239,20 @@ spec: description: TargetGroupBinding is the Schema for the TargetGroupBinding API properties: apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: |- - Kind is a string value representing the REST resource this object represents. 
- Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: description: TargetGroupBindingSpec defines the desired state of TargetGroupBinding properties: - multiClusterTargetGroup: - description: MultiClusterTargetGroup Denotes if the TargetGroup is - shared among multiple clusters - type: boolean networking: description: networking provides the networking setup for ELBV2 LoadBalancer to access targets in TargetGroup. @@ -331,30 +263,28 @@ spec: items: properties: from: - description: |- - List of peers which should be able to access the targets in TargetGroup. - At least one NetworkingPeer should be specified. + description: List of peers which should be able to access + the targets in TargetGroup. At least one NetworkingPeer + should be specified. items: description: NetworkingPeer defines the source/destination peer for networking rules. properties: ipBlock: - description: |- - IPBlock defines an IPBlock peer. - If specified, none of the other fields can be set. + description: IPBlock defines an IPBlock peer. If specified, + none of the other fields can be set. properties: cidr: - description: |- - CIDR is the network CIDR. - Both IPV4 or IPV6 CIDR are accepted. + description: CIDR is the network CIDR. Both IPV4 + or IPV6 CIDR are accepted. type: string required: - cidr type: object securityGroup: - description: |- - SecurityGroup defines a SecurityGroup peer. - If specified, none of the other fields can be set. + description: SecurityGroup defines a SecurityGroup + peer. If specified, none of the other fields can + be set. properties: groupID: description: GroupID is the EC2 SecurityGroupID. @@ -365,24 +295,24 @@ spec: type: object type: array ports: - description: |- - List of ports which should be made accessible on the targets in TargetGroup. - If ports is empty or unspecified, it defaults to all ports with TCP. + description: List of ports which should be made accessible + on the targets in TargetGroup. If ports is empty or unspecified, + it defaults to all ports with TCP. items: properties: port: anyOf: - type: integer - type: string - description: |- - The port which traffic must match. - When NodePort endpoints(instance TargetType) is used, this must be a numerical port. - When Port endpoints(ip TargetType) is used, this can be either numerical or named port on pods. - if port is unspecified, it defaults to all ports. + description: The port which traffic must match. When + NodePort endpoints(instance TargetType) is used, + this must be a numerical port. When Port endpoints(ip + TargetType) is used, this can be either numerical + or named port on pods. if port is unspecified, it + defaults to all ports. x-kubernetes-int-or-string: true protocol: - description: |- - The protocol which traffic must match. + description: The protocol which traffic must match. If protocol is unspecified, it defaults to TCP. enum: - TCP @@ -417,9 +347,6 @@ spec: description: targetGroupARN is the Amazon Resource Name (ARN) for the TargetGroup. 
type: string - targetGroupName: - description: targetGroupName is the Name of the TargetGroup. - type: string targetType: description: targetType is the TargetType of TargetGroup. If unspecified, it will be automatically inferred. @@ -429,6 +356,7 @@ spec: type: string required: - serviceRef + - targetGroupARN type: object status: description: TargetGroupBindingStatus defines the observed state of TargetGroupBinding @@ -461,11 +389,6 @@ spec: name: ARN priority: 1 type: string - - description: The AWS TargetGroup's Name - jsonPath: .spec.targetGroupName - name: NAME - priority: 2 - type: string - jsonPath: .metadata.creationTimestamp name: AGE type: date @@ -475,19 +398,14 @@ spec: description: TargetGroupBinding is the Schema for the TargetGroupBinding API properties: apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object @@ -501,10 +419,6 @@ spec: - ipv4 - ipv6 type: string - multiClusterTargetGroup: - description: MultiClusterTargetGroup Denotes if the TargetGroup is - shared among multiple clusters - type: boolean networking: description: networking defines the networking rules to allow ELBV2 LoadBalancer to access targets in TargetGroup. @@ -517,30 +431,28 @@ spec: of traffic that is allowed to access TargetGroup's targets. properties: from: - description: |- - List of peers which should be able to access the targets in TargetGroup. - At least one NetworkingPeer should be specified. + description: List of peers which should be able to access + the targets in TargetGroup. At least one NetworkingPeer + should be specified. items: description: NetworkingPeer defines the source/destination peer for networking rules. properties: ipBlock: - description: |- - IPBlock defines an IPBlock peer. - If specified, none of the other fields can be set. + description: IPBlock defines an IPBlock peer. If specified, + none of the other fields can be set. properties: cidr: - description: |- - CIDR is the network CIDR. - Both IPV4 or IPV6 CIDR are accepted. + description: CIDR is the network CIDR. Both IPV4 + or IPV6 CIDR are accepted. type: string required: - cidr type: object securityGroup: - description: |- - SecurityGroup defines a SecurityGroup peer. - If specified, none of the other fields can be set. 
+ description: SecurityGroup defines a SecurityGroup + peer. If specified, none of the other fields can + be set. properties: groupID: description: GroupID is the EC2 SecurityGroupID. @@ -551,9 +463,9 @@ spec: type: object type: array ports: - description: |- - List of ports which should be made accessible on the targets in TargetGroup. - If ports is empty or unspecified, it defaults to all ports with TCP. + description: List of ports which should be made accessible + on the targets in TargetGroup. If ports is empty or unspecified, + it defaults to all ports with TCP. items: description: NetworkingPort defines the port and protocol for networking rules. @@ -562,15 +474,15 @@ spec: anyOf: - type: integer - type: string - description: |- - The port which traffic must match. - When NodePort endpoints(instance TargetType) is used, this must be a numerical port. - When Port endpoints(ip TargetType) is used, this can be either numerical or named port on pods. - if port is unspecified, it defaults to all ports. + description: The port which traffic must match. When + NodePort endpoints(instance TargetType) is used, + this must be a numerical port. When Port endpoints(ip + TargetType) is used, this can be either numerical + or named port on pods. if port is unspecified, it + defaults to all ports. x-kubernetes-int-or-string: true protocol: - description: |- - The protocol which traffic must match. + description: The protocol which traffic must match. If protocol is unspecified, it defaults to TCP. enum: - TCP @@ -592,42 +504,41 @@ spec: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array - x-kubernetes-list-type: atomic required: - key - operator type: object type: array - x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. + description: matchLabels is a map of {key,value} pairs. 
A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. type: object type: object x-kubernetes-map-type: atomic @@ -651,9 +562,7 @@ spec: targetGroupARN: description: targetGroupARN is the Amazon Resource Name (ARN) for the TargetGroup. - type: string - targetGroupName: - description: targetGroupName is the Name of the TargetGroup. + minLength: 1 type: string targetType: description: targetType is the TargetType of TargetGroup. If unspecified, @@ -662,12 +571,9 @@ spec: - instance - ip type: string - vpcID: - description: VpcID is the VPC of the TargetGroup. If unspecified, - it will be automatically inferred. - type: string required: - serviceRef + - targetGroupARN type: object status: description: TargetGroupBindingStatus defines the observed state of TargetGroupBinding diff --git a/internal/constellation/helm/charts/aws-load-balancer-controller/templates/_helpers.tpl b/internal/constellation/helm/charts/aws-load-balancer-controller/templates/_helpers.tpl index d916b99c4..660f6ee9d 100644 --- a/internal/constellation/helm/charts/aws-load-balancer-controller/templates/_helpers.tpl +++ b/internal/constellation/helm/charts/aws-load-balancer-controller/templates/_helpers.tpl @@ -45,14 +45,12 @@ This enables using a shorter name for the resources, for example aws-load-balanc Common labels */}} {{- define "aws-load-balancer-controller.labels" -}} -{{- if eq (default "helm" .Values.creator) "helm" -}} -app.kubernetes.io/managed-by: {{ .Release.Service }} helm.sh/chart: {{ include "aws-load-balancer-controller.chart" . }} -{{- end }} {{ include "aws-load-balancer-controller.selectorLabels" . }} {{- if .Chart.AppVersion }} app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} {{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} {{- if .Values.additionalLabels }} {{ toYaml .Values.additionalLabels }} {{- end -}} diff --git a/internal/constellation/helm/charts/aws-load-balancer-controller/templates/deployment.yaml b/internal/constellation/helm/charts/aws-load-balancer-controller/templates/deployment.yaml index 4506d489e..e2b5225ff 100644 --- a/internal/constellation/helm/charts/aws-load-balancer-controller/templates/deployment.yaml +++ b/internal/constellation/helm/charts/aws-load-balancer-controller/templates/deployment.yaml @@ -11,7 +11,6 @@ metadata: {{- include "aws-load-balancer-controller.labels" . | nindent 4 }} spec: replicas: {{ .Values.replicaCount }} - revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} selector: matchLabels: {{- include "aws-load-balancer-controller.selectorLabels" . | nindent 6 }} @@ -38,9 +37,6 @@ spec: {{- with .Values.imagePullSecrets }} imagePullSecrets: {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.runtimeClassName }} - runtimeClassName: {{ .Values.runtimeClassName }} {{- end }} serviceAccountName: {{ include "aws-load-balancer-controller.serviceAccountName" . }} volumes: @@ -62,17 +58,15 @@ spec: containers: - name: {{ .Chart.Name }} args: - - --cluster-name={{ required "Chart cannot be installed without a valid clusterName!" (tpl (default "" .Values.clusterName) .) }} + - --cluster-name={{ required "Chart cannot be installed without a valid clusterName!" .Values.clusterName }} {{- if .Values.ingressClass }} - --ingress-class={{ .Values.ingressClass }} {{- end }} - {{- $region := tpl (default "" .Values.region) . 
}} - {{- if $region }} + {{- if .Values.region }} - --aws-region={{ .Values.region }} {{- end }} - {{- $vpcID := tpl (default "" .Values.vpcId) . }} - {{- if $vpcID }} - - --aws-vpc-id={{ $vpcID }} + {{- if .Values.vpcId }} + - --aws-vpc-id={{ .Values.vpcId }} {{- end }} {{- if .Values.awsApiEndpoints }} - --aws-api-endpoints={{ .Values.awsApiEndpoints }} @@ -110,9 +104,6 @@ spec: {{- if .Values.targetgroupbindingMaxExponentialBackoffDelay }} - --targetgroupbinding-max-exponential-backoff-delay={{ .Values.targetgroupbindingMaxExponentialBackoffDelay }} {{- end }} - {{- if .Values.lbStabilizationMonitorInterval }} - - --lb-stabilization-monitor-interval={{ .Values.lbStabilizationMonitorInterval }} - {{- end }} {{- if .Values.logLevel }} - --log-level={{ .Values.logLevel }} {{- end }} @@ -131,12 +122,6 @@ spec: {{- if kindIs "bool" .Values.disableIngressGroupNameAnnotation }} - --disable-ingress-group-name-annotation={{ .Values.disableIngressGroupNameAnnotation }} {{- end }} - {{- if kindIs "bool" .Values.tolerateNonExistentBackendService }} - - --tolerate-non-existent-backend-service={{ .Values.tolerateNonExistentBackendService }} - {{- end }} - {{- if kindIs "bool" .Values.tolerateNonExistentBackendAction }} - - --tolerate-non-existent-backend-action={{ .Values.tolerateNonExistentBackendAction }} - {{- end }} {{- if .Values.defaultSSLPolicy }} - --default-ssl-policy={{ .Values.defaultSSLPolicy }} {{- end }} @@ -164,42 +149,13 @@ spec: {{- if ne .Values.defaultTargetType "instance" }} - --default-target-type={{ .Values.defaultTargetType }} {{- end }} - {{- if .Values.serviceTargetENISGTags }} - - --service-target-eni-security-group-tags={{ .Values.serviceTargetENISGTags }} - {{- end }} - {{- if .Values.certDiscovery.allowedCertificateAuthorityARNs }} - - --allowed-certificate-authority-arns={{ .Values.certDiscovery.allowedCertificateAuthorityARNs }} - {{- end }} - {{- if .Values.loadBalancerClass }} - - --load-balancer-class={{ .Values.loadBalancerClass }} - {{- end }} - {{- if or .Values.env .Values.envSecretName }} + {{- if .Values.env }} env: - {{- if .Values.env}} {{- range $key, $value := .Values.env }} - name: {{ $key }} value: "{{ $value }}" {{- end }} {{- end }} - {{- if .Values.envSecretName }} - - name: AWS_ACCESS_KEY_ID - valueFrom: - secretKeyRef: - name: {{ .Values.envSecretName }} - key: key_id - optional: true - - name: AWS_SECRET_ACCESS_KEY - valueFrom: - secretKeyRef: - name: {{ .Values.envSecretName }} - key: access_key - optional: true - {{- end }} - {{- end }} - {{- if .Values.envFrom }} - envFrom: - {{- toYaml .Values.envFrom | nindent 10 }} - {{- end }} securityContext: {{- toYaml .Values.securityContext | nindent 10 }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" @@ -224,10 +180,6 @@ spec: livenessProbe: {{- toYaml . | nindent 10 }} {{- end }} - {{- with .Values.readinessProbe }} - readinessProbe: - {{- toYaml . 
| nindent 10 }} - {{- end }} terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} {{- with .Values.nodeSelector }} nodeSelector: diff --git a/internal/constellation/helm/charts/aws-load-balancer-controller/templates/hpa.yaml b/internal/constellation/helm/charts/aws-load-balancer-controller/templates/hpa.yaml deleted file mode 100644 index 68689ba66..000000000 --- a/internal/constellation/helm/charts/aws-load-balancer-controller/templates/hpa.yaml +++ /dev/null @@ -1,34 +0,0 @@ -{{- if .Values.autoscaling.enabled }} -{{- if (semverCompare ">=1.23-0" .Capabilities.KubeVersion.Version)}} -apiVersion: autoscaling/v2 -{{- else }} -apiVersion: autoscaling/v2beta2 -{{- end }} -kind: HorizontalPodAutoscaler -metadata: - name: {{ include "aws-load-balancer-controller.fullname" . }} - namespace: {{ .Release.Namespace }} - labels: - {{- include "aws-load-balancer-controller.labels" . | nindent 4 }} - annotations: - {{- .Values.annotations | toYaml | nindent 4 }} -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: {{ include "aws-load-balancer-controller.fullname" . }} - minReplicas: {{ .Values.autoscaling.minReplicas }} - maxReplicas: {{ required "A valid .Values.autoscaling.maxReplicas value is required" .Values.autoscaling.maxReplicas }} - metrics: - {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} - - type: Resource - resource: - name: cpu - target: - averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} - type: Utilization - {{- end }} - {{- if .Values.autoscaling.autoscaleBehavior }} - behavior: {{ toYaml .Values.autoscaling.autoscaleBehavior | nindent 4 }} - {{- end }} -{{- end }} diff --git a/internal/constellation/helm/charts/aws-load-balancer-controller/templates/rbac.yaml b/internal/constellation/helm/charts/aws-load-balancer-controller/templates/rbac.yaml index 0dcc68c77..fc3bda695 100644 --- a/internal/constellation/helm/charts/aws-load-balancer-controller/templates/rbac.yaml +++ b/internal/constellation/helm/charts/aws-load-balancer-controller/templates/rbac.yaml @@ -75,9 +75,6 @@ rules: - apiGroups: [""] resources: [nodes, namespaces, endpoints] verbs: [get, list, watch] -- apiGroups: [""] - resources: [configmaps] - verbs: [get, delete, create, update] {{- if .Values.clusterSecretsPermissions.allowAllSecrets }} - apiGroups: [""] resources: [secrets] diff --git a/internal/constellation/helm/charts/aws-load-balancer-controller/templates/servicemonitor.yaml b/internal/constellation/helm/charts/aws-load-balancer-controller/templates/servicemonitor.yaml index 0454558c2..c811be253 100644 --- a/internal/constellation/helm/charts/aws-load-balancer-controller/templates/servicemonitor.yaml +++ b/internal/constellation/helm/charts/aws-load-balancer-controller/templates/servicemonitor.yaml @@ -3,14 +3,18 @@ apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: name: {{ include "aws-load-balancer-controller.fullname" . }} - namespace: {{ default .Release.Namespace .Values.serviceMonitor.namespace }} + {{- if .Values.serviceMonitor.namespace }} + namespace: {{ .Values.serviceMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} labels: {{- include "aws-load-balancer-controller.labels" . | nindent 4 }} - {{- with .Values.serviceMonitor.additionalLabels }} + {{- with .Values.serviceMonitor.additionalLabels }} {{- toYaml . 
| nindent 4 }} - {{- end }} + {{- end }} spec: - jobLabel: app.kubernetes.io/instance + jobLabel: {{ .Release.Name }} namespaceSelector: matchNames: - {{ .Release.Namespace }} @@ -25,19 +29,7 @@ spec: endpoints: - port: metrics-server path: /metrics - scheme: http - {{- with .Values.serviceMonitor.interval }} + {{- with .Values.serviceMonitor.interval }} interval: {{ . }} - {{- end }} - {{- with .Values.serviceMonitor.scrapeTimeout }} - scrapeTimeout: {{ . }} - {{- end }} - {{- with .Values.serviceMonitor.relabelings }} - relabelings: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.serviceMonitor.metricRelabelings }} - metricRelabelings: - {{- toYaml . | nindent 8 }} - {{- end }} -{{- end -}} + {{- end }} +{{- end -}} \ No newline at end of file diff --git a/internal/constellation/helm/charts/aws-load-balancer-controller/templates/webhook.yaml b/internal/constellation/helm/charts/aws-load-balancer-controller/templates/webhook.yaml index 504f08ccb..e7d557e41 100644 --- a/internal/constellation/helm/charts/aws-load-balancer-controller/templates/webhook.yaml +++ b/internal/constellation/helm/charts/aws-load-balancer-controller/templates/webhook.yaml @@ -65,7 +65,7 @@ webhooks: name: {{ template "aws-load-balancer-controller.webhookService" . }} namespace: {{ $.Release.Namespace }} path: /mutate-v1-service - failurePolicy: {{ .Values.serviceMutatorWebhookConfig.failurePolicy }} + failurePolicy: Fail name: mservice.elbv2.k8s.aws admissionReviewVersions: - v1beta1 @@ -75,21 +75,13 @@ webhooks: operator: NotIn values: - {{ include "aws-load-balancer-controller.name" . }} - {{- if .Values.serviceMutatorWebhookConfig.objectSelector.matchExpressions }} - {{- toYaml .Values.serviceMutatorWebhookConfig.objectSelector.matchExpressions | nindent 4 }} - {{- end }} - - {{- if .Values.serviceMutatorWebhookConfig.objectSelector.matchLabels }} - matchLabels: - {{- toYaml .Values.serviceMutatorWebhookConfig.objectSelector.matchLabels | nindent 6 }} - {{- end }} rules: - apiGroups: - "" apiVersions: - v1 operations: - {{- toYaml .Values.serviceMutatorWebhookConfig.operations | nindent 4 }} + - CREATE resources: - services sideEffects: None @@ -181,7 +173,6 @@ webhooks: resources: - targetgroupbindings sideEffects: None -{{- if not $.Values.webhookConfig.disableIngressValidation }} - clientConfig: {{ if not $.Values.enableCertManager -}} caBundle: {{ $tls.caCert }} @@ -206,7 +197,6 @@ webhooks: resources: - ingresses sideEffects: None -{{- end }} --- {{- if not $.Values.enableCertManager }} apiVersion: v1 diff --git a/internal/constellation/helm/charts/aws-load-balancer-controller/values.yaml b/internal/constellation/helm/charts/aws-load-balancer-controller/values.yaml index c2f465bcd..dea199559 100644 --- a/internal/constellation/helm/charts/aws-load-balancer-controller/values.yaml +++ b/internal/constellation/helm/charts/aws-load-balancer-controller/values.yaml @@ -4,29 +4,15 @@ replicaCount: 2 -revisionHistoryLimit: 10 - image: repository: public.ecr.aws/eks/aws-load-balancer-controller - tag: v2.11.0 + tag: v2.5.3 pullPolicy: IfNotPresent -runtimeClassName: "" imagePullSecrets: [] nameOverride: "" fullnameOverride: "" -# AWS LBC only has 1 main working pod, other pods are just standby -# the purpose of enable hpa is to survive load induced failure by the calls to the aws-load-balancer-webhook-service -# since the calls from kube-apiserver are sent round-robin to all replicas, and the failure policy on those webhooks is Fail -# if the pods become overloaded and do not respond within the timeout 
that could block the creation of pods, targetgroupbindings or ingresses -# Please keep in mind that the controller pods have `priorityClassName: system-cluster-critical`, enabling HPA may lead to the eviction of other low-priority pods in the node -autoscaling: - enabled: false - minReplicas: 1 - maxReplicas: 5 - targetCPUUtilizationPercentage: 80 - serviceAccount: # Specifies whether a service account should be created create: true @@ -120,8 +106,8 @@ clusterName: # cluster contains configurations specific to the kubernetes cluster cluster: - # Cluster DNS domain (required for requesting TLS certificates) - dnsDomain: cluster.local + # Cluster DNS domain (required for requesting TLS certificates) + dnsDomain: cluster.local # The ingress class this controller will satisfy. If not specified, controller will match all # ingresses without ingress class annotation and ingresses of type alb @@ -201,10 +187,6 @@ logLevel: # The address the metric endpoint binds to. (default ":8080") metricsBindAddr: "" -webhookConfig: - # disableIngressValidation disables the validation of resources of kind Ingress, false by default - disableIngressValidation: - # The TCP port the Webhook server binds to. (default 9443) webhookBindPort: @@ -214,7 +196,7 @@ webhookTLS: cert: key: -# array of namespace selectors for the pod mutator webhook +# array of namespace selectors for the webhook webhookNamespaceSelectors: # - key: elbv2.k8s.aws/pod-readiness-gate-inject # operator: In @@ -233,10 +215,7 @@ targetgroupbindingMaxConcurrentReconciles: # Maximum duration of exponential backoff for targetGroupBinding reconcile failures targetgroupbindingMaxExponentialBackoffDelay: -# Interval at which the controller monitors the state of load balancer after creation for stabilization -lbStabilizationMonitorInterval: - -# Period at which the controller forces the repopulation of its local object stores. (default 10h0m0s) +# Period at which the controller forces the repopulation of its local object stores. (default 1h0m0s) syncPeriod: # Namespace the controller watches for updates to Kubernetes objects, If empty, all namespaces are watched. @@ -248,12 +227,6 @@ disableIngressClassAnnotation: # disableIngressGroupNameAnnotation disables the usage of alb.ingress.kubernetes.io/group.name annotation, false by default disableIngressGroupNameAnnotation: -# tolerateNonExistentBackendService permits rules which specify backend services that don't exist, true by default (When enabled, it will return 503 error if backend service not exist) -tolerateNonExistentBackendService: - -# tolerateNonExistentBackendAction permits rules which specify backend actions that don't exist, true by default (When enabled, it will return 503 error if backend action not exist) -tolerateNonExistentBackendAction: - # defaultSSLPolicy specifies the default SSL policy to use for TLS/HTTPS listeners defaultSSLPolicy: @@ -267,17 +240,6 @@ livenessProbe: initialDelaySeconds: 30 timeoutSeconds: 10 -# readiness probe configuration for the controller -readinessProbe: - failureThreshold: 2 - httpGet: - path: /readyz - port: 61779 - scheme: HTTP - successThreshold: 1 - initialDelaySeconds: 10 - timeoutSeconds: 10 - # Environment variables to set for aws-load-balancer-controller pod. # We strongly discourage programming access credentials in the controller environment. You should setup IRSA or # comparable solutions like kube2iam, kiam etc instead. 
@@ -285,15 +247,8 @@ env: # ENV_1: "" # ENV_2: "" -# Use Environment variables credentials from Secret (aws-secret) for aws-load-balancer-controller pod similarly as The EBS CSI Driver does. -# envSecretName: aws-secret - -# Use envFrom to set environment variables from a Secret or ConfigMap -# envFrom: -# - secretRef: -# name: my-secret - # Specifies if aws-load-balancer-controller should be started in hostNetwork mode. +# # This is required if using a custom CNI where the managed control plane nodes are unable to initiate # network connections to the pods, for example using Calico CNI plugin on EKS. This is not required or # recommended if using the Amazon VPC CNI plugin. @@ -360,11 +315,6 @@ controllerConfig: # EnableIPTargetType: true # SubnetsClusterTagCheck: true # NLBHealthCheckAdvancedConfig: true - # ALBSingleSubnet: false - # LBCapacityReservation: true - -certDiscovery: - allowedCertificateAuthorityARNs: "" # empty means all CAs are in scope # objectSelector for webhook objectSelector: @@ -379,18 +329,12 @@ objectSelector: serviceMonitor: # Specifies whether a service monitor should be created enabled: false - # Namespace to create the service monitor in - namespace: - # Labels to add to the service monitor + # Labels to add to the service account additionalLabels: {} # Prometheus scrape interval interval: 1m - # Prometheus scrape timeout - scrapeTimeout: - # Relabelings to apply to samples before ingestion - relabelings: - # Metric relabelings to apply to samples before ingestion - metricRelabelings: + # Namespace to create the service monitor in + namespace: # clusterSecretsPermissions lets you configure RBAC permissions for secret resources # Access to secrets resource is required only if you use the OIDC feature, and instead of @@ -407,30 +351,3 @@ ingressClassConfig: # enableServiceMutatorWebhook allows you enable the webhook which makes this controller the default for all new services of type LoadBalancer enableServiceMutatorWebhook: true - -# serviceMutatorWebhook contains configurations specific to the service mutator webhook -serviceMutatorWebhookConfig: - # whether or not to fail the service creation if the webhook fails - failurePolicy: Fail - # limit webhook to only mutate services matching the objectSelector - objectSelector: - matchExpressions: [] - # - key: - # operator: - # values: - # - - matchLabels: {} - # key: value - # which operations trigger the webhook - operations: - - CREATE - # - UPDATE - -# serviceTargetENISGTags specifies AWS tags, in addition to the cluster tags, for finding the target ENI SG to which to add inbound rules from NLBs. -serviceTargetENISGTags: - -# Specifies the class of load balancer to use for services. 
This affects how services are provisioned if type LoadBalancer is used (default service.k8s.aws/nlb) -loadBalancerClass: - -# creator will disable helm default labels, so you can only add yours -# creator: "me" diff --git a/internal/constellation/helm/charts/cert-manager/Chart.yaml b/internal/constellation/helm/charts/cert-manager/Chart.yaml index aea96934f..7a8e8043b 100644 --- a/internal/constellation/helm/charts/cert-manager/Chart.yaml +++ b/internal/constellation/helm/charts/cert-manager/Chart.yaml @@ -1,15 +1,13 @@ annotations: - artifacthub.io/category: security - artifacthub.io/license: Apache-2.0 artifacthub.io/prerelease: "false" artifacthub.io/signKey: | fingerprint: 1020CF3C033D4F35BAE1C19E1226061C665DF13E url: https://cert-manager.io/public-keys/cert-manager-keyring-2021-09-20-1020CF3C033D4F35BAE1C19E1226061C665DF13E.gpg -apiVersion: v2 -appVersion: v1.15.0 +apiVersion: v1 +appVersion: v1.12.6 description: A Helm chart for cert-manager -home: https://cert-manager.io -icon: https://raw.githubusercontent.com/cert-manager/community/4d35a69437d21b76322157e6284be4cd64e6d2b7/logo/logo-small.png +home: https://github.com/cert-manager/cert-manager +icon: https://raw.githubusercontent.com/cert-manager/cert-manager/d53c0b9270f8cd90d908460d69502694e1838f5f/logo/logo-small.png keywords: - cert-manager - kube-lego @@ -23,4 +21,4 @@ maintainers: name: cert-manager sources: - https://github.com/cert-manager/cert-manager -version: v1.15.0 +version: v1.12.6 diff --git a/internal/constellation/helm/charts/cert-manager/templates/NOTES.txt b/internal/constellation/helm/charts/cert-manager/templates/NOTES.txt index 341d10123..102535460 100644 --- a/internal/constellation/helm/charts/cert-manager/templates/NOTES.txt +++ b/internal/constellation/helm/charts/cert-manager/templates/NOTES.txt @@ -1,6 +1,3 @@ -{{- if .Values.installCRDs }} -⚠️ WARNING: `installCRDs` is deprecated, use `crds.enabled` instead. -{{- end }} cert-manager {{ .Chart.AppVersion }} has been deployed successfully! In order to begin issuing certificates, you will need to set up a ClusterIssuer diff --git a/internal/constellation/helm/charts/cert-manager/templates/_helpers.tpl b/internal/constellation/helm/charts/cert-manager/templates/_helpers.tpl index 9902c089f..90db4af26 100644 --- a/internal/constellation/helm/charts/cert-manager/templates/_helpers.tpl +++ b/internal/constellation/helm/charts/cert-manager/templates/_helpers.tpl @@ -172,31 +172,3 @@ https://github.com/helm/helm/issues/5358 {{- define "cert-manager.namespace" -}} {{ .Values.namespace | default .Release.Namespace }} {{- end -}} - -{{/* -Util function for generating the image URL based on the provided options. -IMPORTANT: This function is standarized across all charts in the cert-manager GH organization. -Any changes to this function should also be made in cert-manager, trust-manager, approver-policy, ... -See https://github.com/cert-manager/cert-manager/issues/6329 for a list of linked PRs. -*/}} -{{- define "image" -}} -{{- $defaultTag := index . 1 -}} -{{- with index . 0 -}} -{{- if .registry -}}{{ printf "%s/%s" .registry .repository }}{{- else -}}{{- .repository -}}{{- end -}} -{{- if .digest -}}{{ printf "@%s" .digest }}{{- else -}}{{ printf ":%s" (default $defaultTag .tag) }}{{- end -}} -{{- end }} -{{- end }} - -{{/* -Check that the user has not set both .installCRDs and .crds.enabled or -set .installCRDs and disabled .crds.keep. -.installCRDs is deprecated and users should use .crds.enabled and .crds.keep instead. 
-*/}} -{{- define "cert-manager.crd-check" -}} - {{- if and (.Values.installCRDs) (.Values.crds.enabled) }} - {{- fail "ERROR: the deprecated .installCRDs option cannot be enabled at the same time as its replacement .crds.enabled" }} - {{- end }} - {{- if and (.Values.installCRDs) (not .Values.crds.keep) }} - {{- fail "ERROR: .crds.keep is not compatible with .installCRDs, please use .crds.enabled and .crds.keep instead" }} - {{- end }} -{{- end -}} diff --git a/internal/constellation/helm/charts/cert-manager/templates/cainjector-config.yaml b/internal/constellation/helm/charts/cert-manager/templates/cainjector-config.yaml deleted file mode 100644 index 82399cc1a..000000000 --- a/internal/constellation/helm/charts/cert-manager/templates/cainjector-config.yaml +++ /dev/null @@ -1,18 +0,0 @@ -{{- if .Values.cainjector.config -}} -{{- $_ := .Values.cainjector.config.apiVersion | required ".Values.cainjector.config.apiVersion must be set !" -}} -{{- $_ := .Values.cainjector.config.kind | required ".Values.cainjector.config.kind must be set !" -}} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "cainjector.fullname" . }} - namespace: {{ include "cert-manager.namespace" . }} - labels: - app: {{ include "cainjector.name" . }} - app.kubernetes.io/name: {{ include "cainjector.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: "cainjector" - {{- include "labels" . | nindent 4 }} -data: - config.yaml: | - {{- .Values.cainjector.config | toYaml | nindent 4 }} -{{- end -}} \ No newline at end of file diff --git a/internal/constellation/helm/charts/cert-manager/templates/cainjector-deployment.yaml b/internal/constellation/helm/charts/cert-manager/templates/cainjector-deployment.yaml index 8f9f7f331..122017374 100644 --- a/internal/constellation/helm/charts/cert-manager/templates/cainjector-deployment.yaml +++ b/internal/constellation/helm/charts/cert-manager/templates/cainjector-deployment.yaml @@ -16,10 +16,6 @@ metadata: {{- end }} spec: replicas: {{ .Values.cainjector.replicaCount }} - {{- /* The if statement below is equivalent to {{- if $value }} but will also return true for 0. */ -}} - {{- if not (has (quote .Values.global.revisionHistoryLimit) (list "" (quote ""))) }} - revisionHistoryLimit: {{ .Values.global.revisionHistoryLimit }} - {{- end }} selector: matchLabels: app.kubernetes.io/name: {{ include "cainjector.name" . }} @@ -49,7 +45,6 @@ spec: {{- if hasKey .Values.cainjector "automountServiceAccountToken" }} automountServiceAccountToken: {{ .Values.cainjector.automountServiceAccountToken }} {{- end }} - enableServiceLinks: {{ .Values.cainjector.enableServiceLinks }} {{- with .Values.global.priorityClassName }} priorityClassName: {{ . | quote }} {{- end }} @@ -59,16 +54,14 @@ spec: {{- end }} containers: - name: {{ .Chart.Name }}-cainjector - image: "{{ template "image" (tuple .Values.cainjector.image $.Chart.AppVersion) }}" + {{- with .Values.cainjector.image }} + image: "{{- if .registry -}}{{ .registry }}/{{- end -}}{{ .repository }}{{- if (.digest) -}} @{{ .digest }}{{- else -}}:{{ default $.Chart.AppVersion .tag }} {{- end -}}" + {{- end }} imagePullPolicy: {{ .Values.cainjector.image.pullPolicy }} args: - {{- /* The if statement below is equivalent to {{- if $value }} but will also return true for 0. 
*/ -}} - {{- if not (has (quote .Values.global.logLevel) (list "" (quote ""))) }} + {{- if .Values.global.logLevel }} - --v={{ .Values.global.logLevel }} {{- end }} - {{- if .Values.cainjector.config }} - - --config=/var/cert-manager/config/config.yaml - {{- end }} {{- with .Values.global.leaderElection }} - --leader-election-namespace={{ .namespace }} {{- if .leaseDuration }} @@ -81,9 +74,6 @@ spec: - --leader-election-retry-period={{ .retryPeriod }} {{- end }} {{- end }} - {{- with .Values.cainjector.featureGates}} - - --feature-gates={{ . }} - {{- end}} {{- with .Values.cainjector.extraArgs }} {{- toYaml . | nindent 10 }} {{- end }} @@ -100,15 +90,9 @@ spec: resources: {{- toYaml . | nindent 12 }} {{- end }} - {{- if or .Values.cainjector.config .Values.cainjector.volumeMounts }} + {{- with .Values.cainjector.volumeMounts }} volumeMounts: - {{- if .Values.cainjector.config }} - - name: config - mountPath: /var/cert-manager/config - {{- end }} - {{- with .Values.cainjector.volumeMounts }} {{- toYaml . | nindent 12 }} - {{- end }} {{- end }} {{- with .Values.cainjector.nodeSelector }} nodeSelector: @@ -126,15 +110,8 @@ spec: topologySpreadConstraints: {{- toYaml . | nindent 8 }} {{- end }} - {{- if or .Values.cainjector.volumes .Values.cainjector.config }} + {{- with .Values.cainjector.volumes }} volumes: - {{- if .Values.cainjector.config }} - - name: config - configMap: - name: {{ include "cainjector.fullname" . }} - {{- end }} - {{ with .Values.cainjector.volumes }} {{- toYaml . | nindent 8 }} - {{- end }} {{- end }} {{- end }} diff --git a/internal/constellation/helm/charts/cert-manager/templates/cainjector-poddisruptionbudget.yaml b/internal/constellation/helm/charts/cert-manager/templates/cainjector-poddisruptionbudget.yaml index 6a7d60913..f080b753a 100644 --- a/internal/constellation/helm/charts/cert-manager/templates/cainjector-poddisruptionbudget.yaml +++ b/internal/constellation/helm/charts/cert-manager/templates/cainjector-poddisruptionbudget.yaml @@ -17,13 +17,10 @@ spec: app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/component: "cainjector" - {{- if not (or (hasKey .Values.cainjector.podDisruptionBudget "minAvailable") (hasKey .Values.cainjector.podDisruptionBudget "maxUnavailable")) }} - minAvailable: 1 # Default value because minAvailable and maxUnavailable are not set + {{- with .Values.cainjector.podDisruptionBudget.minAvailable }} + minAvailable: {{ . }} {{- end }} - {{- if hasKey .Values.cainjector.podDisruptionBudget "minAvailable" }} - minAvailable: {{ .Values.cainjector.podDisruptionBudget.minAvailable }} - {{- end }} - {{- if hasKey .Values.cainjector.podDisruptionBudget "maxUnavailable" }} - maxUnavailable: {{ .Values.cainjector.podDisruptionBudget.maxUnavailable }} + {{- with .Values.cainjector.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ . }} {{- end }} {{- end }} diff --git a/internal/constellation/helm/charts/cert-manager/templates/controller-config.yaml b/internal/constellation/helm/charts/cert-manager/templates/controller-config.yaml index 25f62ef1d..a1b337572 100644 --- a/internal/constellation/helm/charts/cert-manager/templates/controller-config.yaml +++ b/internal/constellation/helm/charts/cert-manager/templates/controller-config.yaml @@ -1,6 +1,12 @@ {{- if .Values.config -}} -{{- $_ := .Values.config.apiVersion | required ".Values.config.apiVersion must be set !" -}} -{{- $_ := .Values.config.kind | required ".Values.config.kind must be set !" 
-}} + {{- if not .Values.config.apiVersion -}} + {{- fail "config.apiVersion must be set" -}} + {{- end -}} + + {{- if not .Values.config.kind -}} + {{- fail "config.kind must be set" -}} + {{- end -}} +{{- end -}} apiVersion: v1 kind: ConfigMap metadata: @@ -13,6 +19,7 @@ metadata: app.kubernetes.io/component: "controller" {{- include "labels" . | nindent 4 }} data: + {{- if .Values.config }} config.yaml: | - {{- .Values.config | toYaml | nindent 4 }} -{{- end -}} \ No newline at end of file + {{ .Values.config | toYaml | nindent 4 }} + {{- end }} diff --git a/internal/constellation/helm/charts/cert-manager/templates/crds.yaml b/internal/constellation/helm/charts/cert-manager/templates/crds.yaml index 2c70ca34c..820698742 100644 --- a/internal/constellation/helm/charts/cert-manager/templates/crds.yaml +++ b/internal/constellation/helm/charts/cert-manager/templates/crds.yaml @@ -1,13 +1,8 @@ -# {{- include "cert-manager.crd-check" . }} -# START crd {{- if or .Values.crds.enabled .Values.installCRDs }} +{{- if .Values.installCRDs }} apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: certificaterequests.cert-manager.io - # START annotations {{- if .Values.crds.keep }} - annotations: - helm.sh/resource-policy: keep - # END annotations {{- end }} labels: app: '{{ template "cert-manager.name" . }}' app.kubernetes.io/name: '{{ template "cert-manager.name" . }}' @@ -56,91 +51,47 @@ spec: type: date schema: openAPIV3Schema: - description: |- - A CertificateRequest is used to request a signed certificate from one of the - configured issuers. - - - All fields within the CertificateRequest's `spec` are immutable after creation. - A CertificateRequest will either succeed or fail, as denoted by its `Ready` status - condition and its `status.failureTime` field. - - - A CertificateRequest is a one-shot resource, meaning it represents a single - point in time request for a certificate and cannot be re-used. + description: "A CertificateRequest is used to request a signed certificate from one of the configured issuers. \n All fields within the CertificateRequest's `spec` are immutable after creation. A CertificateRequest will either succeed or fail, as denoted by its `status.state` field. \n A CertificateRequest is a one-shot resource, meaning it represents a single point in time request for a certificate and cannot be re-used." type: object + required: + - spec properties: apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: |- - Specification of the desired state of the CertificateRequest resource. - https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + description: Desired state of the CertificateRequest resource. type: object required: - issuerRef - request properties: duration: - description: |- - Requested 'duration' (i.e. lifetime) of the Certificate. Note that the - issuer may choose to ignore the requested duration, just like any other - requested attribute. + description: The requested 'duration' (i.e. lifetime) of the Certificate. This option may be ignored/overridden by some issuer types. type: string extra: - description: |- - Extra contains extra attributes of the user that created the CertificateRequest. - Populated by the cert-manager webhook on creation and immutable. + description: Extra contains extra attributes of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable. type: object additionalProperties: type: array items: type: string groups: - description: |- - Groups contains group membership of the user that created the CertificateRequest. - Populated by the cert-manager webhook on creation and immutable. + description: Groups contains group membership of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable. type: array items: type: string x-kubernetes-list-type: atomic isCA: - description: |- - Requested basic constraints isCA value. Note that the issuer may choose - to ignore the requested isCA value, just like any other requested attribute. - - - NOTE: If the CSR in the `Request` field has a BasicConstraints extension, - it must have the same isCA value as specified here. - - - If true, this will automatically add the `cert sign` usage to the list - of requested `usages`. + description: IsCA will request to mark the certificate as valid for certificate signing when submitting to the issuer. This will automatically add the `cert sign` usage to the list of `usages`. type: boolean issuerRef: - description: |- - Reference to the issuer responsible for issuing the certificate. - If the issuer is namespace-scoped, it must be in the same namespace - as the Certificate. If the issuer is cluster-scoped, it can be used - from any namespace. - - - The `name` field of the reference must always be specified. + description: IssuerRef is a reference to the issuer for this CertificateRequest. If the `kind` field is not set, or set to `Issuer`, an Issuer resource with the given name in the same namespace as the CertificateRequest will be used. If the `kind` field is set to `ClusterIssuer`, a ClusterIssuer with the provided name will be used. The `name` field in this stanza is required at all times. The group field refers to the API group of the issuer which defaults to `cert-manager.io` if empty. type: object required: - name @@ -155,69 +106,17 @@ spec: description: Name of the resource being referred to. type: string request: - description: |- - The PEM-encoded X.509 certificate signing request to be submitted to the - issuer for signing. - - - If the CSR has a BasicConstraints extension, its isCA attribute must - match the `isCA` value of this CertificateRequest. 
- If the CSR has a KeyUsage extension, its key usages must match the - key usages in the `usages` field of this CertificateRequest. - If the CSR has a ExtKeyUsage extension, its extended key usages - must match the extended key usages in the `usages` field of this - CertificateRequest. + description: The PEM-encoded x509 certificate signing request to be submitted to the CA for signing. type: string format: byte uid: - description: |- - UID contains the uid of the user that created the CertificateRequest. - Populated by the cert-manager webhook on creation and immutable. + description: UID contains the uid of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable. type: string usages: - description: |- - Requested key usages and extended key usages. - - - NOTE: If the CSR in the `Request` field has uses the KeyUsage or - ExtKeyUsage extension, these extensions must have the same values - as specified here without any additional values. - - - If unset, defaults to `digital signature` and `key encipherment`. + description: Usages is the set of x509 usages that are requested for the certificate. If usages are set they SHOULD be encoded inside the CSR spec Defaults to `digital signature` and `key encipherment` if not specified. type: array items: - description: |- - KeyUsage specifies valid usage contexts for keys. - See: - https://tools.ietf.org/html/rfc5280#section-4.2.1.3 - https://tools.ietf.org/html/rfc5280#section-4.2.1.12 - - - Valid KeyUsage values are as follows: - "signing", - "digital signature", - "content commitment", - "key encipherment", - "key agreement", - "data encipherment", - "cert sign", - "crl sign", - "encipher only", - "decipher only", - "any", - "server auth", - "client auth", - "code signing", - "email protection", - "s/mime", - "ipsec end system", - "ipsec tunnel", - "ipsec user", - "timestamping", - "ocsp signing", - "microsoft sgc", - "netscape sgc" + description: "KeyUsage specifies valid usage contexts for keys. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 https://tools.ietf.org/html/rfc5280#section-4.2.1.12 \n Valid KeyUsage values are as follows: \"signing\", \"digital signature\", \"content commitment\", \"key encipherment\", \"key agreement\", \"data encipherment\", \"cert sign\", \"crl sign\", \"encipher only\", \"decipher only\", \"any\", \"server auth\", \"client auth\", \"code signing\", \"email protection\", \"s/mime\", \"ipsec end system\", \"ipsec tunnel\", \"ipsec user\", \"timestamping\", \"ocsp signing\", \"microsoft sgc\", \"netscape sgc\"" type: string enum: - signing @@ -244,39 +143,22 @@ spec: - microsoft sgc - netscape sgc username: - description: |- - Username contains the name of the user that created the CertificateRequest. - Populated by the cert-manager webhook on creation and immutable. + description: Username contains the name of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable. type: string status: - description: |- - Status of the CertificateRequest. - This is set and managed automatically. - Read-only. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + description: Status of the CertificateRequest. This is set and managed automatically. type: object properties: ca: - description: |- - The PEM encoded X.509 certificate of the signer, also known as the CA - (Certificate Authority). - This is set on a best-effort basis by different issuers. 
- If not set, the CA is assumed to be unknown/not available. + description: The PEM encoded x509 certificate of the signer, also known as the CA (Certificate Authority). This is set on a best-effort basis by different issuers. If not set, the CA is assumed to be unknown/not available. type: string format: byte certificate: - description: |- - The PEM encoded X.509 certificate resulting from the certificate - signing request. - If not set, the CertificateRequest has either not been completed or has - failed. More information on failure can be found by checking the - `conditions` field. + description: The PEM encoded x509 certificate resulting from the certificate signing request. If not set, the CertificateRequest has either not been completed or has failed. More information on failure can be found by checking the `conditions` field. type: string format: byte conditions: - description: |- - List of status conditions to indicate the status of a CertificateRequest. - Known condition types are `Ready`, `InvalidRequest`, `Approved` and `Denied`. + description: List of status conditions to indicate the status of a CertificateRequest. Known condition types are `Ready` and `InvalidRequest`. type: array items: description: CertificateRequestCondition contains condition information for a CertificateRequest. @@ -286,20 +168,14 @@ spec: - type properties: lastTransitionTime: - description: |- - LastTransitionTime is the timestamp corresponding to the last status - change of this condition. + description: LastTransitionTime is the timestamp corresponding to the last status change of this condition. type: string format: date-time message: - description: |- - Message is a human readable description of the details of the last - transition, complementing reason. + description: Message is a human readable description of the details of the last transition, complementing reason. type: string reason: - description: |- - Reason is a brief machine readable explanation for the condition's last - transition. + description: Reason is a brief machine readable explanation for the condition's last transition. type: string status: description: Status of the condition, one of (`True`, `False`, `Unknown`). @@ -309,34 +185,22 @@ spec: - "False" - Unknown type: - description: |- - Type of the condition, known values are (`Ready`, `InvalidRequest`, - `Approved`, `Denied`). + description: Type of the condition, known values are (`Ready`, `InvalidRequest`, `Approved`, `Denied`). type: string x-kubernetes-list-map-keys: - type x-kubernetes-list-type: map failureTime: - description: |- - FailureTime stores the time that this CertificateRequest failed. This is - used to influence garbage collection and back-off. + description: FailureTime stores the time that this CertificateRequest failed. This is used to influence garbage collection and back-off. type: string format: date-time served: true storage: true - -# END crd {{- end }} - --- -# START crd {{- if or .Values.crds.enabled .Values.installCRDs }} apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: certificates.cert-manager.io - # START annotations {{- if .Values.crds.keep }} - annotations: - helm.sh/resource-policy: keep - # END annotations {{- end }} labels: app: '{{ template "cert-manager.name" . }}' app.kubernetes.io/name: '{{ template "cert-manager.name" . 
}}' @@ -380,132 +244,70 @@ spec: type: date schema: openAPIV3Schema: - description: |- - A Certificate resource should be created to ensure an up to date and signed - X.509 certificate is stored in the Kubernetes Secret resource named in `spec.secretName`. - - - The stored certificate will be renewed before it expires (as configured by `spec.renewBefore`). + description: "A Certificate resource should be created to ensure an up to date and signed x509 certificate is stored in the Kubernetes Secret resource named in `spec.secretName`. \n The stored certificate will be renewed before it expires (as configured by `spec.renewBefore`)." type: object + required: + - spec properties: apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: |- - Specification of the desired state of the Certificate resource. - https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + description: Desired state of the Certificate resource. type: object required: - issuerRef - secretName properties: additionalOutputFormats: - description: |- - Defines extra output formats of the private key and signed certificate chain - to be written to this Certificate's target Secret. - - - This is a Beta Feature enabled by default. It can be disabled with the - `--feature-gates=AdditionalCertificateOutputFormats=false` option set on both - the controller and webhook components. + description: AdditionalOutputFormats defines extra output formats of the private key and signed certificate chain to be written to this Certificate's target Secret. This is an Alpha Feature and is only enabled with the `--feature-gates=AdditionalCertificateOutputFormats=true` option on both the controller and webhook components. type: array items: - description: |- - CertificateAdditionalOutputFormat defines an additional output format of a - Certificate resource. These contain supplementary data formats of the signed - certificate chain and paired private key. + description: CertificateAdditionalOutputFormat defines an additional output format of a Certificate resource. These contain supplementary data formats of the signed certificate chain and paired private key. 
type: object required: - type properties: type: - description: |- - Type is the name of the format type that should be written to the - Certificate's target Secret. + description: Type is the name of the format type that should be written to the Certificate's target Secret. type: string enum: - DER - CombinedPEM commonName: - description: |- - Requested common name X509 certificate subject attribute. - More info: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 - NOTE: TLS clients will ignore this value when any subject alternative name is - set (see https://tools.ietf.org/html/rfc6125#section-6.4.4). - - - Should have a length of 64 characters or fewer to avoid generating invalid CSRs. - Cannot be set if the `literalSubject` field is set. + description: 'CommonName is a common name to be used on the Certificate. The CommonName should have a length of 64 characters or fewer to avoid generating invalid CSRs. This value is ignored by TLS clients when any subject alt name is set. This is x509 behaviour: https://tools.ietf.org/html/rfc6125#section-6.4.4' type: string dnsNames: - description: Requested DNS subject alternative names. + description: DNSNames is a list of DNS subjectAltNames to be set on the Certificate. type: array items: type: string duration: - description: |- - Requested 'duration' (i.e. lifetime) of the Certificate. Note that the - issuer may choose to ignore the requested duration, just like any other - requested attribute. - - - If unset, this defaults to 90 days. - Minimum accepted duration is 1 hour. - Value must be in units accepted by Go time.ParseDuration https://golang.org/pkg/time/#ParseDuration. + description: The requested 'duration' (i.e. lifetime) of the Certificate. This option may be ignored/overridden by some issuer types. If unset this defaults to 90 days. Certificate will be renewed either 2/3 through its duration or `renewBefore` period before its expiry, whichever is later. Minimum accepted duration is 1 hour. Value must be in units accepted by Go time.ParseDuration https://golang.org/pkg/time/#ParseDuration type: string emailAddresses: - description: Requested email subject alternative names. + description: EmailAddresses is a list of email subjectAltNames to be set on the Certificate. type: array items: type: string encodeUsagesInRequest: - description: |- - Whether the KeyUsage and ExtKeyUsage extensions should be set in the encoded CSR. - - - This option defaults to true, and should only be disabled if the target - issuer does not support CSRs with these X509 KeyUsage/ ExtKeyUsage extensions. + description: EncodeUsagesInRequest controls whether key usages should be present in the CertificateRequest type: boolean ipAddresses: - description: Requested IP address subject alternative names. + description: IPAddresses is a list of IP address subjectAltNames to be set on the Certificate. type: array items: type: string isCA: - description: |- - Requested basic constraints isCA value. - The isCA value is used to set the `isCA` field on the created CertificateRequest - resources. Note that the issuer may choose to ignore the requested isCA value, just - like any other requested attribute. - - - If true, this will automatically add the `cert sign` usage to the list - of requested `usages`. + description: IsCA will mark this Certificate as valid for certificate signing. This will automatically add the `cert sign` usage to the list of `usages`. 
type: boolean issuerRef: - description: |- - Reference to the issuer responsible for issuing the certificate. - If the issuer is namespace-scoped, it must be in the same namespace - as the Certificate. If the issuer is cluster-scoped, it can be used - from any namespace. - - - The `name` field of the reference must always be specified. + description: IssuerRef is a reference to the issuer for this certificate. If the `kind` field is not set, or set to `Issuer`, an Issuer resource with the given name in the same namespace as the Certificate will be used. If the `kind` field is set to `ClusterIssuer`, a ClusterIssuer with the provided name will be used. The `name` field in this stanza is required at all times. type: object required: - name @@ -520,325 +322,94 @@ spec: description: Name of the resource being referred to. type: string keystores: - description: Additional keystore output formats to be stored in the Certificate's Secret. + description: Keystores configures additional keystore output formats stored in the `secretName` Secret resource. type: object properties: jks: - description: |- - JKS configures options for storing a JKS keystore in the - `spec.secretName` Secret resource. + description: JKS configures options for storing a JKS keystore in the `spec.secretName` Secret resource. type: object required: - create - passwordSecretRef properties: - alias: - description: |- - Alias specifies the alias of the key in the keystore, required by the JKS format. - If not provided, the default alias `certificate` will be used. - type: string create: - description: |- - Create enables JKS keystore creation for the Certificate. - If true, a file named `keystore.jks` will be created in the target - Secret resource, encrypted using the password stored in - `passwordSecretRef`. - The keystore file will be updated immediately. - If the issuer provided a CA certificate, a file named `truststore.jks` - will also be created in the target Secret resource, encrypted using the - password stored in `passwordSecretRef` - containing the issuing Certificate Authority + description: Create enables JKS keystore creation for the Certificate. If true, a file named `keystore.jks` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. The keystore file will be updated immediately. If the issuer provided a CA certificate, a file named `truststore.jks` will also be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef` containing the issuing Certificate Authority type: boolean passwordSecretRef: - description: |- - PasswordSecretRef is a reference to a key in a Secret resource - containing the password used to encrypt the JKS keystore. + description: PasswordSecretRef is a reference to a key in a Secret resource containing the password used to encrypt the JKS keystore. type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string pkcs12: - description: |- - PKCS12 configures options for storing a PKCS12 keystore in the - `spec.secretName` Secret resource. + description: PKCS12 configures options for storing a PKCS12 keystore in the `spec.secretName` Secret resource. type: object required: - create - passwordSecretRef properties: create: - description: |- - Create enables PKCS12 keystore creation for the Certificate. - If true, a file named `keystore.p12` will be created in the target - Secret resource, encrypted using the password stored in - `passwordSecretRef`. - The keystore file will be updated immediately. - If the issuer provided a CA certificate, a file named `truststore.p12` will - also be created in the target Secret resource, encrypted using the - password stored in `passwordSecretRef` containing the issuing Certificate - Authority + description: Create enables PKCS12 keystore creation for the Certificate. If true, a file named `keystore.p12` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. The keystore file will be updated immediately. If the issuer provided a CA certificate, a file named `truststore.p12` will also be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef` containing the issuing Certificate Authority type: boolean passwordSecretRef: - description: |- - PasswordSecretRef is a reference to a key in a Secret resource - containing the password used to encrypt the PKCS12 keystore. + description: PasswordSecretRef is a reference to a key in a Secret resource containing the password used to encrypt the PKCS12 keystore. type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string - profile: - description: |- - Profile specifies the key and certificate encryption algorithms and the HMAC algorithm - used to create the PKCS12 keystore. Default value is `LegacyRC2` for backward compatibility. - - - If provided, allowed values are: - `LegacyRC2`: Deprecated. Not supported by default in OpenSSL 3 or Java 20. - `LegacyDES`: Less secure algorithm. Use this option for maximal compatibility. - `Modern2023`: Secure algorithm. Use this option in case you have to always use secure algorithms - (eg. because of company policy). Please note that the security of the algorithm is not that important - in reality, because the unencrypted certificate and private key are also stored in the Secret. - type: string - enum: - - LegacyRC2 - - LegacyDES - - Modern2023 literalSubject: - description: |- - Requested X.509 certificate subject, represented using the LDAP "String - Representation of a Distinguished Name" [1]. - Important: the LDAP string format also specifies the order of the attributes - in the subject, this is important when issuing certs for LDAP authentication. 
- Example: `CN=foo,DC=corp,DC=example,DC=com` - More info [1]: https://datatracker.ietf.org/doc/html/rfc4514 - More info: https://github.com/cert-manager/cert-manager/issues/3203 - More info: https://github.com/cert-manager/cert-manager/issues/4424 - - - Cannot be set if the `subject` or `commonName` field is set. + description: LiteralSubject is an LDAP formatted string that represents the [X.509 Subject field](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6). Use this *instead* of the Subject field if you need to ensure the correct ordering of the RDN sequence, such as when issuing certs for LDAP authentication. See https://github.com/cert-manager/cert-manager/issues/3203, https://github.com/cert-manager/cert-manager/issues/4424. This field is alpha level and is only supported by cert-manager installations where LiteralCertificateSubject feature gate is enabled on both cert-manager controller and webhook. type: string - nameConstraints: - description: |- - x.509 certificate NameConstraint extension which MUST NOT be used in a non-CA certificate. - More Info: https://datatracker.ietf.org/doc/html/rfc5280#section-4.2.1.10 - - - This is an Alpha Feature and is only enabled with the - `--feature-gates=NameConstraints=true` option set on both - the controller and webhook components. - type: object - properties: - critical: - description: if true then the name constraints are marked critical. - type: boolean - excluded: - description: |- - Excluded contains the constraints which must be disallowed. Any name matching a - restriction in the excluded field is invalid regardless - of information appearing in the permitted - type: object - properties: - dnsDomains: - description: DNSDomains is a list of DNS domains that are permitted or excluded. - type: array - items: - type: string - emailAddresses: - description: EmailAddresses is a list of Email Addresses that are permitted or excluded. - type: array - items: - type: string - ipRanges: - description: |- - IPRanges is a list of IP Ranges that are permitted or excluded. - This should be a valid CIDR notation. - type: array - items: - type: string - uriDomains: - description: URIDomains is a list of URI domains that are permitted or excluded. - type: array - items: - type: string - permitted: - description: Permitted contains the constraints in which the names must be located. - type: object - properties: - dnsDomains: - description: DNSDomains is a list of DNS domains that are permitted or excluded. - type: array - items: - type: string - emailAddresses: - description: EmailAddresses is a list of Email Addresses that are permitted or excluded. - type: array - items: - type: string - ipRanges: - description: |- - IPRanges is a list of IP Ranges that are permitted or excluded. - This should be a valid CIDR notation. - type: array - items: - type: string - uriDomains: - description: URIDomains is a list of URI domains that are permitted or excluded. - type: array - items: - type: string - otherNames: - description: |- - `otherNames` is an escape hatch for SAN that allows any type. We currently restrict the support to string like otherNames, cf RFC 5280 p 37 - Any UTF8 String valued otherName can be passed with by setting the keys oid: x.x.x.x and UTF8Value: somevalue for `otherName`. - Most commonly this would be UPN set with oid: 1.3.6.1.4.1.311.20.2.3 - You should ensure that any OID passed is valid for the UTF8String type as we do not explicitly validate this. 
- type: array - items: - type: object - properties: - oid: - description: |- - OID is the object identifier for the otherName SAN. - The object identifier must be expressed as a dotted string, for - example, "1.2.840.113556.1.4.221". - type: string - utf8Value: - description: |- - utf8Value is the string value of the otherName SAN. - The utf8Value accepts any valid UTF8 string to set as value for the otherName SAN. - type: string privateKey: - description: |- - Private key options. These include the key algorithm and size, the used - encoding and the rotation policy. + description: Options to control private keys used for the Certificate. type: object properties: algorithm: - description: |- - Algorithm is the private key algorithm of the corresponding private key - for this certificate. - - - If provided, allowed values are either `RSA`, `ECDSA` or `Ed25519`. - If `algorithm` is specified and `size` is not provided, - key size of 2048 will be used for `RSA` key algorithm and - key size of 256 will be used for `ECDSA` key algorithm. - key size is ignored when using the `Ed25519` key algorithm. + description: Algorithm is the private key algorithm of the corresponding private key for this certificate. If provided, allowed values are either `RSA`,`Ed25519` or `ECDSA` If `algorithm` is specified and `size` is not provided, key size of 256 will be used for `ECDSA` key algorithm and key size of 2048 will be used for `RSA` key algorithm. key size is ignored when using the `Ed25519` key algorithm. type: string enum: - RSA - ECDSA - Ed25519 encoding: - description: |- - The private key cryptography standards (PKCS) encoding for this - certificate's private key to be encoded in. - - - If provided, allowed values are `PKCS1` and `PKCS8` standing for PKCS#1 - and PKCS#8, respectively. - Defaults to `PKCS1` if not specified. + description: The private key cryptography standards (PKCS) encoding for this certificate's private key to be encoded in. If provided, allowed values are `PKCS1` and `PKCS8` standing for PKCS#1 and PKCS#8, respectively. Defaults to `PKCS1` if not specified. type: string enum: - PKCS1 - PKCS8 rotationPolicy: - description: |- - RotationPolicy controls how private keys should be regenerated when a - re-issuance is being processed. - - - If set to `Never`, a private key will only be generated if one does not - already exist in the target `spec.secretName`. If one does exists but it - does not have the correct algorithm or size, a warning will be raised - to await user intervention. - If set to `Always`, a private key matching the specified requirements - will be generated whenever a re-issuance occurs. - Default is `Never` for backward compatibility. + description: RotationPolicy controls how private keys should be regenerated when a re-issuance is being processed. If set to Never, a private key will only be generated if one does not already exist in the target `spec.secretName`. If one does exists but it does not have the correct algorithm or size, a warning will be raised to await user intervention. If set to Always, a private key matching the specified requirements will be generated whenever a re-issuance occurs. Default is 'Never' for backward compatibility. type: string enum: - Never - Always size: - description: |- - Size is the key bit size of the corresponding private key for this certificate. - - - If `algorithm` is set to `RSA`, valid values are `2048`, `4096` or `8192`, - and will default to `2048` if not specified. 
- If `algorithm` is set to `ECDSA`, valid values are `256`, `384` or `521`, - and will default to `256` if not specified. - If `algorithm` is set to `Ed25519`, Size is ignored. - No other values are allowed. + description: Size is the key bit size of the corresponding private key for this certificate. If `algorithm` is set to `RSA`, valid values are `2048`, `4096` or `8192`, and will default to `2048` if not specified. If `algorithm` is set to `ECDSA`, valid values are `256`, `384` or `521`, and will default to `256` if not specified. If `algorithm` is set to `Ed25519`, Size is ignored. No other values are allowed. type: integer renewBefore: - description: |- - How long before the currently issued certificate's expiry cert-manager should - renew the certificate. For example, if a certificate is valid for 60 minutes, - and `renewBefore=10m`, cert-manager will begin to attempt to renew the certificate - 50 minutes after it was issued (i.e. when there are 10 minutes remaining until - the certificate is no longer valid). - - - NOTE: The actual lifetime of the issued certificate is used to determine the - renewal time. If an issuer returns a certificate with a different lifetime than - the one requested, cert-manager will use the lifetime of the issued certificate. - - - If unset, this defaults to 1/3 of the issued certificate's lifetime. - Minimum accepted value is 5 minutes. - Value must be in units accepted by Go time.ParseDuration https://golang.org/pkg/time/#ParseDuration. + description: How long before the currently issued certificate's expiry cert-manager should renew the certificate. The default is 2/3 of the issued certificate's duration. Minimum accepted value is 5 minutes. Value must be in units accepted by Go time.ParseDuration https://golang.org/pkg/time/#ParseDuration type: string revisionHistoryLimit: - description: |- - The maximum number of CertificateRequest revisions that are maintained in - the Certificate's history. Each revision represents a single `CertificateRequest` - created by this Certificate, either when it was created, renewed, or Spec - was changed. Revisions will be removed by oldest first if the number of - revisions exceeds this number. - - - If set, revisionHistoryLimit must be a value of `1` or greater. - If unset (`nil`), revisions will not be garbage collected. - Default value is `nil`. + description: revisionHistoryLimit is the maximum number of CertificateRequest revisions that are maintained in the Certificate's history. Each revision represents a single `CertificateRequest` created by this Certificate, either when it was created, renewed, or Spec was changed. Revisions will be removed by oldest first if the number of revisions exceeds this number. If set, revisionHistoryLimit must be a value of `1` or greater. If unset (`nil`), revisions will not be garbage collected. Default value is `nil`. type: integer format: int32 secretName: - description: |- - Name of the Secret resource that will be automatically created and - managed by this Certificate resource. It will be populated with a - private key and certificate, signed by the denoted issuer. The Secret - resource lives in the same namespace as the Certificate resource. + description: SecretName is the name of the secret resource that will be automatically created and managed by this Certificate resource. It will be populated with a private key and certificate, signed by the denoted issuer. 
type: string secretTemplate: - description: |- - Defines annotations and labels to be copied to the Certificate's Secret. - Labels and annotations on the Secret will be changed as they appear on the - SecretTemplate when added or removed. SecretTemplate annotations are added - in conjunction with, and cannot overwrite, the base set of annotations - cert-manager sets on the Certificate's Secret. + description: SecretTemplate defines annotations and labels to be copied to the Certificate's Secret. Labels and annotations on the Secret will be changed as they appear on the SecretTemplate when added or removed. SecretTemplate annotations are added in conjunction with, and cannot overwrite, the base set of annotations cert-manager sets on the Certificate's Secret. type: object properties: annotations: @@ -852,13 +423,7 @@ spec: additionalProperties: type: string subject: - description: |- - Requested set of X509 certificate subject attributes. - More info: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 - - - The common name attribute is specified separately in the `commonName` field. - Cannot be set if the `literalSubject` field is set. + description: Full X509 name specification (https://golang.org/pkg/crypto/x509/pkix/#Name). type: object properties: countries: @@ -900,52 +465,15 @@ spec: items: type: string uris: - description: Requested URI subject alternative names. + description: URIs is a list of URI subjectAltNames to be set on the Certificate. type: array items: type: string usages: - description: |- - Requested key usages and extended key usages. - These usages are used to set the `usages` field on the created CertificateRequest - resources. If `encodeUsagesInRequest` is unset or set to `true`, the usages - will additionally be encoded in the `request` field which contains the CSR blob. - - - If unset, defaults to `digital signature` and `key encipherment`. + description: Usages is the set of x509 usages that are requested for the certificate. Defaults to `digital signature` and `key encipherment` if not specified. type: array items: - description: |- - KeyUsage specifies valid usage contexts for keys. - See: - https://tools.ietf.org/html/rfc5280#section-4.2.1.3 - https://tools.ietf.org/html/rfc5280#section-4.2.1.12 - - - Valid KeyUsage values are as follows: - "signing", - "digital signature", - "content commitment", - "key encipherment", - "key agreement", - "data encipherment", - "cert sign", - "crl sign", - "encipher only", - "decipher only", - "any", - "server auth", - "client auth", - "code signing", - "email protection", - "s/mime", - "ipsec end system", - "ipsec tunnel", - "ipsec user", - "timestamping", - "ocsp signing", - "microsoft sgc", - "netscape sgc" + description: "KeyUsage specifies valid usage contexts for keys. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 https://tools.ietf.org/html/rfc5280#section-4.2.1.12 \n Valid KeyUsage values are as follows: \"signing\", \"digital signature\", \"content commitment\", \"key encipherment\", \"key agreement\", \"data encipherment\", \"cert sign\", \"crl sign\", \"encipher only\", \"decipher only\", \"any\", \"server auth\", \"client auth\", \"code signing\", \"email protection\", \"s/mime\", \"ipsec end system\", \"ipsec tunnel\", \"ipsec user\", \"timestamping\", \"ocsp signing\", \"microsoft sgc\", \"netscape sgc\"" type: string enum: - signing @@ -972,17 +500,11 @@ spec: - microsoft sgc - netscape sgc status: - description: |- - Status of the Certificate. - This is set and managed automatically. 
- Read-only. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + description: Status of the Certificate. This is set and managed automatically. type: object properties: conditions: - description: |- - List of status conditions to indicate the status of certificates. - Known condition types are `Ready` and `Issuing`. + description: List of status conditions to indicate the status of certificates. Known condition types are `Ready` and `Issuing`. type: array items: description: CertificateCondition contains condition information for an Certificate. @@ -992,29 +514,18 @@ spec: - type properties: lastTransitionTime: - description: |- - LastTransitionTime is the timestamp corresponding to the last status - change of this condition. + description: LastTransitionTime is the timestamp corresponding to the last status change of this condition. type: string format: date-time message: - description: |- - Message is a human readable description of the details of the last - transition, complementing reason. + description: Message is a human readable description of the details of the last transition, complementing reason. type: string observedGeneration: - description: |- - If set, this represents the .metadata.generation that the condition was - set based upon. - For instance, if .metadata.generation is currently 12, but the - .status.condition[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the Certificate. + description: If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the Certificate. type: integer format: int64 reason: - description: |- - Reason is a brief machine readable explanation for the condition's last - transition. + description: Reason is a brief machine readable explanation for the condition's last transition. type: string status: description: Status of the condition, one of (`True`, `False`, `Unknown`). @@ -1030,85 +541,37 @@ spec: - type x-kubernetes-list-type: map failedIssuanceAttempts: - description: |- - The number of continuous failed issuance attempts up till now. This - field gets removed (if set) on a successful issuance and gets set to - 1 if unset and an issuance has failed. If an issuance has failed, the - delay till the next issuance will be calculated using formula - time.Hour * 2 ^ (failedIssuanceAttempts - 1). + description: The number of continuous failed issuance attempts up till now. This field gets removed (if set) on a successful issuance and gets set to 1 if unset and an issuance has failed. If an issuance has failed, the delay till the next issuance will be calculated using formula time.Hour * 2 ^ (failedIssuanceAttempts - 1). type: integer lastFailureTime: - description: |- - LastFailureTime is set only if the lastest issuance for this - Certificate failed and contains the time of the failure. If an - issuance has failed, the delay till the next issuance will be - calculated using formula time.Hour * 2 ^ (failedIssuanceAttempts - - 1). If the latest issuance has succeeded this field will be unset. + description: LastFailureTime is set only if the lastest issuance for this Certificate failed and contains the time of the failure. 
If an issuance has failed, the delay till the next issuance will be calculated using formula time.Hour * 2 ^ (failedIssuanceAttempts - 1). If the latest issuance has succeeded this field will be unset. type: string format: date-time nextPrivateKeySecretName: - description: |- - The name of the Secret resource containing the private key to be used - for the next certificate iteration. - The keymanager controller will automatically set this field if the - `Issuing` condition is set to `True`. - It will automatically unset this field when the Issuing condition is - not set or False. + description: The name of the Secret resource containing the private key to be used for the next certificate iteration. The keymanager controller will automatically set this field if the `Issuing` condition is set to `True`. It will automatically unset this field when the Issuing condition is not set or False. type: string notAfter: - description: |- - The expiration time of the certificate stored in the secret named - by this resource in `spec.secretName`. + description: The expiration time of the certificate stored in the secret named by this resource in `spec.secretName`. type: string format: date-time notBefore: - description: |- - The time after which the certificate stored in the secret named - by this resource in `spec.secretName` is valid. + description: The time after which the certificate stored in the secret named by this resource in spec.secretName is valid. type: string format: date-time renewalTime: - description: |- - RenewalTime is the time at which the certificate will be next - renewed. - If not set, no upcoming renewal is scheduled. + description: RenewalTime is the time at which the certificate will be next renewed. If not set, no upcoming renewal is scheduled. type: string format: date-time revision: - description: |- - The current 'revision' of the certificate as issued. - - - When a CertificateRequest resource is created, it will have the - `cert-manager.io/certificate-revision` set to one greater than the - current value of this field. - - - Upon issuance, this field will be set to the value of the annotation - on the CertificateRequest resource used to issue the certificate. - - - Persisting the value on the CertificateRequest resource allows the - certificates controller to know whether a request is part of an old - issuance or if it is part of the ongoing revision's issuance by - checking if the revision value in the annotation is greater than this - field. + description: "The current 'revision' of the certificate as issued. \n When a CertificateRequest resource is created, it will have the `cert-manager.io/certificate-revision` set to one greater than the current value of this field. \n Upon issuance, this field will be set to the value of the annotation on the CertificateRequest resource used to issue the certificate. \n Persisting the value on the CertificateRequest resource allows the certificates controller to know whether a request is part of an old issuance or if it is part of the ongoing revision's issuance by checking if the revision value in the annotation is greater than this field." 
type: integer served: true storage: true - -# END crd {{- end }} - --- -# START crd {{- if or .Values.crds.enabled .Values.installCRDs }} apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: challenges.acme.cert-manager.io - # START annotations {{- if .Values.crds.keep }} - annotations: - helm.sh/resource-policy: keep - # END annotations {{- end }} labels: app: '{{ template "cert-manager.name" . }}' app.kubernetes.io/name: '{{ template "cert-manager.name" . }}' @@ -1151,19 +614,10 @@ spec: - spec properties: apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object @@ -1180,23 +634,13 @@ spec: - url properties: authorizationURL: - description: |- - The URL to the ACME Authorization resource that this - challenge is a part of. + description: The URL to the ACME Authorization resource that this challenge is a part of. type: string dnsName: - description: |- - dnsName is the identifier that this challenge is for, e.g. example.com. - If the requested DNSName is a 'wildcard', this field MUST be set to the - non-wildcard domain, e.g. for `*.example.com`, it must be `example.com`. + description: dnsName is the identifier that this challenge is for, e.g. example.com. If the requested DNSName is a 'wildcard', this field MUST be set to the non-wildcard domain, e.g. for `*.example.com`, it must be `example.com`. type: string issuerRef: - description: |- - References a properly configured ACME-type Issuer which should - be used to create this Challenge. - If the Issuer does not exist, processing will be retried. - If the Issuer is not an 'ACME' Issuer, an error will be returned and the - Challenge will be marked as failed. + description: References a properly configured ACME-type Issuer which should be used to create this Challenge. If the Issuer does not exist, processing will be retried. If the Issuer is not an 'ACME' Issuer, an error will be returned and the Challenge will be marked as failed. type: object required: - name @@ -1211,54 +655,34 @@ spec: description: Name of the resource being referred to. type: string key: - description: |- - The ACME challenge key for this challenge - For HTTP01 challenges, this is the value that must be responded with to - complete the HTTP01 challenge in the format: - `.`. 
- For DNS01 challenges, this is the base64 encoded SHA256 sum of the - `.` - text that must be set as the TXT record content. + description: 'The ACME challenge key for this challenge For HTTP01 challenges, this is the value that must be responded with to complete the HTTP01 challenge in the format: `.`. For DNS01 challenges, this is the base64 encoded SHA256 sum of the `.` text that must be set as the TXT record content.' type: string solver: - description: |- - Contains the domain solving configuration that should be used to - solve this challenge resource. + description: Contains the domain solving configuration that should be used to solve this challenge resource. type: object properties: dns01: - description: |- - Configures cert-manager to attempt to complete authorizations by - performing the DNS01 challenge flow. + description: Configures cert-manager to attempt to complete authorizations by performing the DNS01 challenge flow. type: object properties: acmeDNS: - description: |- - Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) API to manage - DNS01 challenge records. + description: Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) API to manage DNS01 challenge records. type: object required: - accountSecretRef - host properties: accountSecretRef: - description: |- - A reference to a specific 'key' within a Secret resource. - In some instances, `key` is a required field. + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string host: type: string @@ -1272,61 +696,40 @@ spec: - serviceConsumerDomain properties: accessTokenSecretRef: - description: |- - A reference to a specific 'key' within a Secret resource. - In some instances, `key` is a required field. + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string clientSecretSecretRef: - description: |- - A reference to a specific 'key' within a Secret resource. - In some instances, `key` is a required field. + description: A reference to a specific 'key' within a Secret resource. 
In some instances, `key` is a required field. type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string clientTokenSecretRef: - description: |- - A reference to a specific 'key' within a Secret resource. - In some instances, `key` is a required field. + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string serviceConsumerDomain: type: string @@ -1338,30 +741,19 @@ spec: - subscriptionID properties: clientID: - description: |- - Auth: Azure Service Principal: - The ClientID of the Azure Service Principal used to authenticate with Azure DNS. - If set, ClientSecret and TenantID must also be set. + description: if both this and ClientSecret are left unset MSI will be used type: string clientSecretSecretRef: - description: |- - Auth: Azure Service Principal: - A reference to a Secret containing the password associated with the Service Principal. - If set, ClientID and TenantID must also be set. + description: if both this and ClientID are left unset MSI will be used type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string environment: description: name of the Azure environment (default AzurePublicCloud) @@ -1375,19 +767,14 @@ spec: description: name of the DNS zone that should be used type: string managedIdentity: - description: |- - Auth: Azure Workload Identity or Azure Managed Service Identity: - Settings to enable Azure Workload Identity or Azure Managed Service Identity - If set, ClientID, ClientSecret and TenantID must not be set. 
+ description: managed identity configuration, can not be used at the same time as clientID, clientSecretSecretRef or tenantID type: object properties: clientID: description: client ID of the managed identity, can not be used at the same time as resourceID type: string resourceID: - description: |- - resource ID of the managed identity, can not be used at the same time as clientID - Cannot be used for Azure Managed Service Identity + description: resource ID of the managed identity, can not be used at the same time as clientID type: string resourceGroupName: description: resource group the DNS zone is located in @@ -1396,10 +783,7 @@ spec: description: ID of the Azure subscription type: string tenantID: - description: |- - Auth: Azure Service Principal: - The TenantID of the Azure Service Principal used to authenticate with Azure DNS. - If set, ClientID and ClientSecret must also be set. + description: when specifying ClientID and ClientSecret then this field is also needed type: string cloudDNS: description: Use the Google Cloud DNS API to manage DNS01 challenge records. @@ -1408,55 +792,37 @@ spec: - project properties: hostedZoneName: - description: |- - HostedZoneName is an optional field that tells cert-manager in which - Cloud DNS zone the challenge record has to be created. - If left empty cert-manager will automatically choose a zone. + description: HostedZoneName is an optional field that tells cert-manager in which Cloud DNS zone the challenge record has to be created. If left empty cert-manager will automatically choose a zone. type: string project: type: string serviceAccountSecretRef: - description: |- - A reference to a specific 'key' within a Secret resource. - In some instances, `key` is a required field. + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string cloudflare: description: Use the Cloudflare API to manage DNS01 challenge records. type: object properties: apiKeySecretRef: - description: |- - API key to use to authenticate with Cloudflare. - Note: using an API token to authenticate is now the recommended method - as it allows greater control of permissions. + description: 'API key to use to authenticate with Cloudflare. Note: using an API token to authenticate is now the recommended method as it allows greater control of permissions.' type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string apiTokenSecretRef: description: API token used to authenticate with Cloudflare. @@ -1465,23 +831,16 @@ spec: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string email: description: Email of the account, only required when using API key based authentication. type: string cnameStrategy: - description: |- - CNAMEStrategy configures how the DNS01 provider should handle CNAME - records when found in DNS zones. + description: CNAMEStrategy configures how the DNS01 provider should handle CNAME records when found in DNS zones. type: string enum: - None @@ -1493,69 +852,43 @@ spec: - tokenSecretRef properties: tokenSecretRef: - description: |- - A reference to a specific 'key' within a Secret resource. - In some instances, `key` is a required field. + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string rfc2136: - description: |- - Use RFC2136 ("Dynamic Updates in the Domain Name System") (https://datatracker.ietf.org/doc/rfc2136/) - to manage DNS01 challenge records. + description: Use RFC2136 ("Dynamic Updates in the Domain Name System") (https://datatracker.ietf.org/doc/rfc2136/) to manage DNS01 challenge records. type: object required: - nameserver properties: nameserver: - description: |- - The IP address or hostname of an authoritative DNS server supporting - RFC2136 in the form host:port. If the host is an IPv6 address it must be - enclosed in square brackets (e.g [2001:db8::1]) ; port is optional. - This field is required. + description: The IP address or hostname of an authoritative DNS server supporting RFC2136 in the form host:port. If the host is an IPv6 address it must be enclosed in square brackets (e.g [2001:db8::1]) ; port is optional. This field is required. type: string tsigAlgorithm: - description: |- - The TSIG Algorithm configured in the DNS supporting RFC2136. Used only - when ``tsigSecretSecretRef`` and ``tsigKeyName`` are defined. 
- Supported values are (case-insensitive): ``HMACMD5`` (default), - ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``. + description: 'The TSIG Algorithm configured in the DNS supporting RFC2136. Used only when ``tsigSecretSecretRef`` and ``tsigKeyName`` are defined. Supported values are (case-insensitive): ``HMACMD5`` (default), ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``.' type: string tsigKeyName: - description: |- - The TSIG Key name configured in the DNS. - If ``tsigSecretSecretRef`` is defined, this field is required. + description: The TSIG Key name configured in the DNS. If ``tsigSecretSecretRef`` is defined, this field is required. type: string tsigSecretSecretRef: - description: |- - The name of the secret containing the TSIG value. - If ``tsigKeyName`` is defined, this field is required. + description: The name of the secret containing the TSIG value. If ``tsigKeyName`` is defined, this field is required. type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string route53: description: Use the AWS Route53 API to manage DNS01 challenge records. @@ -1564,71 +897,20 @@ spec: - region properties: accessKeyID: - description: |- - The AccessKeyID is used for authentication. - Cannot be set when SecretAccessKeyID is set. - If neither the Access Key nor Key ID are set, we fall-back to using env - vars, shared credentials file or AWS Instance metadata, - see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials + description: 'The AccessKeyID is used for authentication. Cannot be set when SecretAccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' type: string accessKeyIDSecretRef: - description: |- - The SecretAccessKey is used for authentication. If set, pull the AWS - access key ID from a key within a Kubernetes Secret. - Cannot be set when AccessKeyID is set. - If neither the Access Key nor Key ID are set, we fall-back to using env - vars, shared credentials file or AWS Instance metadata, - see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials + description: 'The SecretAccessKey is used for authentication. If set, pull the AWS access key ID from a key within a Kubernetes Secret. Cannot be set when AccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. 
- Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string - auth: - description: Auth configures how cert-manager authenticates. - type: object - required: - - kubernetes - properties: - kubernetes: - description: |- - Kubernetes authenticates with Route53 using AssumeRoleWithWebIdentity - by passing a bound ServiceAccount token. - type: object - required: - - serviceAccountRef - properties: - serviceAccountRef: - description: |- - A reference to a service account that will be used to request a bound - token (also known as "projected token"). To use this field, you must - configure an RBAC rule to let cert-manager request a token. - type: object - required: - - name - properties: - audiences: - description: |- - TokenAudiences is an optional list of audiences to include in the - token passed to AWS. The default token consisting of the issuer's namespace - and name is always included. - If unset the audience defaults to `sts.amazonaws.com`. - type: array - items: - type: string - name: - description: Name of the ServiceAccount used to request a token. - type: string hostedZoneID: description: If set, the provider will manage only this zone in Route53 and will not do an lookup using the route53:ListHostedZonesByName api call. type: string @@ -1636,301 +918,113 @@ spec: description: Always set the region when using AccessKeyID and SecretAccessKey type: string role: - description: |- - Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey - or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata + description: Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata type: string secretAccessKeySecretRef: - description: |- - The SecretAccessKey is used for authentication. - If neither the Access Key nor Key ID are set, we fall-back to using env - vars, shared credentials file or AWS Instance metadata, - see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials + description: 'The SecretAccessKey is used for authentication. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string webhook: - description: |- - Configure an external webhook based DNS01 challenge solver to manage - DNS01 challenge records. + description: Configure an external webhook based DNS01 challenge solver to manage DNS01 challenge records. type: object required: - groupName - solverName properties: config: - description: |- - Additional configuration that should be passed to the webhook apiserver - when challenges are processed. - This can contain arbitrary JSON data. - Secret values should not be specified in this stanza. - If secret values are needed (e.g. credentials for a DNS service), you - should use a SecretKeySelector to reference a Secret resource. - For details on the schema of this field, consult the webhook provider - implementation's documentation. + description: Additional configuration that should be passed to the webhook apiserver when challenges are processed. This can contain arbitrary JSON data. Secret values should not be specified in this stanza. If secret values are needed (e.g. credentials for a DNS service), you should use a SecretKeySelector to reference a Secret resource. For details on the schema of this field, consult the webhook provider implementation's documentation. x-kubernetes-preserve-unknown-fields: true groupName: - description: |- - The API group name that should be used when POSTing ChallengePayload - resources to the webhook apiserver. - This should be the same as the GroupName specified in the webhook - provider implementation. + description: The API group name that should be used when POSTing ChallengePayload resources to the webhook apiserver. This should be the same as the GroupName specified in the webhook provider implementation. type: string solverName: - description: |- - The name of the solver to use, as defined in the webhook provider - implementation. - This will typically be the name of the provider, e.g. 'cloudflare'. + description: The name of the solver to use, as defined in the webhook provider implementation. This will typically be the name of the provider, e.g. 'cloudflare'. type: string http01: - description: |- - Configures cert-manager to attempt to complete authorizations by - performing the HTTP01 challenge flow. - It is not possible to obtain certificates for wildcard domain names - (e.g. `*.example.com`) using the HTTP01 challenge mechanism. + description: Configures cert-manager to attempt to complete authorizations by performing the HTTP01 challenge flow. It is not possible to obtain certificates for wildcard domain names (e.g. `*.example.com`) using the HTTP01 challenge mechanism. type: object properties: gatewayHTTPRoute: - description: |- - The Gateway API is a sig-network community API that models service networking - in Kubernetes (https://gateway-api.sigs.k8s.io/). The Gateway solver will - create HTTPRoutes with the specified labels in the same namespace as the challenge. - This solver is experimental, and fields / behaviour may change in the future. + description: The Gateway API is a sig-network community API that models service networking in Kubernetes (https://gateway-api.sigs.k8s.io/). The Gateway solver will create HTTPRoutes with the specified labels in the same namespace as the challenge. This solver is experimental, and fields / behaviour may change in the future. 
type: object properties: labels: - description: |- - Custom labels that will be applied to HTTPRoutes created by cert-manager - while solving HTTP-01 challenges. + description: Custom labels that will be applied to HTTPRoutes created by cert-manager while solving HTTP-01 challenges. type: object additionalProperties: type: string parentRefs: - description: |- - When solving an HTTP-01 challenge, cert-manager creates an HTTPRoute. - cert-manager needs to know which parentRefs should be used when creating - the HTTPRoute. Usually, the parentRef references a Gateway. See: - https://gateway-api.sigs.k8s.io/api-types/httproute/#attaching-to-gateways + description: 'When solving an HTTP-01 challenge, cert-manager creates an HTTPRoute. cert-manager needs to know which parentRefs should be used when creating the HTTPRoute. Usually, the parentRef references a Gateway. See: https://gateway-api.sigs.k8s.io/api-types/httproute/#attaching-to-gateways' type: array items: - description: |- - ParentReference identifies an API object (usually a Gateway) that can be considered - a parent of this resource (usually a route). There are two kinds of parent resources - with "Core" support: - - - * Gateway (Gateway conformance profile) - * Service (Mesh conformance profile, ClusterIP Services only) - - - This API may be extended in the future to support additional kinds of parent - resources. - - - The API object must be valid in the cluster; the Group and Kind must - be registered in the cluster for this reference to be valid. + description: "ParentReference identifies an API object (usually a Gateway) that can be considered a parent of this resource (usually a route). The only kind of parent resource with \"Core\" support is Gateway. This API may be extended in the future to support additional kinds of parent resources, such as HTTPRoute. \n The API object must be valid in the cluster; the Group and Kind must be registered in the cluster for this reference to be valid." type: object required: - name properties: group: - description: |- - Group is the group of the referent. - When unspecified, "gateway.networking.k8s.io" is inferred. - To set the core API group (such as for a "Service" kind referent), - Group must be explicitly set to "" (empty string). - - - Support: Core + description: "Group is the group of the referent. When unspecified, \"gateway.networking.k8s.io\" is inferred. To set the core API group (such as for a \"Service\" kind referent), Group must be explicitly set to \"\" (empty string). \n Support: Core" type: string default: gateway.networking.k8s.io maxLength: 253 pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ kind: - description: |- - Kind is kind of the referent. - - - There are two kinds of parent resources with "Core" support: - - - * Gateway (Gateway conformance profile) - * Service (Mesh conformance profile, ClusterIP Services only) - - - Support for other resources is Implementation-Specific. + description: "Kind is kind of the referent. \n Support: Core (Gateway) \n Support: Implementation-specific (Other Resources)" type: string default: Gateway maxLength: 63 minLength: 1 pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ name: - description: |- - Name is the name of the referent. - - - Support: Core + description: "Name is the name of the referent. \n Support: Core" type: string maxLength: 253 minLength: 1 namespace: - description: |- - Namespace is the namespace of the referent. When unspecified, this refers - to the local namespace of the Route. 
- - - Note that there are specific rules for ParentRefs which cross namespace - boundaries. Cross-namespace references are only valid if they are explicitly - allowed by something in the namespace they are referring to. For example: - Gateway has the AllowedRoutes field, and ReferenceGrant provides a - generic way to enable any other kind of cross-namespace reference. - - - - ParentRefs from a Route to a Service in the same namespace are "producer" - routes, which apply default routing rules to inbound connections from - any namespace to the Service. - - - ParentRefs from a Route to a Service in a different namespace are - "consumer" routes, and these routing rules are only applied to outbound - connections originating from the same namespace as the Route, for which - the intended destination of the connections are a Service targeted as a - ParentRef of the Route. - - - - Support: Core + description: "Namespace is the namespace of the referent. When unspecified, this refers to the local namespace of the Route. \n Note that there are specific rules for ParentRefs which cross namespace boundaries. Cross-namespace references are only valid if they are explicitly allowed by something in the namespace they are referring to. For example: Gateway has the AllowedRoutes field, and ReferenceGrant provides a generic way to enable any other kind of cross-namespace reference. \n Support: Core" type: string maxLength: 63 minLength: 1 pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ port: - description: |- - Port is the network port this Route targets. It can be interpreted - differently based on the type of parent resource. - - - When the parent resource is a Gateway, this targets all listeners - listening on the specified port that also support this kind of Route(and - select this Route). It's not recommended to set `Port` unless the - networking behaviors specified in a Route must apply to a specific port - as opposed to a listener(s) whose port(s) may be changed. When both Port - and SectionName are specified, the name and port of the selected listener - must match both specified values. - - - - When the parent resource is a Service, this targets a specific port in the - Service spec. When both Port (experimental) and SectionName are specified, - the name and port of the selected port must match both specified values. - - - - Implementations MAY choose to support other parent resources. - Implementations supporting other types of parent resources MUST clearly - document how/if Port is interpreted. - - - For the purpose of status, an attachment is considered successful as - long as the parent resource accepts it partially. For example, Gateway - listeners can restrict which Routes can attach to them by Route kind, - namespace, or hostname. If 1 of 2 Gateway listeners accept attachment - from the referencing Route, the Route MUST be considered successfully - attached. If no Gateway listeners accept attachment from this Route, - the Route MUST be considered detached from the Gateway. - - - Support: Extended + description: "Port is the network port this Route targets. It can be interpreted differently based on the type of parent resource. \n When the parent resource is a Gateway, this targets all listeners listening on the specified port that also support this kind of Route(and select this Route). It's not recommended to set `Port` unless the networking behaviors specified in a Route must apply to a specific port as opposed to a listener(s) whose port(s) may be changed. 
When both Port and SectionName are specified, the name and port of the selected listener must match both specified values. \n Implementations MAY choose to support other parent resources. Implementations supporting other types of parent resources MUST clearly document how/if Port is interpreted. \n For the purpose of status, an attachment is considered successful as long as the parent resource accepts it partially. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Extended \n " type: integer format: int32 maximum: 65535 minimum: 1 sectionName: - description: |- - SectionName is the name of a section within the target resource. In the - following resources, SectionName is interpreted as the following: - - - * Gateway: Listener name. When both Port (experimental) and SectionName - are specified, the name and port of the selected listener must match - both specified values. - * Service: Port name. When both Port (experimental) and SectionName - are specified, the name and port of the selected listener must match - both specified values. - - - Implementations MAY choose to support attaching Routes to other resources. - If that is the case, they MUST clearly document how SectionName is - interpreted. - - - When unspecified (empty string), this will reference the entire resource. - For the purpose of status, an attachment is considered successful if at - least one section in the parent resource accepts it. For example, Gateway - listeners can restrict which Routes can attach to them by Route kind, - namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from - the referencing Route, the Route MUST be considered successfully - attached. If no Gateway listeners accept attachment from this Route, the - Route MUST be considered detached from the Gateway. - - - Support: Core + description: "SectionName is the name of a section within the target resource. In the following resources, SectionName is interpreted as the following: \n * Gateway: Listener Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. \n Implementations MAY choose to support attaching Routes to other resources. If that is the case, they MUST clearly document how SectionName is interpreted. \n When unspecified (empty string), this will reference the entire resource. For the purpose of status, an attachment is considered successful if at least one section in the parent resource accepts it. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Core" type: string maxLength: 253 minLength: 1 pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ serviceType: - description: |- - Optional service type for Kubernetes solver service. Supported values - are NodePort or ClusterIP. If unset, defaults to NodePort. + description: Optional service type for Kubernetes solver service. 
Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. type: string ingress: - description: |- - The ingress based HTTP01 challenge solver will solve challenges by - creating or modifying Ingress resources in order to route requests for - '/.well-known/acme-challenge/XYZ' to 'challenge solver' pods that are - provisioned by cert-manager for each Challenge to be completed. + description: The ingress based HTTP01 challenge solver will solve challenges by creating or modifying Ingress resources in order to route requests for '/.well-known/acme-challenge/XYZ' to 'challenge solver' pods that are provisioned by cert-manager for each Challenge to be completed. type: object properties: class: - description: |- - This field configures the annotation `kubernetes.io/ingress.class` when - creating Ingress resources to solve ACME challenges that use this - challenge solver. Only one of `class`, `name` or `ingressClassName` may - be specified. + description: This field configures the annotation `kubernetes.io/ingress.class` when creating Ingress resources to solve ACME challenges that use this challenge solver. Only one of `class`, `name` or `ingressClassName` may be specified. type: string ingressClassName: - description: |- - This field configures the field `ingressClassName` on the created Ingress - resources used to solve ACME challenges that use this challenge solver. - This is the recommended way of configuring the ingress class. Only one of - `class`, `name` or `ingressClassName` may be specified. + description: This field configures the field `ingressClassName` on the created Ingress resources used to solve ACME challenges that use this challenge solver. This is the recommended way of configuring the ingress class. Only one of `class`, `name` or `ingressClassName` may be specified. type: string ingressTemplate: - description: |- - Optional ingress template used to configure the ACME challenge solver - ingress used for HTTP01 challenges. + description: Optional ingress template used to configure the ACME challenge solver ingress used for HTTP01 challenges. type: object properties: metadata: - description: |- - ObjectMeta overrides for the ingress used to solve HTTP01 challenges. - Only the 'labels' and 'annotations' fields may be set. - If labels or annotations overlap with in-built values, the values here - will override the in-built values. + description: ObjectMeta overrides for the ingress used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. type: object properties: annotations: @@ -1944,26 +1038,14 @@ spec: additionalProperties: type: string name: - description: |- - The name of the ingress resource that should have ACME challenge solving - routes inserted into it in order to solve HTTP01 challenges. - This is typically used in conjunction with ingress controllers like - ingress-gce, which maintains a 1:1 mapping between external IPs and - ingress resources. Only one of `class`, `name` or `ingressClassName` may - be specified. + description: The name of the ingress resource that should have ACME challenge solving routes inserted into it in order to solve HTTP01 challenges. This is typically used in conjunction with ingress controllers like ingress-gce, which maintains a 1:1 mapping between external IPs and ingress resources. Only one of `class`, `name` or `ingressClassName` may be specified. 
type: string podTemplate: - description: |- - Optional pod template used to configure the ACME challenge solver pods - used for HTTP01 challenges. + description: Optional pod template used to configure the ACME challenge solver pods used for HTTP01 challenges. type: object properties: metadata: - description: |- - ObjectMeta overrides for the pod used to solve HTTP01 challenges. - Only the 'labels' and 'annotations' fields may be set. - If labels or annotations overlap with in-built values, the values here - will override the in-built values. + description: ObjectMeta overrides for the pod used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. type: object properties: annotations: @@ -1977,10 +1059,7 @@ spec: additionalProperties: type: string spec: - description: |- - PodSpec defines overrides for the HTTP01 challenge solver pod. - Check ACMEChallengeSolverHTTP01IngressPodSpec to find out currently supported fields. - All other fields will be ignored. + description: PodSpec defines overrides for the HTTP01 challenge solver pod. Check ACMEChallengeSolverHTTP01IngressPodSpec to find out currently supported fields. All other fields will be ignored. type: object properties: affinity: @@ -1992,21 +1071,10 @@ spec: type: object properties: preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. - for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node matches the corresponding matchExpressions; the - node(s) with the highest sum are the most preferred. + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. type: array items: - description: |- - An empty preferred scheduling term matches all objects with implicit weight 0 - (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). type: object required: - preference @@ -2020,9 +1088,7 @@ spec: description: A list of node selector requirements by node's labels. type: array items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
type: object required: - key @@ -2032,29 +1098,18 @@ spec: description: The label key that the selector applies to. type: string operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. type: array items: type: string - x-kubernetes-list-type: atomic - x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. type: array items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. type: object required: - key @@ -2064,35 +1119,20 @@ spec: description: The label key that the selector applies to. type: string operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. type: array items: type: string - x-kubernetes-list-type: atomic - x-kubernetes-list-type: atomic x-kubernetes-map-type: atomic weight: description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. type: integer format: int32 - x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. - If the affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. due to an update), the system - may or may not try to eventually evict the pod from its node. 
+ description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. type: object required: - nodeSelectorTerms @@ -2101,19 +1141,14 @@ spec: description: Required. A list of node selector terms. The terms are ORed. type: array items: - description: |- - A null or empty node selector term matches no objects. The requirements of - them are ANDed. - The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. type: object properties: matchExpressions: description: A list of node selector requirements by node's labels. type: array items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. type: object required: - key @@ -2123,29 +1158,18 @@ spec: description: The label key that the selector applies to. type: string operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. type: array items: type: string - x-kubernetes-list-type: atomic - x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. type: array items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. type: object required: - key @@ -2155,40 +1179,21 @@ spec: description: The label key that the selector applies to. type: string operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. 
If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. type: array items: type: string - x-kubernetes-list-type: atomic - x-kubernetes-list-type: atomic x-kubernetes-map-type: atomic - x-kubernetes-list-type: atomic x-kubernetes-map-type: atomic podAffinity: description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). type: object properties: preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. - for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. type: array items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) @@ -2204,18 +1209,14 @@ spec: - topologyKey properties: labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. + description: A label query over a set of resources, in this case pods. type: object properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. type: array items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. type: object required: - key @@ -2225,76 +1226,28 @@ spec: description: key is the label key that the selector applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. 
type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. type: array items: type: string - x-kubernetes-list-type: atomic - x-kubernetes-list-type: atomic matchLabels: - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. type: object additionalProperties: type: string x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both matchLabelKeys and labelSelector. - Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. - type: array - items: - type: string - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. - type: array - items: - type: string - x-kubernetes-list-type: atomic namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". 
An empty selector ({}) matches all namespaces. type: object properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. type: array items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. type: object required: - key @@ -2304,90 +1257,49 @@ spec: description: key is the label key that the selector applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. type: array items: type: string - x-kubernetes-list-type: atomic - x-kubernetes-list-type: atomic matchLabels: - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. type: object additionalProperties: type: string x-kubernetes-map-type: atomic namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". type: array items: type: string - x-kubernetes-list-type: atomic topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. 
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. type: string weight: - description: |- - weight associated with matching the corresponding podAffinityTerm, - in the range 1-100. + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. type: integer format: int32 - x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. - If the affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. due to a pod label update), the - system may or may not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes corresponding to each - podAffinityTerm are intersected, i.e. all terms must be satisfied. + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. type: array items: - description: |- - Defines a set of pods (namely those matching the labelSelector - relative to the given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node whose value of - the label with key matches that of any node on which - a pod of the set of pods is running + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running type: object required: - topologyKey properties: labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. + description: A label query over a set of resources, in this case pods. type: object properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. type: array items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. type: object required: - key @@ -2397,76 +1309,28 @@ spec: description: key is the label key that the selector applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. + description: operator represents a key's relationship to a set of values. 
Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. type: array items: type: string - x-kubernetes-list-type: atomic - x-kubernetes-list-type: atomic matchLabels: - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. type: object additionalProperties: type: string x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both matchLabelKeys and labelSelector. - Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. - type: array - items: - type: string - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. - type: array - items: - type: string - x-kubernetes-list-type: atomic namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. 
null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. type: object properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. type: array items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. type: object required: - key @@ -2476,64 +1340,33 @@ spec: description: key is the label key that the selector applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. type: array items: type: string - x-kubernetes-list-type: atomic - x-kubernetes-list-type: atomic matchLabels: - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. type: object additionalProperties: type: string x-kubernetes-map-type: atomic namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". type: array items: type: string - x-kubernetes-list-type: atomic topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. 
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. type: string - x-kubernetes-list-type: atomic podAntiAffinity: description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). type: object properties: preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the anti-affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. - for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. type: array items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) @@ -2549,18 +1382,14 @@ spec: - topologyKey properties: labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. + description: A label query over a set of resources, in this case pods. type: object properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. type: array items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. type: object required: - key @@ -2570,76 +1399,28 @@ spec: description: key is the label key that the selector applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. 
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. type: array items: type: string - x-kubernetes-list-type: atomic - x-kubernetes-list-type: atomic matchLabels: - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. type: object additionalProperties: type: string x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both matchLabelKeys and labelSelector. - Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. - type: array - items: - type: string - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. - type: array - items: - type: string - x-kubernetes-list-type: atomic namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. type: object properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
type: array items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. type: object required: - key @@ -2649,90 +1430,49 @@ spec: description: key is the label key that the selector applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. type: array items: type: string - x-kubernetes-list-type: atomic - x-kubernetes-list-type: atomic matchLabels: - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. type: object additionalProperties: type: string x-kubernetes-map-type: atomic namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". type: array items: type: string - x-kubernetes-list-type: atomic topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. type: string weight: - description: |- - weight associated with matching the corresponding podAffinityTerm, - in the range 1-100. 
+ description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. type: integer format: int32 - x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the anti-affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. - If the anti-affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. due to a pod label update), the - system may or may not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes corresponding to each - podAffinityTerm are intersected, i.e. all terms must be satisfied. + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. type: array items: - description: |- - Defines a set of pods (namely those matching the labelSelector - relative to the given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node whose value of - the label with key matches that of any node on which - a pod of the set of pods is running + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running type: object required: - topologyKey properties: labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. + description: A label query over a set of resources, in this case pods. type: object properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. type: array items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. type: object required: - key @@ -2742,76 +1482,28 @@ spec: description: key is the label key that the selector applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. 
If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. type: array items: type: string - x-kubernetes-list-type: atomic - x-kubernetes-list-type: atomic matchLabels: - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. type: object additionalProperties: type: string x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both matchLabelKeys and labelSelector. - Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. - type: array - items: - type: string - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. - type: array - items: - type: string - x-kubernetes-list-type: atomic namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. type: object properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. type: array items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. 
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. type: object required: - key @@ -2821,75 +1513,40 @@ spec: description: key is the label key that the selector applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. type: array items: type: string - x-kubernetes-list-type: atomic - x-kubernetes-list-type: atomic matchLabels: - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. type: object additionalProperties: type: string x-kubernetes-map-type: atomic namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". type: array items: type: string - x-kubernetes-list-type: atomic topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. type: string - x-kubernetes-list-type: atomic imagePullSecrets: description: If specified, the pod's imagePullSecrets type: array items: - description: |- - LocalObjectReference contains enough information to let you locate the - referenced object inside the same namespace. 
+ description: LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. type: object properties: name: - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - TODO: Add other useful fields. apiVersion, kind, uid? - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' type: string - default: "" x-kubernetes-map-type: atomic nodeSelector: - description: |- - NodeSelector is a selector which must be true for the pod to fit on a node. - Selector which must match a node's labels for the pod to be scheduled on that node. - More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + description: 'NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node''s labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' type: object additionalProperties: type: string @@ -2903,141 +1560,76 @@ spec: description: If specified, the pod's tolerations. type: array items: - description: |- - The pod this Toleration is attached to tolerates any taint that matches - the triple using the matching operator . + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . type: object properties: effect: - description: |- - Effect indicates the taint effect to match. Empty means match all taint effects. - When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. type: string key: - description: |- - Key is the taint key that the toleration applies to. Empty means match all taint keys. - If the key is empty, operator must be Exists; this combination means to match all values and all keys. + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. type: string operator: - description: |- - Operator represents a key's relationship to the value. - Valid operators are Exists and Equal. Defaults to Equal. - Exists is equivalent to wildcard for value, so that a pod can - tolerate all taints of a particular category. + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. type: string tolerationSeconds: - description: |- - TolerationSeconds represents the period of time the toleration (which must be - of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, - it is not set, which means tolerate the taint forever (do not evict). Zero and - negative values will be treated as 0 (evict immediately) by the system. 
+ description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. type: integer format: int64 value: - description: |- - Value is the taint value the toleration matches to. - If the operator is Exists, the value should be empty, otherwise just a regular string. + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. type: string serviceType: - description: |- - Optional service type for Kubernetes solver service. Supported values - are NodePort or ClusterIP. If unset, defaults to NodePort. + description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. type: string selector: - description: |- - Selector selects a set of DNSNames on the Certificate resource that - should be solved using this challenge solver. - If not specified, the solver will be treated as the 'default' solver - with the lowest priority, i.e. if any other solver has a more specific - match, it will be used instead. + description: Selector selects a set of DNSNames on the Certificate resource that should be solved using this challenge solver. If not specified, the solver will be treated as the 'default' solver with the lowest priority, i.e. if any other solver has a more specific match, it will be used instead. type: object properties: dnsNames: - description: |- - List of DNSNames that this solver will be used to solve. - If specified and a match is found, a dnsNames selector will take - precedence over a dnsZones selector. - If multiple solvers match with the same dnsNames value, the solver - with the most matching labels in matchLabels will be selected. - If neither has more matches, the solver defined earlier in the list - will be selected. + description: List of DNSNames that this solver will be used to solve. If specified and a match is found, a dnsNames selector will take precedence over a dnsZones selector. If multiple solvers match with the same dnsNames value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. type: array items: type: string dnsZones: - description: |- - List of DNSZones that this solver will be used to solve. - The most specific DNS zone match specified here will take precedence - over other DNS zone matches, so a solver specifying sys.example.com - will be selected over one specifying example.com for the domain - www.sys.example.com. - If multiple solvers match with the same dnsZones value, the solver - with the most matching labels in matchLabels will be selected. - If neither has more matches, the solver defined earlier in the list - will be selected. + description: List of DNSZones that this solver will be used to solve. The most specific DNS zone match specified here will take precedence over other DNS zone matches, so a solver specifying sys.example.com will be selected over one specifying example.com for the domain www.sys.example.com. If multiple solvers match with the same dnsZones value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. 
type: array items: type: string matchLabels: - description: |- - A label selector that is used to refine the set of certificate's that - this challenge solver will apply to. + description: A label selector that is used to refine the set of certificate's that this challenge solver will apply to. type: object additionalProperties: type: string token: - description: |- - The ACME challenge token for this challenge. - This is the raw value returned from the ACME server. + description: The ACME challenge token for this challenge. This is the raw value returned from the ACME server. type: string type: - description: |- - The type of ACME challenge this resource represents. - One of "HTTP-01" or "DNS-01". + description: The type of ACME challenge this resource represents. One of "HTTP-01" or "DNS-01". type: string enum: - HTTP-01 - DNS-01 url: - description: |- - The URL of the ACME Challenge resource for this challenge. - This can be used to lookup details about the status of this challenge. + description: The URL of the ACME Challenge resource for this challenge. This can be used to lookup details about the status of this challenge. type: string wildcard: - description: |- - wildcard will be true if this challenge is for a wildcard identifier, - for example '*.example.com'. + description: wildcard will be true if this challenge is for a wildcard identifier, for example '*.example.com'. type: boolean status: type: object properties: presented: - description: |- - presented will be set to true if the challenge values for this challenge - are currently 'presented'. - This *does not* imply the self check is passing. Only that the values - have been 'submitted' for the appropriate challenge mechanism (i.e. the - DNS01 TXT record has been presented, or the HTTP01 configuration has been - configured). + description: presented will be set to true if the challenge values for this challenge are currently 'presented'. This *does not* imply the self check is passing. Only that the values have been 'submitted' for the appropriate challenge mechanism (i.e. the DNS01 TXT record has been presented, or the HTTP01 configuration has been configured). type: boolean processing: - description: |- - Used to denote whether this challenge should be processed or not. - This field will only be set to true by the 'scheduling' component. - It will only be set to false by the 'challenges' controller, after the - challenge has reached a final state or timed out. - If this field is set to false, the challenge controller will not take - any more action. + description: Used to denote whether this challenge should be processed or not. This field will only be set to true by the 'scheduling' component. It will only be set to false by the 'challenges' controller, after the challenge has reached a final state or timed out. If this field is set to false, the challenge controller will not take any more action. type: boolean reason: - description: |- - Contains human readable information on why the Challenge is in the - current state. + description: Contains human readable information on why the Challenge is in the current state. type: string state: - description: |- - Contains the current 'state' of the challenge. - If not set, the state of the challenge is unknown. + description: Contains the current 'state' of the challenge. If not set, the state of the challenge is unknown. 
type: string enum: - valid @@ -3051,23 +1643,15 @@ spec: storage: true subresources: status: {} - -# END crd {{- end }} - --- -# START crd {{- if or .Values.crds.enabled .Values.installCRDs }} apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: clusterissuers.cert-manager.io - # START annotations {{- if .Values.crds.keep }} - annotations: - helm.sh/resource-policy: keep - # END annotations {{- end }} labels: app: '{{ template "cert-manager.name" . }}' app.kubernetes.io/name: '{{ template "cert-manager.name" . }}' - app.kubernetes.io/instance: '{{ .Release.Name }}' + app.kubernetes.io/instance: "{{ .Release.Name }}" # Generated labels {{- include "labels" . | nindent 4 }} spec: group: cert-manager.io @@ -3097,30 +1681,16 @@ spec: type: date schema: openAPIV3Schema: - description: |- - A ClusterIssuer represents a certificate issuing authority which can be - referenced as part of `issuerRef` fields. - It is similar to an Issuer, however it is cluster-scoped and therefore can - be referenced by resources that exist in *any* namespace, not just the same - namespace as the referent. + description: A ClusterIssuer represents a certificate issuing authority which can be referenced as part of `issuerRef` fields. It is similar to an Issuer, however it is cluster-scoped and therefore can be referenced by resources that exist in *any* namespace, not just the same namespace as the referent. type: object required: - spec properties: apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object @@ -3129,65 +1699,34 @@ spec: type: object properties: acme: - description: |- - ACME configures this issuer to communicate with a RFC8555 (ACME) server - to obtain signed x509 certificates. + description: ACME configures this issuer to communicate with a RFC8555 (ACME) server to obtain signed x509 certificates. type: object required: - privateKeySecretRef - server properties: caBundle: - description: |- - Base64-encoded bundle of PEM CAs which can be used to validate the certificate - chain presented by the ACME server. - Mutually exclusive with SkipTLSVerify; prefer using CABundle to prevent various - kinds of security vulnerabilities. 
- If CABundle and SkipTLSVerify are unset, the system certificate bundle inside - the container is used to validate the TLS connection. + description: Base64-encoded bundle of PEM CAs which can be used to validate the certificate chain presented by the ACME server. Mutually exclusive with SkipTLSVerify; prefer using CABundle to prevent various kinds of security vulnerabilities. If CABundle and SkipTLSVerify are unset, the system certificate bundle inside the container is used to validate the TLS connection. type: string format: byte disableAccountKeyGeneration: - description: |- - Enables or disables generating a new ACME account key. - If true, the Issuer resource will *not* request a new account but will expect - the account key to be supplied via an existing secret. - If false, the cert-manager system will generate a new ACME account key - for the Issuer. - Defaults to false. + description: Enables or disables generating a new ACME account key. If true, the Issuer resource will *not* request a new account but will expect the account key to be supplied via an existing secret. If false, the cert-manager system will generate a new ACME account key for the Issuer. Defaults to false. type: boolean email: - description: |- - Email is the email address to be associated with the ACME account. - This field is optional, but it is strongly recommended to be set. - It will be used to contact you in case of issues with your account or - certificates, including expiry notification emails. - This field may be updated after the account is initially registered. + description: Email is the email address to be associated with the ACME account. This field is optional, but it is strongly recommended to be set. It will be used to contact you in case of issues with your account or certificates, including expiry notification emails. This field may be updated after the account is initially registered. type: string enableDurationFeature: - description: |- - Enables requesting a Not After date on certificates that matches the - duration of the certificate. This is not supported by all ACME servers - like Let's Encrypt. If set to true when the ACME server does not support - it, it will create an error on the Order. - Defaults to false. + description: Enables requesting a Not After date on certificates that matches the duration of the certificate. This is not supported by all ACME servers like Let's Encrypt. If set to true when the ACME server does not support it it will create an error on the Order. Defaults to false. type: boolean externalAccountBinding: - description: |- - ExternalAccountBinding is a reference to a CA external account of the ACME - server. - If set, upon registration cert-manager will attempt to associate the given - external account credentials with the registered ACME account. + description: ExternalAccountBinding is a reference to a CA external account of the ACME server. If set, upon registration cert-manager will attempt to associate the given external account credentials with the registered ACME account. type: object required: - keyID - keySecretRef properties: keyAlgorithm: - description: |- - Deprecated: keyAlgorithm field exists for historical compatibility - reasons and should not be used. The algorithm is now hardcoded to HS256 - in golang/x/crypto/acme. + description: 'Deprecated: keyAlgorithm field exists for historical compatibility reasons and should not be used. The algorithm is now hardcoded to HS256 in golang/x/crypto/acme.' 
type: string enum: - HS256 @@ -3197,130 +1736,68 @@ spec: description: keyID is the ID of the CA key that the External Account is bound to. type: string keySecretRef: - description: |- - keySecretRef is a Secret Key Selector referencing a data item in a Kubernetes - Secret which holds the symmetric MAC key of the External Account Binding. - The `key` is the index string that is paired with the key data in the - Secret and should not be confused with the key data itself, or indeed with - the External Account Binding keyID above. - The secret key stored in the Secret **must** be un-padded, base64 URL - encoded data. + description: keySecretRef is a Secret Key Selector referencing a data item in a Kubernetes Secret which holds the symmetric MAC key of the External Account Binding. The `key` is the index string that is paired with the key data in the Secret and should not be confused with the key data itself, or indeed with the External Account Binding keyID above. The secret key stored in the Secret **must** be un-padded, base64 URL encoded data. type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string preferredChain: - description: |- - PreferredChain is the chain to use if the ACME server outputs multiple. - PreferredChain is no guarantee that this one gets delivered by the ACME - endpoint. - For example, for Let's Encrypt's DST crosssign you would use: - "DST Root CA X3" or "ISRG Root X1" for the newer Let's Encrypt root CA. - This value picks the first certificate bundle in the combined set of - ACME default and alternative chains that has a root-most certificate with - this value as its issuer's commonname. + description: 'PreferredChain is the chain to use if the ACME server outputs multiple. PreferredChain is no guarantee that this one gets delivered by the ACME endpoint. For example, for Let''s Encrypt''s DST crosssign you would use: "DST Root CA X3" or "ISRG Root X1" for the newer Let''s Encrypt root CA. This value picks the first certificate bundle in the ACME alternative chains that has a certificate with this value as its issuer''s CN' type: string maxLength: 64 privateKeySecretRef: - description: |- - PrivateKey is the name of a Kubernetes Secret resource that will be used to - store the automatically generated ACME account private key. - Optionally, a `key` may be specified to select a specific entry within - the named Secret resource. - If `key` is not specified, a default of `tls.key` will be used. + description: PrivateKey is the name of a Kubernetes Secret resource that will be used to store the automatically generated ACME account private key. Optionally, a `key` may be specified to select a specific entry within the named Secret resource. If `key` is not specified, a default of `tls.key` will be used. 
type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string server: - description: |- - Server is the URL used to access the ACME server's 'directory' endpoint. - For example, for Let's Encrypt's staging endpoint, you would use: - "https://acme-staging-v02.api.letsencrypt.org/directory". - Only ACME v2 endpoints (i.e. RFC 8555) are supported. + description: 'Server is the URL used to access the ACME server''s ''directory'' endpoint. For example, for Let''s Encrypt''s staging endpoint, you would use: "https://acme-staging-v02.api.letsencrypt.org/directory". Only ACME v2 endpoints (i.e. RFC 8555) are supported.' type: string skipTLSVerify: - description: |- - INSECURE: Enables or disables validation of the ACME server TLS certificate. - If true, requests to the ACME server will not have the TLS certificate chain - validated. - Mutually exclusive with CABundle; prefer using CABundle to prevent various - kinds of security vulnerabilities. - Only enable this option in development environments. - If CABundle and SkipTLSVerify are unset, the system certificate bundle inside - the container is used to validate the TLS connection. - Defaults to false. + description: 'INSECURE: Enables or disables validation of the ACME server TLS certificate. If true, requests to the ACME server will not have the TLS certificate chain validated. Mutually exclusive with CABundle; prefer using CABundle to prevent various kinds of security vulnerabilities. Only enable this option in development environments. If CABundle and SkipTLSVerify are unset, the system certificate bundle inside the container is used to validate the TLS connection. Defaults to false.' type: boolean solvers: - description: |- - Solvers is a list of challenge solvers that will be used to solve - ACME challenges for the matching domains. - Solver configurations must be provided in order to obtain certificates - from an ACME server. - For more information, see: https://cert-manager.io/docs/configuration/acme/ + description: 'Solvers is a list of challenge solvers that will be used to solve ACME challenges for the matching domains. Solver configurations must be provided in order to obtain certificates from an ACME server. For more information, see: https://cert-manager.io/docs/configuration/acme/' type: array items: - description: |- - An ACMEChallengeSolver describes how to solve ACME challenges for the issuer it is part of. - A selector may be provided to use different solving strategies for different DNS names. - Only one of HTTP01 or DNS01 must be provided. + description: An ACMEChallengeSolver describes how to solve ACME challenges for the issuer it is part of. A selector may be provided to use different solving strategies for different DNS names. Only one of HTTP01 or DNS01 must be provided. 
type: object properties: dns01: - description: |- - Configures cert-manager to attempt to complete authorizations by - performing the DNS01 challenge flow. + description: Configures cert-manager to attempt to complete authorizations by performing the DNS01 challenge flow. type: object properties: acmeDNS: - description: |- - Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) API to manage - DNS01 challenge records. + description: Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) API to manage DNS01 challenge records. type: object required: - accountSecretRef - host properties: accountSecretRef: - description: |- - A reference to a specific 'key' within a Secret resource. - In some instances, `key` is a required field. + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string host: type: string @@ -3334,61 +1811,40 @@ spec: - serviceConsumerDomain properties: accessTokenSecretRef: - description: |- - A reference to a specific 'key' within a Secret resource. - In some instances, `key` is a required field. + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string clientSecretSecretRef: - description: |- - A reference to a specific 'key' within a Secret resource. - In some instances, `key` is a required field. + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string clientTokenSecretRef: - description: |- - A reference to a specific 'key' within a Secret resource. - In some instances, `key` is a required field. + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string serviceConsumerDomain: type: string @@ -3400,30 +1856,19 @@ spec: - subscriptionID properties: clientID: - description: |- - Auth: Azure Service Principal: - The ClientID of the Azure Service Principal used to authenticate with Azure DNS. - If set, ClientSecret and TenantID must also be set. + description: if both this and ClientSecret are left unset MSI will be used type: string clientSecretSecretRef: - description: |- - Auth: Azure Service Principal: - A reference to a Secret containing the password associated with the Service Principal. - If set, ClientID and TenantID must also be set. + description: if both this and ClientID are left unset MSI will be used type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string environment: description: name of the Azure environment (default AzurePublicCloud) @@ -3437,19 +1882,14 @@ spec: description: name of the DNS zone that should be used type: string managedIdentity: - description: |- - Auth: Azure Workload Identity or Azure Managed Service Identity: - Settings to enable Azure Workload Identity or Azure Managed Service Identity - If set, ClientID, ClientSecret and TenantID must not be set. 
+ description: managed identity configuration, can not be used at the same time as clientID, clientSecretSecretRef or tenantID type: object properties: clientID: description: client ID of the managed identity, can not be used at the same time as resourceID type: string resourceID: - description: |- - resource ID of the managed identity, can not be used at the same time as clientID - Cannot be used for Azure Managed Service Identity + description: resource ID of the managed identity, can not be used at the same time as clientID type: string resourceGroupName: description: resource group the DNS zone is located in @@ -3458,10 +1898,7 @@ spec: description: ID of the Azure subscription type: string tenantID: - description: |- - Auth: Azure Service Principal: - The TenantID of the Azure Service Principal used to authenticate with Azure DNS. - If set, ClientID and ClientSecret must also be set. + description: when specifying ClientID and ClientSecret then this field is also needed type: string cloudDNS: description: Use the Google Cloud DNS API to manage DNS01 challenge records. @@ -3470,55 +1907,37 @@ spec: - project properties: hostedZoneName: - description: |- - HostedZoneName is an optional field that tells cert-manager in which - Cloud DNS zone the challenge record has to be created. - If left empty cert-manager will automatically choose a zone. + description: HostedZoneName is an optional field that tells cert-manager in which Cloud DNS zone the challenge record has to be created. If left empty cert-manager will automatically choose a zone. type: string project: type: string serviceAccountSecretRef: - description: |- - A reference to a specific 'key' within a Secret resource. - In some instances, `key` is a required field. + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string cloudflare: description: Use the Cloudflare API to manage DNS01 challenge records. type: object properties: apiKeySecretRef: - description: |- - API key to use to authenticate with Cloudflare. - Note: using an API token to authenticate is now the recommended method - as it allows greater control of permissions. + description: 'API key to use to authenticate with Cloudflare. Note: using an API token to authenticate is now the recommended method as it allows greater control of permissions.' type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string apiTokenSecretRef: description: API token used to authenticate with Cloudflare. @@ -3527,23 +1946,16 @@ spec: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string email: description: Email of the account, only required when using API key based authentication. type: string cnameStrategy: - description: |- - CNAMEStrategy configures how the DNS01 provider should handle CNAME - records when found in DNS zones. + description: CNAMEStrategy configures how the DNS01 provider should handle CNAME records when found in DNS zones. type: string enum: - None @@ -3555,69 +1967,43 @@ spec: - tokenSecretRef properties: tokenSecretRef: - description: |- - A reference to a specific 'key' within a Secret resource. - In some instances, `key` is a required field. + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string rfc2136: - description: |- - Use RFC2136 ("Dynamic Updates in the Domain Name System") (https://datatracker.ietf.org/doc/rfc2136/) - to manage DNS01 challenge records. + description: Use RFC2136 ("Dynamic Updates in the Domain Name System") (https://datatracker.ietf.org/doc/rfc2136/) to manage DNS01 challenge records. type: object required: - nameserver properties: nameserver: - description: |- - The IP address or hostname of an authoritative DNS server supporting - RFC2136 in the form host:port. If the host is an IPv6 address it must be - enclosed in square brackets (e.g [2001:db8::1]) ; port is optional. - This field is required. + description: The IP address or hostname of an authoritative DNS server supporting RFC2136 in the form host:port. If the host is an IPv6 address it must be enclosed in square brackets (e.g [2001:db8::1]) ; port is optional. This field is required. type: string tsigAlgorithm: - description: |- - The TSIG Algorithm configured in the DNS supporting RFC2136. Used only - when ``tsigSecretSecretRef`` and ``tsigKeyName`` are defined. 
- Supported values are (case-insensitive): ``HMACMD5`` (default), - ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``. + description: 'The TSIG Algorithm configured in the DNS supporting RFC2136. Used only when ``tsigSecretSecretRef`` and ``tsigKeyName`` are defined. Supported values are (case-insensitive): ``HMACMD5`` (default), ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``.' type: string tsigKeyName: - description: |- - The TSIG Key name configured in the DNS. - If ``tsigSecretSecretRef`` is defined, this field is required. + description: The TSIG Key name configured in the DNS. If ``tsigSecretSecretRef`` is defined, this field is required. type: string tsigSecretSecretRef: - description: |- - The name of the secret containing the TSIG value. - If ``tsigKeyName`` is defined, this field is required. + description: The name of the secret containing the TSIG value. If ``tsigKeyName`` is defined, this field is required. type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string route53: description: Use the AWS Route53 API to manage DNS01 challenge records. @@ -3626,71 +2012,20 @@ spec: - region properties: accessKeyID: - description: |- - The AccessKeyID is used for authentication. - Cannot be set when SecretAccessKeyID is set. - If neither the Access Key nor Key ID are set, we fall-back to using env - vars, shared credentials file or AWS Instance metadata, - see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials + description: 'The AccessKeyID is used for authentication. Cannot be set when SecretAccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' type: string accessKeyIDSecretRef: - description: |- - The SecretAccessKey is used for authentication. If set, pull the AWS - access key ID from a key within a Kubernetes Secret. - Cannot be set when AccessKeyID is set. - If neither the Access Key nor Key ID are set, we fall-back to using env - vars, shared credentials file or AWS Instance metadata, - see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials + description: 'The SecretAccessKey is used for authentication. If set, pull the AWS access key ID from a key within a Kubernetes Secret. Cannot be set when AccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. 
- Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string - auth: - description: Auth configures how cert-manager authenticates. - type: object - required: - - kubernetes - properties: - kubernetes: - description: |- - Kubernetes authenticates with Route53 using AssumeRoleWithWebIdentity - by passing a bound ServiceAccount token. - type: object - required: - - serviceAccountRef - properties: - serviceAccountRef: - description: |- - A reference to a service account that will be used to request a bound - token (also known as "projected token"). To use this field, you must - configure an RBAC rule to let cert-manager request a token. - type: object - required: - - name - properties: - audiences: - description: |- - TokenAudiences is an optional list of audiences to include in the - token passed to AWS. The default token consisting of the issuer's namespace - and name is always included. - If unset the audience defaults to `sts.amazonaws.com`. - type: array - items: - type: string - name: - description: Name of the ServiceAccount used to request a token. - type: string hostedZoneID: description: If set, the provider will manage only this zone in Route53 and will not do an lookup using the route53:ListHostedZonesByName api call. type: string @@ -3698,301 +2033,113 @@ spec: description: Always set the region when using AccessKeyID and SecretAccessKey type: string role: - description: |- - Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey - or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata + description: Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata type: string secretAccessKeySecretRef: - description: |- - The SecretAccessKey is used for authentication. - If neither the Access Key nor Key ID are set, we fall-back to using env - vars, shared credentials file or AWS Instance metadata, - see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials + description: 'The SecretAccessKey is used for authentication. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string webhook: - description: |- - Configure an external webhook based DNS01 challenge solver to manage - DNS01 challenge records. + description: Configure an external webhook based DNS01 challenge solver to manage DNS01 challenge records. type: object required: - groupName - solverName properties: config: - description: |- - Additional configuration that should be passed to the webhook apiserver - when challenges are processed. - This can contain arbitrary JSON data. - Secret values should not be specified in this stanza. - If secret values are needed (e.g. credentials for a DNS service), you - should use a SecretKeySelector to reference a Secret resource. - For details on the schema of this field, consult the webhook provider - implementation's documentation. + description: Additional configuration that should be passed to the webhook apiserver when challenges are processed. This can contain arbitrary JSON data. Secret values should not be specified in this stanza. If secret values are needed (e.g. credentials for a DNS service), you should use a SecretKeySelector to reference a Secret resource. For details on the schema of this field, consult the webhook provider implementation's documentation. x-kubernetes-preserve-unknown-fields: true groupName: - description: |- - The API group name that should be used when POSTing ChallengePayload - resources to the webhook apiserver. - This should be the same as the GroupName specified in the webhook - provider implementation. + description: The API group name that should be used when POSTing ChallengePayload resources to the webhook apiserver. This should be the same as the GroupName specified in the webhook provider implementation. type: string solverName: - description: |- - The name of the solver to use, as defined in the webhook provider - implementation. - This will typically be the name of the provider, e.g. 'cloudflare'. + description: The name of the solver to use, as defined in the webhook provider implementation. This will typically be the name of the provider, e.g. 'cloudflare'. type: string http01: - description: |- - Configures cert-manager to attempt to complete authorizations by - performing the HTTP01 challenge flow. - It is not possible to obtain certificates for wildcard domain names - (e.g. `*.example.com`) using the HTTP01 challenge mechanism. + description: Configures cert-manager to attempt to complete authorizations by performing the HTTP01 challenge flow. It is not possible to obtain certificates for wildcard domain names (e.g. `*.example.com`) using the HTTP01 challenge mechanism. type: object properties: gatewayHTTPRoute: - description: |- - The Gateway API is a sig-network community API that models service networking - in Kubernetes (https://gateway-api.sigs.k8s.io/). The Gateway solver will - create HTTPRoutes with the specified labels in the same namespace as the challenge. - This solver is experimental, and fields / behaviour may change in the future. + description: The Gateway API is a sig-network community API that models service networking in Kubernetes (https://gateway-api.sigs.k8s.io/). The Gateway solver will create HTTPRoutes with the specified labels in the same namespace as the challenge. This solver is experimental, and fields / behaviour may change in the future. 
type: object properties: labels: - description: |- - Custom labels that will be applied to HTTPRoutes created by cert-manager - while solving HTTP-01 challenges. + description: Custom labels that will be applied to HTTPRoutes created by cert-manager while solving HTTP-01 challenges. type: object additionalProperties: type: string parentRefs: - description: |- - When solving an HTTP-01 challenge, cert-manager creates an HTTPRoute. - cert-manager needs to know which parentRefs should be used when creating - the HTTPRoute. Usually, the parentRef references a Gateway. See: - https://gateway-api.sigs.k8s.io/api-types/httproute/#attaching-to-gateways + description: 'When solving an HTTP-01 challenge, cert-manager creates an HTTPRoute. cert-manager needs to know which parentRefs should be used when creating the HTTPRoute. Usually, the parentRef references a Gateway. See: https://gateway-api.sigs.k8s.io/api-types/httproute/#attaching-to-gateways' type: array items: - description: |- - ParentReference identifies an API object (usually a Gateway) that can be considered - a parent of this resource (usually a route). There are two kinds of parent resources - with "Core" support: - - - * Gateway (Gateway conformance profile) - * Service (Mesh conformance profile, ClusterIP Services only) - - - This API may be extended in the future to support additional kinds of parent - resources. - - - The API object must be valid in the cluster; the Group and Kind must - be registered in the cluster for this reference to be valid. + description: "ParentReference identifies an API object (usually a Gateway) that can be considered a parent of this resource (usually a route). The only kind of parent resource with \"Core\" support is Gateway. This API may be extended in the future to support additional kinds of parent resources, such as HTTPRoute. \n The API object must be valid in the cluster; the Group and Kind must be registered in the cluster for this reference to be valid." type: object required: - name properties: group: - description: |- - Group is the group of the referent. - When unspecified, "gateway.networking.k8s.io" is inferred. - To set the core API group (such as for a "Service" kind referent), - Group must be explicitly set to "" (empty string). - - - Support: Core + description: "Group is the group of the referent. When unspecified, \"gateway.networking.k8s.io\" is inferred. To set the core API group (such as for a \"Service\" kind referent), Group must be explicitly set to \"\" (empty string). \n Support: Core" type: string default: gateway.networking.k8s.io maxLength: 253 pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ kind: - description: |- - Kind is kind of the referent. - - - There are two kinds of parent resources with "Core" support: - - - * Gateway (Gateway conformance profile) - * Service (Mesh conformance profile, ClusterIP Services only) - - - Support for other resources is Implementation-Specific. + description: "Kind is kind of the referent. \n Support: Core (Gateway) \n Support: Implementation-specific (Other Resources)" type: string default: Gateway maxLength: 63 minLength: 1 pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ name: - description: |- - Name is the name of the referent. - - - Support: Core + description: "Name is the name of the referent. \n Support: Core" type: string maxLength: 253 minLength: 1 namespace: - description: |- - Namespace is the namespace of the referent. When unspecified, this refers - to the local namespace of the Route. 
- - - Note that there are specific rules for ParentRefs which cross namespace - boundaries. Cross-namespace references are only valid if they are explicitly - allowed by something in the namespace they are referring to. For example: - Gateway has the AllowedRoutes field, and ReferenceGrant provides a - generic way to enable any other kind of cross-namespace reference. - - - - ParentRefs from a Route to a Service in the same namespace are "producer" - routes, which apply default routing rules to inbound connections from - any namespace to the Service. - - - ParentRefs from a Route to a Service in a different namespace are - "consumer" routes, and these routing rules are only applied to outbound - connections originating from the same namespace as the Route, for which - the intended destination of the connections are a Service targeted as a - ParentRef of the Route. - - - - Support: Core + description: "Namespace is the namespace of the referent. When unspecified, this refers to the local namespace of the Route. \n Note that there are specific rules for ParentRefs which cross namespace boundaries. Cross-namespace references are only valid if they are explicitly allowed by something in the namespace they are referring to. For example: Gateway has the AllowedRoutes field, and ReferenceGrant provides a generic way to enable any other kind of cross-namespace reference. \n Support: Core" type: string maxLength: 63 minLength: 1 pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ port: - description: |- - Port is the network port this Route targets. It can be interpreted - differently based on the type of parent resource. - - - When the parent resource is a Gateway, this targets all listeners - listening on the specified port that also support this kind of Route(and - select this Route). It's not recommended to set `Port` unless the - networking behaviors specified in a Route must apply to a specific port - as opposed to a listener(s) whose port(s) may be changed. When both Port - and SectionName are specified, the name and port of the selected listener - must match both specified values. - - - - When the parent resource is a Service, this targets a specific port in the - Service spec. When both Port (experimental) and SectionName are specified, - the name and port of the selected port must match both specified values. - - - - Implementations MAY choose to support other parent resources. - Implementations supporting other types of parent resources MUST clearly - document how/if Port is interpreted. - - - For the purpose of status, an attachment is considered successful as - long as the parent resource accepts it partially. For example, Gateway - listeners can restrict which Routes can attach to them by Route kind, - namespace, or hostname. If 1 of 2 Gateway listeners accept attachment - from the referencing Route, the Route MUST be considered successfully - attached. If no Gateway listeners accept attachment from this Route, - the Route MUST be considered detached from the Gateway. - - - Support: Extended + description: "Port is the network port this Route targets. It can be interpreted differently based on the type of parent resource. \n When the parent resource is a Gateway, this targets all listeners listening on the specified port that also support this kind of Route(and select this Route). It's not recommended to set `Port` unless the networking behaviors specified in a Route must apply to a specific port as opposed to a listener(s) whose port(s) may be changed. 
When both Port and SectionName are specified, the name and port of the selected listener must match both specified values. \n Implementations MAY choose to support other parent resources. Implementations supporting other types of parent resources MUST clearly document how/if Port is interpreted. \n For the purpose of status, an attachment is considered successful as long as the parent resource accepts it partially. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Extended \n " type: integer format: int32 maximum: 65535 minimum: 1 sectionName: - description: |- - SectionName is the name of a section within the target resource. In the - following resources, SectionName is interpreted as the following: - - - * Gateway: Listener name. When both Port (experimental) and SectionName - are specified, the name and port of the selected listener must match - both specified values. - * Service: Port name. When both Port (experimental) and SectionName - are specified, the name and port of the selected listener must match - both specified values. - - - Implementations MAY choose to support attaching Routes to other resources. - If that is the case, they MUST clearly document how SectionName is - interpreted. - - - When unspecified (empty string), this will reference the entire resource. - For the purpose of status, an attachment is considered successful if at - least one section in the parent resource accepts it. For example, Gateway - listeners can restrict which Routes can attach to them by Route kind, - namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from - the referencing Route, the Route MUST be considered successfully - attached. If no Gateway listeners accept attachment from this Route, the - Route MUST be considered detached from the Gateway. - - - Support: Core + description: "SectionName is the name of a section within the target resource. In the following resources, SectionName is interpreted as the following: \n * Gateway: Listener Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. \n Implementations MAY choose to support attaching Routes to other resources. If that is the case, they MUST clearly document how SectionName is interpreted. \n When unspecified (empty string), this will reference the entire resource. For the purpose of status, an attachment is considered successful if at least one section in the parent resource accepts it. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Core" type: string maxLength: 253 minLength: 1 pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ serviceType: - description: |- - Optional service type for Kubernetes solver service. Supported values - are NodePort or ClusterIP. If unset, defaults to NodePort. + description: Optional service type for Kubernetes solver service. 
Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. type: string ingress: - description: |- - The ingress based HTTP01 challenge solver will solve challenges by - creating or modifying Ingress resources in order to route requests for - '/.well-known/acme-challenge/XYZ' to 'challenge solver' pods that are - provisioned by cert-manager for each Challenge to be completed. + description: The ingress based HTTP01 challenge solver will solve challenges by creating or modifying Ingress resources in order to route requests for '/.well-known/acme-challenge/XYZ' to 'challenge solver' pods that are provisioned by cert-manager for each Challenge to be completed. type: object properties: class: - description: |- - This field configures the annotation `kubernetes.io/ingress.class` when - creating Ingress resources to solve ACME challenges that use this - challenge solver. Only one of `class`, `name` or `ingressClassName` may - be specified. + description: This field configures the annotation `kubernetes.io/ingress.class` when creating Ingress resources to solve ACME challenges that use this challenge solver. Only one of `class`, `name` or `ingressClassName` may be specified. type: string ingressClassName: - description: |- - This field configures the field `ingressClassName` on the created Ingress - resources used to solve ACME challenges that use this challenge solver. - This is the recommended way of configuring the ingress class. Only one of - `class`, `name` or `ingressClassName` may be specified. + description: This field configures the field `ingressClassName` on the created Ingress resources used to solve ACME challenges that use this challenge solver. This is the recommended way of configuring the ingress class. Only one of `class`, `name` or `ingressClassName` may be specified. type: string ingressTemplate: - description: |- - Optional ingress template used to configure the ACME challenge solver - ingress used for HTTP01 challenges. + description: Optional ingress template used to configure the ACME challenge solver ingress used for HTTP01 challenges. type: object properties: metadata: - description: |- - ObjectMeta overrides for the ingress used to solve HTTP01 challenges. - Only the 'labels' and 'annotations' fields may be set. - If labels or annotations overlap with in-built values, the values here - will override the in-built values. + description: ObjectMeta overrides for the ingress used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. type: object properties: annotations: @@ -4006,26 +2153,14 @@ spec: additionalProperties: type: string name: - description: |- - The name of the ingress resource that should have ACME challenge solving - routes inserted into it in order to solve HTTP01 challenges. - This is typically used in conjunction with ingress controllers like - ingress-gce, which maintains a 1:1 mapping between external IPs and - ingress resources. Only one of `class`, `name` or `ingressClassName` may - be specified. + description: The name of the ingress resource that should have ACME challenge solving routes inserted into it in order to solve HTTP01 challenges. This is typically used in conjunction with ingress controllers like ingress-gce, which maintains a 1:1 mapping between external IPs and ingress resources. Only one of `class`, `name` or `ingressClassName` may be specified. 
type: string podTemplate: - description: |- - Optional pod template used to configure the ACME challenge solver pods - used for HTTP01 challenges. + description: Optional pod template used to configure the ACME challenge solver pods used for HTTP01 challenges. type: object properties: metadata: - description: |- - ObjectMeta overrides for the pod used to solve HTTP01 challenges. - Only the 'labels' and 'annotations' fields may be set. - If labels or annotations overlap with in-built values, the values here - will override the in-built values. + description: ObjectMeta overrides for the pod used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. type: object properties: annotations: @@ -4039,10 +2174,7 @@ spec: additionalProperties: type: string spec: - description: |- - PodSpec defines overrides for the HTTP01 challenge solver pod. - Check ACMEChallengeSolverHTTP01IngressPodSpec to find out currently supported fields. - All other fields will be ignored. + description: PodSpec defines overrides for the HTTP01 challenge solver pod. Check ACMEChallengeSolverHTTP01IngressPodSpec to find out currently supported fields. All other fields will be ignored. type: object properties: affinity: @@ -4054,21 +2186,10 @@ spec: type: object properties: preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. - for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node matches the corresponding matchExpressions; the - node(s) with the highest sum are the most preferred. + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. type: array items: - description: |- - An empty preferred scheduling term matches all objects with implicit weight 0 - (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). type: object required: - preference @@ -4082,9 +2203,7 @@ spec: description: A list of node selector requirements by node's labels. type: array items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
type: object required: - key @@ -4094,29 +2213,18 @@ spec: description: The label key that the selector applies to. type: string operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. type: array items: type: string - x-kubernetes-list-type: atomic - x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. type: array items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. type: object required: - key @@ -4126,35 +2234,20 @@ spec: description: The label key that the selector applies to. type: string operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. type: array items: type: string - x-kubernetes-list-type: atomic - x-kubernetes-list-type: atomic x-kubernetes-map-type: atomic weight: description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. type: integer format: int32 - x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. - If the affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. due to an update), the system - may or may not try to eventually evict the pod from its node. 
+ description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. type: object required: - nodeSelectorTerms @@ -4163,19 +2256,14 @@ spec: description: Required. A list of node selector terms. The terms are ORed. type: array items: - description: |- - A null or empty node selector term matches no objects. The requirements of - them are ANDed. - The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. type: object properties: matchExpressions: description: A list of node selector requirements by node's labels. type: array items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. type: object required: - key @@ -4185,29 +2273,18 @@ spec: description: The label key that the selector applies to. type: string operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. type: array items: type: string - x-kubernetes-list-type: atomic - x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. type: array items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. type: object required: - key @@ -4217,40 +2294,21 @@ spec: description: The label key that the selector applies to. type: string operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. 
If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. type: array items: type: string - x-kubernetes-list-type: atomic - x-kubernetes-list-type: atomic x-kubernetes-map-type: atomic - x-kubernetes-list-type: atomic x-kubernetes-map-type: atomic podAffinity: description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). type: object properties: preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. - for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. type: array items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) @@ -4266,18 +2324,14 @@ spec: - topologyKey properties: labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. + description: A label query over a set of resources, in this case pods. type: object properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. type: array items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. type: object required: - key @@ -4287,76 +2341,28 @@ spec: description: key is the label key that the selector applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. 
type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. type: array items: type: string - x-kubernetes-list-type: atomic - x-kubernetes-list-type: atomic matchLabels: - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. type: object additionalProperties: type: string x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both matchLabelKeys and labelSelector. - Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. - type: array - items: - type: string - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. - type: array - items: - type: string - x-kubernetes-list-type: atomic namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". 
An empty selector ({}) matches all namespaces. type: object properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. type: array items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. type: object required: - key @@ -4366,90 +2372,49 @@ spec: description: key is the label key that the selector applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. type: array items: type: string - x-kubernetes-list-type: atomic - x-kubernetes-list-type: atomic matchLabels: - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. type: object additionalProperties: type: string x-kubernetes-map-type: atomic namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". type: array items: type: string - x-kubernetes-list-type: atomic topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. 
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. type: string weight: - description: |- - weight associated with matching the corresponding podAffinityTerm, - in the range 1-100. + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. type: integer format: int32 - x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. - If the affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. due to a pod label update), the - system may or may not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes corresponding to each - podAffinityTerm are intersected, i.e. all terms must be satisfied. + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. type: array items: - description: |- - Defines a set of pods (namely those matching the labelSelector - relative to the given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node whose value of - the label with key matches that of any node on which - a pod of the set of pods is running + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running type: object required: - topologyKey properties: labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. + description: A label query over a set of resources, in this case pods. type: object properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. type: array items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. type: object required: - key @@ -4459,76 +2424,28 @@ spec: description: key is the label key that the selector applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. + description: operator represents a key's relationship to a set of values. 
Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. type: array items: type: string - x-kubernetes-list-type: atomic - x-kubernetes-list-type: atomic matchLabels: - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. type: object additionalProperties: type: string x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both matchLabelKeys and labelSelector. - Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. - type: array - items: - type: string - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. - type: array - items: - type: string - x-kubernetes-list-type: atomic namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. 
null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. type: object properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. type: array items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. type: object required: - key @@ -4538,64 +2455,33 @@ spec: description: key is the label key that the selector applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. type: array items: type: string - x-kubernetes-list-type: atomic - x-kubernetes-list-type: atomic matchLabels: - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. type: object additionalProperties: type: string x-kubernetes-map-type: atomic namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". type: array items: type: string - x-kubernetes-list-type: atomic topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. 
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. type: string - x-kubernetes-list-type: atomic podAntiAffinity: description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). type: object properties: preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the anti-affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. - for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. type: array items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) @@ -4611,18 +2497,14 @@ spec: - topologyKey properties: labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. + description: A label query over a set of resources, in this case pods. type: object properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. type: array items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. type: object required: - key @@ -4632,76 +2514,28 @@ spec: description: key is the label key that the selector applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. 
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. type: array items: type: string - x-kubernetes-list-type: atomic - x-kubernetes-list-type: atomic matchLabels: - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. type: object additionalProperties: type: string x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both matchLabelKeys and labelSelector. - Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. - type: array - items: - type: string - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. - type: array - items: - type: string - x-kubernetes-list-type: atomic namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. type: object properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
type: array items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. type: object required: - key @@ -4711,90 +2545,49 @@ spec: description: key is the label key that the selector applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. type: array items: type: string - x-kubernetes-list-type: atomic - x-kubernetes-list-type: atomic matchLabels: - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. type: object additionalProperties: type: string x-kubernetes-map-type: atomic namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". type: array items: type: string - x-kubernetes-list-type: atomic topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. type: string weight: - description: |- - weight associated with matching the corresponding podAffinityTerm, - in the range 1-100. 
+ description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. type: integer format: int32 - x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the anti-affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. - If the anti-affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. due to a pod label update), the - system may or may not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes corresponding to each - podAffinityTerm are intersected, i.e. all terms must be satisfied. + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. type: array items: - description: |- - Defines a set of pods (namely those matching the labelSelector - relative to the given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node whose value of - the label with key matches that of any node on which - a pod of the set of pods is running + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running type: object required: - topologyKey properties: labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. + description: A label query over a set of resources, in this case pods. type: object properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. type: array items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. type: object required: - key @@ -4804,76 +2597,28 @@ spec: description: key is the label key that the selector applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. 
If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. type: array items: type: string - x-kubernetes-list-type: atomic - x-kubernetes-list-type: atomic matchLabels: - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. type: object additionalProperties: type: string x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both matchLabelKeys and labelSelector. - Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. - type: array - items: - type: string - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. - type: array - items: - type: string - x-kubernetes-list-type: atomic namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. type: object properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. type: array items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. 
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. type: object required: - key @@ -4883,75 +2628,40 @@ spec: description: key is the label key that the selector applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. type: array items: type: string - x-kubernetes-list-type: atomic - x-kubernetes-list-type: atomic matchLabels: - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. type: object additionalProperties: type: string x-kubernetes-map-type: atomic namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". type: array items: type: string - x-kubernetes-list-type: atomic topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. type: string - x-kubernetes-list-type: atomic imagePullSecrets: description: If specified, the pod's imagePullSecrets type: array items: - description: |- - LocalObjectReference contains enough information to let you locate the - referenced object inside the same namespace. 
+ description: LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. type: object properties: name: - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - TODO: Add other useful fields. apiVersion, kind, uid? - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' type: string - default: "" x-kubernetes-map-type: atomic nodeSelector: - description: |- - NodeSelector is a selector which must be true for the pod to fit on a node. - Selector which must match a node's labels for the pod to be scheduled on that node. - More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + description: 'NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node''s labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' type: object additionalProperties: type: string @@ -4965,146 +2675,77 @@ spec: description: If specified, the pod's tolerations. type: array items: - description: |- - The pod this Toleration is attached to tolerates any taint that matches - the triple using the matching operator . + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . type: object properties: effect: - description: |- - Effect indicates the taint effect to match. Empty means match all taint effects. - When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. type: string key: - description: |- - Key is the taint key that the toleration applies to. Empty means match all taint keys. - If the key is empty, operator must be Exists; this combination means to match all values and all keys. + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. type: string operator: - description: |- - Operator represents a key's relationship to the value. - Valid operators are Exists and Equal. Defaults to Equal. - Exists is equivalent to wildcard for value, so that a pod can - tolerate all taints of a particular category. + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. type: string tolerationSeconds: - description: |- - TolerationSeconds represents the period of time the toleration (which must be - of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, - it is not set, which means tolerate the taint forever (do not evict). Zero and - negative values will be treated as 0 (evict immediately) by the system. 
+ description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. type: integer format: int64 value: - description: |- - Value is the taint value the toleration matches to. - If the operator is Exists, the value should be empty, otherwise just a regular string. + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. type: string serviceType: - description: |- - Optional service type for Kubernetes solver service. Supported values - are NodePort or ClusterIP. If unset, defaults to NodePort. + description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. type: string selector: - description: |- - Selector selects a set of DNSNames on the Certificate resource that - should be solved using this challenge solver. - If not specified, the solver will be treated as the 'default' solver - with the lowest priority, i.e. if any other solver has a more specific - match, it will be used instead. + description: Selector selects a set of DNSNames on the Certificate resource that should be solved using this challenge solver. If not specified, the solver will be treated as the 'default' solver with the lowest priority, i.e. if any other solver has a more specific match, it will be used instead. type: object properties: dnsNames: - description: |- - List of DNSNames that this solver will be used to solve. - If specified and a match is found, a dnsNames selector will take - precedence over a dnsZones selector. - If multiple solvers match with the same dnsNames value, the solver - with the most matching labels in matchLabels will be selected. - If neither has more matches, the solver defined earlier in the list - will be selected. + description: List of DNSNames that this solver will be used to solve. If specified and a match is found, a dnsNames selector will take precedence over a dnsZones selector. If multiple solvers match with the same dnsNames value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. type: array items: type: string dnsZones: - description: |- - List of DNSZones that this solver will be used to solve. - The most specific DNS zone match specified here will take precedence - over other DNS zone matches, so a solver specifying sys.example.com - will be selected over one specifying example.com for the domain - www.sys.example.com. - If multiple solvers match with the same dnsZones value, the solver - with the most matching labels in matchLabels will be selected. - If neither has more matches, the solver defined earlier in the list - will be selected. + description: List of DNSZones that this solver will be used to solve. The most specific DNS zone match specified here will take precedence over other DNS zone matches, so a solver specifying sys.example.com will be selected over one specifying example.com for the domain www.sys.example.com. If multiple solvers match with the same dnsZones value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. 
type: array items: type: string matchLabels: - description: |- - A label selector that is used to refine the set of certificate's that - this challenge solver will apply to. + description: A label selector that is used to refine the set of certificate's that this challenge solver will apply to. type: object additionalProperties: type: string ca: - description: |- - CA configures this issuer to sign certificates using a signing CA keypair - stored in a Secret resource. - This is used to build internal PKIs that are managed by cert-manager. + description: CA configures this issuer to sign certificates using a signing CA keypair stored in a Secret resource. This is used to build internal PKIs that are managed by cert-manager. type: object required: - secretName properties: crlDistributionPoints: - description: |- - The CRL distribution points is an X.509 v3 certificate extension which identifies - the location of the CRL from which the revocation of this certificate can be checked. - If not set, certificates will be issued without distribution points set. - type: array - items: - type: string - issuingCertificateURLs: - description: |- - IssuingCertificateURLs is a list of URLs which this issuer should embed into certificates - it creates. See https://www.rfc-editor.org/rfc/rfc5280#section-4.2.2.1 for more details. - As an example, such a URL might be "http://ca.domain.com/ca.crt". + description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set, certificates will be issued without distribution points set. type: array items: type: string ocspServers: - description: |- - The OCSP server list is an X.509 v3 extension that defines a list of - URLs of OCSP responders. The OCSP responders can be queried for the - revocation status of an issued certificate. If not set, the - certificate will be issued with no OCSP servers set. For example, an - OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org". + description: The OCSP server list is an X.509 v3 extension that defines a list of URLs of OCSP responders. The OCSP responders can be queried for the revocation status of an issued certificate. If not set, the certificate will be issued with no OCSP servers set. For example, an OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org". type: array items: type: string secretName: - description: |- - SecretName is the name of the secret used to sign Certificates issued - by this Issuer. + description: SecretName is the name of the secret used to sign Certificates issued by this Issuer. type: string selfSigned: - description: |- - SelfSigned configures this issuer to 'self sign' certificates using the - private key used to create the CertificateRequest object. + description: SelfSigned configures this issuer to 'self sign' certificates using the private key used to create the CertificateRequest object. type: object properties: crlDistributionPoints: - description: |- - The CRL distribution points is an X.509 v3 certificate extension which identifies - the location of the CRL from which the revocation of this certificate can be checked. - If not set certificate will be issued without CDP. Values are strings. + description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set certificate will be issued without CDP. Values are strings. 
type: array items: type: string vault: - description: |- - Vault configures this issuer to sign certificates using a HashiCorp Vault - PKI backend. + description: Vault configures this issuer to sign certificates using a HashiCorp Vault PKI backend. type: object required: - auth @@ -5116,9 +2757,7 @@ spec: type: object properties: appRole: - description: |- - AppRole authenticates with Vault using the App Role auth mechanism, - with the role and secret stored in a Kubernetes Secret resource. + description: AppRole authenticates with Vault using the App Role auth mechanism, with the role and secret stored in a Kubernetes Secret resource. type: object required: - path @@ -5126,94 +2765,53 @@ spec: - secretRef properties: path: - description: |- - Path where the App Role authentication backend is mounted in Vault, e.g: - "approle" + description: 'Path where the App Role authentication backend is mounted in Vault, e.g: "approle"' type: string roleId: - description: |- - RoleID configured in the App Role authentication backend when setting - up the authentication backend in Vault. + description: RoleID configured in the App Role authentication backend when setting up the authentication backend in Vault. type: string secretRef: - description: |- - Reference to a key in a Secret that contains the App Role secret used - to authenticate with Vault. - The `key` field must be specified and denotes which entry within the Secret - resource is used as the app role secret. + description: Reference to a key in a Secret that contains the App Role secret used to authenticate with Vault. The `key` field must be specified and denotes which entry within the Secret resource is used as the app role secret. type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string kubernetes: - description: |- - Kubernetes authenticates with Vault by passing the ServiceAccount - token stored in the named Secret resource to the Vault server. + description: Kubernetes authenticates with Vault by passing the ServiceAccount token stored in the named Secret resource to the Vault server. type: object required: - role properties: mountPath: - description: |- - The Vault mountPath here is the mount path to use when authenticating with - Vault. For example, setting a value to `/v1/auth/foo`, will use the path - `/v1/auth/foo/login` to authenticate with Vault. If unspecified, the - default value "/v1/auth/kubernetes" will be used. + description: The Vault mountPath here is the mount path to use when authenticating with Vault. For example, setting a value to `/v1/auth/foo`, will use the path `/v1/auth/foo/login` to authenticate with Vault. If unspecified, the default value "/v1/auth/kubernetes" will be used. type: string role: - description: |- - A required field containing the Vault Role to assume. A Role binds a - Kubernetes ServiceAccount with a set of Vault policies. 
+ description: A required field containing the Vault Role to assume. A Role binds a Kubernetes ServiceAccount with a set of Vault policies. type: string secretRef: - description: |- - The required Secret field containing a Kubernetes ServiceAccount JWT used - for authenticating with Vault. Use of 'ambient credentials' is not - supported. + description: The required Secret field containing a Kubernetes ServiceAccount JWT used for authenticating with Vault. Use of 'ambient credentials' is not supported. type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string serviceAccountRef: - description: |- - A reference to a service account that will be used to request a bound - token (also known as "projected token"). Compared to using "secretRef", - using this field means that you don't rely on statically bound tokens. To - use this field, you must configure an RBAC rule to let cert-manager - request a token. + description: A reference to a service account that will be used to request a bound token (also known as "projected token"). Compared to using "secretRef", using this field means that you don't rely on statically bound tokens. To use this field, you must configure an RBAC rule to let cert-manager request a token. type: object required: - name properties: - audiences: - description: |- - TokenAudiences is an optional list of extra audiences to include in the token passed to Vault. The default token - consisting of the issuer's namespace and name is always included. - type: array - items: - type: string name: description: Name of the ServiceAccount used to request a token. type: string @@ -5224,112 +2822,44 @@ spec: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string caBundle: - description: |- - Base64-encoded bundle of PEM CAs which will be used to validate the certificate - chain presented by Vault. Only used if using HTTPS to connect to Vault and - ignored for HTTP connections. - Mutually exclusive with CABundleSecretRef. - If neither CABundle nor CABundleSecretRef are defined, the certificate bundle in - the cert-manager controller container is used to validate the TLS connection. + description: Base64-encoded bundle of PEM CAs which will be used to validate the certificate chain presented by Vault. 
Only used if using HTTPS to connect to Vault and ignored for HTTP connections. Mutually exclusive with CABundleSecretRef. If neither CABundle nor CABundleSecretRef are defined, the certificate bundle in the cert-manager controller container is used to validate the TLS connection. type: string format: byte caBundleSecretRef: - description: |- - Reference to a Secret containing a bundle of PEM-encoded CAs to use when - verifying the certificate chain presented by Vault when using HTTPS. - Mutually exclusive with CABundle. - If neither CABundle nor CABundleSecretRef are defined, the certificate bundle in - the cert-manager controller container is used to validate the TLS connection. - If no key for the Secret is specified, cert-manager will default to 'ca.crt'. + description: Reference to a Secret containing a bundle of PEM-encoded CAs to use when verifying the certificate chain presented by Vault when using HTTPS. Mutually exclusive with CABundle. If neither CABundle nor CABundleSecretRef are defined, the certificate bundle in the cert-manager controller container is used to validate the TLS connection. If no key for the Secret is specified, cert-manager will default to 'ca.crt'. type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - clientCertSecretRef: - description: |- - Reference to a Secret containing a PEM-encoded Client Certificate to use when the - Vault server requires mTLS. - type: object - required: - - name - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. - type: string - name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - clientKeySecretRef: - description: |- - Reference to a Secret containing a PEM-encoded Client Private Key to use when the - Vault server requires mTLS. - type: object - required: - - name - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. - type: string - name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string namespace: - description: |- - Name of the vault namespace. Namespaces is a set of features within Vault Enterprise that allows Vault environments to support Secure Multi-tenancy. e.g: "ns1" - More about namespaces can be found here https://www.vaultproject.io/docs/enterprise/namespaces + description: 'Name of the vault namespace. Namespaces is a set of features within Vault Enterprise that allows Vault environments to support Secure Multi-tenancy. 
e.g: "ns1" More about namespaces can be found here https://www.vaultproject.io/docs/enterprise/namespaces' type: string path: - description: |- - Path is the mount path of the Vault PKI backend's `sign` endpoint, e.g: - "my_pki_mount/sign/my-role-name". + description: 'Path is the mount path of the Vault PKI backend''s `sign` endpoint, e.g: "my_pki_mount/sign/my-role-name".' type: string server: description: 'Server is the connection address for the Vault server, e.g: "https://vault.example.com:8200".' type: string venafi: - description: |- - Venafi configures this issuer to sign certificates using a Venafi TPP - or Venafi Cloud policy zone. + description: Venafi configures this issuer to sign certificates using a Venafi TPP or Venafi Cloud policy zone. type: object required: - zone properties: cloud: - description: |- - Cloud specifies the Venafi cloud configuration settings. - Only one of TPP or Cloud may be specified. + description: Cloud specifies the Venafi cloud configuration settings. Only one of TPP or Cloud may be specified. type: object required: - apiTokenSecretRef @@ -5341,96 +2871,59 @@ spec: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string url: - description: |- - URL is the base URL for Venafi Cloud. - Defaults to "https://api.venafi.cloud/v1". + description: URL is the base URL for Venafi Cloud. Defaults to "https://api.venafi.cloud/v1". type: string tpp: - description: |- - TPP specifies Trust Protection Platform configuration settings. - Only one of TPP or Cloud may be specified. + description: TPP specifies Trust Protection Platform configuration settings. Only one of TPP or Cloud may be specified. type: object required: - credentialsRef - url properties: caBundle: - description: |- - Base64-encoded bundle of PEM CAs which will be used to validate the certificate - chain presented by the TPP server. Only used if using HTTPS; ignored for HTTP. - If undefined, the certificate bundle in the cert-manager controller container - is used to validate the chain. + description: Base64-encoded bundle of PEM CAs which will be used to validate the certificate chain presented by the TPP server. Only used if using HTTPS; ignored for HTTP. If undefined, the certificate bundle in the cert-manager controller container is used to validate the chain. type: string format: byte credentialsRef: - description: |- - CredentialsRef is a reference to a Secret containing the username and - password for the TPP server. - The secret must contain two keys, 'username' and 'password'. + description: CredentialsRef is a reference to a Secret containing the username and password for the TPP server. The secret must contain two keys, 'username' and 'password'. type: object required: - name properties: name: - description: |- - Name of the resource being referred to. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string url: - description: |- - URL is the base URL for the vedsdk endpoint of the Venafi TPP instance, - for example: "https://tpp.example.com/vedsdk". + description: 'URL is the base URL for the vedsdk endpoint of the Venafi TPP instance, for example: "https://tpp.example.com/vedsdk".' type: string zone: - description: |- - Zone is the Venafi Policy Zone to use for this issuer. - All requests made to the Venafi platform will be restricted by the named - zone policy. - This field is required. + description: Zone is the Venafi Policy Zone to use for this issuer. All requests made to the Venafi platform will be restricted by the named zone policy. This field is required. type: string status: description: Status of the ClusterIssuer. This is set and managed automatically. type: object properties: acme: - description: |- - ACME specific status options. - This field should only be set if the Issuer is configured to use an ACME - server to issue certificates. + description: ACME specific status options. This field should only be set if the Issuer is configured to use an ACME server to issue certificates. type: object properties: lastPrivateKeyHash: - description: |- - LastPrivateKeyHash is a hash of the private key associated with the latest - registered ACME account, in order to track changes made to registered account - associated with the Issuer + description: LastPrivateKeyHash is a hash of the private key associated with the latest registered ACME account, in order to track changes made to registered account associated with the Issuer type: string lastRegisteredEmail: - description: |- - LastRegisteredEmail is the email associated with the latest registered - ACME account, in order to track changes made to registered account - associated with the Issuer + description: LastRegisteredEmail is the email associated with the latest registered ACME account, in order to track changes made to registered account associated with the Issuer type: string uri: - description: |- - URI is the unique account identifier, which can also be used to retrieve - account details from the CA + description: URI is the unique account identifier, which can also be used to retrieve account details from the CA type: string conditions: - description: |- - List of status conditions to indicate the status of a CertificateRequest. - Known condition types are `Ready`. + description: List of status conditions to indicate the status of a CertificateRequest. Known condition types are `Ready`. type: array items: description: IssuerCondition contains condition information for an Issuer. @@ -5440,29 +2933,18 @@ spec: - type properties: lastTransitionTime: - description: |- - LastTransitionTime is the timestamp corresponding to the last status - change of this condition. + description: LastTransitionTime is the timestamp corresponding to the last status change of this condition. type: string format: date-time message: - description: |- - Message is a human readable description of the details of the last - transition, complementing reason. + description: Message is a human readable description of the details of the last transition, complementing reason. 
type: string observedGeneration: - description: |- - If set, this represents the .metadata.generation that the condition was - set based upon. - For instance, if .metadata.generation is currently 12, but the - .status.condition[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the Issuer. + description: If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the Issuer. type: integer format: int64 reason: - description: |- - Reason is a brief machine readable explanation for the condition's last - transition. + description: Reason is a brief machine readable explanation for the condition's last transition. type: string status: description: Status of the condition, one of (`True`, `False`, `Unknown`). @@ -5479,24 +2961,15 @@ spec: x-kubernetes-list-type: map served: true storage: true - -# END crd {{- end }} - --- -# START crd {{- if or .Values.crds.enabled .Values.installCRDs }} apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: issuers.cert-manager.io - # START annotations {{- if .Values.crds.keep }} - annotations: - helm.sh/resource-policy: keep - # END annotations {{- end }} labels: app: '{{ template "cert-manager.name" . }}' app.kubernetes.io/name: '{{ template "cert-manager.name" . }}' - app.kubernetes.io/instance: '{{ .Release.Name }}' - app.kubernetes.io/component: "crds" + app.kubernetes.io/instance: "{{ .Release.Name }}" # Generated labels {{- include "labels" . | nindent 4 }} spec: group: cert-manager.io @@ -5526,29 +2999,16 @@ spec: type: date schema: openAPIV3Schema: - description: |- - An Issuer represents a certificate issuing authority which can be - referenced as part of `issuerRef` fields. - It is scoped to a single namespace and can therefore only be referenced by - resources within the same namespace. + description: An Issuer represents a certificate issuing authority which can be referenced as part of `issuerRef` fields. It is scoped to a single namespace and can therefore only be referenced by resources within the same namespace. type: object required: - spec properties: apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object @@ -5557,65 +3017,34 @@ spec: type: object properties: acme: - description: |- - ACME configures this issuer to communicate with a RFC8555 (ACME) server - to obtain signed x509 certificates. + description: ACME configures this issuer to communicate with a RFC8555 (ACME) server to obtain signed x509 certificates. type: object required: - privateKeySecretRef - server properties: caBundle: - description: |- - Base64-encoded bundle of PEM CAs which can be used to validate the certificate - chain presented by the ACME server. - Mutually exclusive with SkipTLSVerify; prefer using CABundle to prevent various - kinds of security vulnerabilities. - If CABundle and SkipTLSVerify are unset, the system certificate bundle inside - the container is used to validate the TLS connection. + description: Base64-encoded bundle of PEM CAs which can be used to validate the certificate chain presented by the ACME server. Mutually exclusive with SkipTLSVerify; prefer using CABundle to prevent various kinds of security vulnerabilities. If CABundle and SkipTLSVerify are unset, the system certificate bundle inside the container is used to validate the TLS connection. type: string format: byte disableAccountKeyGeneration: - description: |- - Enables or disables generating a new ACME account key. - If true, the Issuer resource will *not* request a new account but will expect - the account key to be supplied via an existing secret. - If false, the cert-manager system will generate a new ACME account key - for the Issuer. - Defaults to false. + description: Enables or disables generating a new ACME account key. If true, the Issuer resource will *not* request a new account but will expect the account key to be supplied via an existing secret. If false, the cert-manager system will generate a new ACME account key for the Issuer. Defaults to false. type: boolean email: - description: |- - Email is the email address to be associated with the ACME account. - This field is optional, but it is strongly recommended to be set. - It will be used to contact you in case of issues with your account or - certificates, including expiry notification emails. - This field may be updated after the account is initially registered. + description: Email is the email address to be associated with the ACME account. This field is optional, but it is strongly recommended to be set. It will be used to contact you in case of issues with your account or certificates, including expiry notification emails. This field may be updated after the account is initially registered. type: string enableDurationFeature: - description: |- - Enables requesting a Not After date on certificates that matches the - duration of the certificate. This is not supported by all ACME servers - like Let's Encrypt. If set to true when the ACME server does not support - it, it will create an error on the Order. - Defaults to false. + description: Enables requesting a Not After date on certificates that matches the duration of the certificate. This is not supported by all ACME servers like Let's Encrypt. If set to true when the ACME server does not support it it will create an error on the Order. Defaults to false. type: boolean externalAccountBinding: - description: |- - ExternalAccountBinding is a reference to a CA external account of the ACME - server. 
- If set, upon registration cert-manager will attempt to associate the given - external account credentials with the registered ACME account. + description: ExternalAccountBinding is a reference to a CA external account of the ACME server. If set, upon registration cert-manager will attempt to associate the given external account credentials with the registered ACME account. type: object required: - keyID - keySecretRef properties: keyAlgorithm: - description: |- - Deprecated: keyAlgorithm field exists for historical compatibility - reasons and should not be used. The algorithm is now hardcoded to HS256 - in golang/x/crypto/acme. + description: 'Deprecated: keyAlgorithm field exists for historical compatibility reasons and should not be used. The algorithm is now hardcoded to HS256 in golang/x/crypto/acme.' type: string enum: - HS256 @@ -5625,130 +3054,68 @@ spec: description: keyID is the ID of the CA key that the External Account is bound to. type: string keySecretRef: - description: |- - keySecretRef is a Secret Key Selector referencing a data item in a Kubernetes - Secret which holds the symmetric MAC key of the External Account Binding. - The `key` is the index string that is paired with the key data in the - Secret and should not be confused with the key data itself, or indeed with - the External Account Binding keyID above. - The secret key stored in the Secret **must** be un-padded, base64 URL - encoded data. + description: keySecretRef is a Secret Key Selector referencing a data item in a Kubernetes Secret which holds the symmetric MAC key of the External Account Binding. The `key` is the index string that is paired with the key data in the Secret and should not be confused with the key data itself, or indeed with the External Account Binding keyID above. The secret key stored in the Secret **must** be un-padded, base64 URL encoded data. type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string preferredChain: - description: |- - PreferredChain is the chain to use if the ACME server outputs multiple. - PreferredChain is no guarantee that this one gets delivered by the ACME - endpoint. - For example, for Let's Encrypt's DST crosssign you would use: - "DST Root CA X3" or "ISRG Root X1" for the newer Let's Encrypt root CA. - This value picks the first certificate bundle in the combined set of - ACME default and alternative chains that has a root-most certificate with - this value as its issuer's commonname. + description: 'PreferredChain is the chain to use if the ACME server outputs multiple. PreferredChain is no guarantee that this one gets delivered by the ACME endpoint. For example, for Let''s Encrypt''s DST crosssign you would use: "DST Root CA X3" or "ISRG Root X1" for the newer Let''s Encrypt root CA. 
This value picks the first certificate bundle in the ACME alternative chains that has a certificate with this value as its issuer''s CN' type: string maxLength: 64 privateKeySecretRef: - description: |- - PrivateKey is the name of a Kubernetes Secret resource that will be used to - store the automatically generated ACME account private key. - Optionally, a `key` may be specified to select a specific entry within - the named Secret resource. - If `key` is not specified, a default of `tls.key` will be used. + description: PrivateKey is the name of a Kubernetes Secret resource that will be used to store the automatically generated ACME account private key. Optionally, a `key` may be specified to select a specific entry within the named Secret resource. If `key` is not specified, a default of `tls.key` will be used. type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string server: - description: |- - Server is the URL used to access the ACME server's 'directory' endpoint. - For example, for Let's Encrypt's staging endpoint, you would use: - "https://acme-staging-v02.api.letsencrypt.org/directory". - Only ACME v2 endpoints (i.e. RFC 8555) are supported. + description: 'Server is the URL used to access the ACME server''s ''directory'' endpoint. For example, for Let''s Encrypt''s staging endpoint, you would use: "https://acme-staging-v02.api.letsencrypt.org/directory". Only ACME v2 endpoints (i.e. RFC 8555) are supported.' type: string skipTLSVerify: - description: |- - INSECURE: Enables or disables validation of the ACME server TLS certificate. - If true, requests to the ACME server will not have the TLS certificate chain - validated. - Mutually exclusive with CABundle; prefer using CABundle to prevent various - kinds of security vulnerabilities. - Only enable this option in development environments. - If CABundle and SkipTLSVerify are unset, the system certificate bundle inside - the container is used to validate the TLS connection. - Defaults to false. + description: 'INSECURE: Enables or disables validation of the ACME server TLS certificate. If true, requests to the ACME server will not have the TLS certificate chain validated. Mutually exclusive with CABundle; prefer using CABundle to prevent various kinds of security vulnerabilities. Only enable this option in development environments. If CABundle and SkipTLSVerify are unset, the system certificate bundle inside the container is used to validate the TLS connection. Defaults to false.' type: boolean solvers: - description: |- - Solvers is a list of challenge solvers that will be used to solve - ACME challenges for the matching domains. - Solver configurations must be provided in order to obtain certificates - from an ACME server. 
- For more information, see: https://cert-manager.io/docs/configuration/acme/ + description: 'Solvers is a list of challenge solvers that will be used to solve ACME challenges for the matching domains. Solver configurations must be provided in order to obtain certificates from an ACME server. For more information, see: https://cert-manager.io/docs/configuration/acme/' type: array items: - description: |- - An ACMEChallengeSolver describes how to solve ACME challenges for the issuer it is part of. - A selector may be provided to use different solving strategies for different DNS names. - Only one of HTTP01 or DNS01 must be provided. + description: An ACMEChallengeSolver describes how to solve ACME challenges for the issuer it is part of. A selector may be provided to use different solving strategies for different DNS names. Only one of HTTP01 or DNS01 must be provided. type: object properties: dns01: - description: |- - Configures cert-manager to attempt to complete authorizations by - performing the DNS01 challenge flow. + description: Configures cert-manager to attempt to complete authorizations by performing the DNS01 challenge flow. type: object properties: acmeDNS: - description: |- - Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) API to manage - DNS01 challenge records. + description: Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) API to manage DNS01 challenge records. type: object required: - accountSecretRef - host properties: accountSecretRef: - description: |- - A reference to a specific 'key' within a Secret resource. - In some instances, `key` is a required field. + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string host: type: string @@ -5762,61 +3129,40 @@ spec: - serviceConsumerDomain properties: accessTokenSecretRef: - description: |- - A reference to a specific 'key' within a Secret resource. - In some instances, `key` is a required field. + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string clientSecretSecretRef: - description: |- - A reference to a specific 'key' within a Secret resource. - In some instances, `key` is a required field. + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string clientTokenSecretRef: - description: |- - A reference to a specific 'key' within a Secret resource. - In some instances, `key` is a required field. + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string serviceConsumerDomain: type: string @@ -5828,30 +3174,19 @@ spec: - subscriptionID properties: clientID: - description: |- - Auth: Azure Service Principal: - The ClientID of the Azure Service Principal used to authenticate with Azure DNS. - If set, ClientSecret and TenantID must also be set. + description: if both this and ClientSecret are left unset MSI will be used type: string clientSecretSecretRef: - description: |- - Auth: Azure Service Principal: - A reference to a Secret containing the password associated with the Service Principal. - If set, ClientID and TenantID must also be set. + description: if both this and ClientID are left unset MSI will be used type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string environment: description: name of the Azure environment (default AzurePublicCloud) @@ -5865,19 +3200,14 @@ spec: description: name of the DNS zone that should be used type: string managedIdentity: - description: |- - Auth: Azure Workload Identity or Azure Managed Service Identity: - Settings to enable Azure Workload Identity or Azure Managed Service Identity - If set, ClientID, ClientSecret and TenantID must not be set. + description: managed identity configuration, can not be used at the same time as clientID, clientSecretSecretRef or tenantID type: object properties: clientID: description: client ID of the managed identity, can not be used at the same time as resourceID type: string resourceID: - description: |- - resource ID of the managed identity, can not be used at the same time as clientID - Cannot be used for Azure Managed Service Identity + description: resource ID of the managed identity, can not be used at the same time as clientID type: string resourceGroupName: description: resource group the DNS zone is located in @@ -5886,10 +3216,7 @@ spec: description: ID of the Azure subscription type: string tenantID: - description: |- - Auth: Azure Service Principal: - The TenantID of the Azure Service Principal used to authenticate with Azure DNS. - If set, ClientID and ClientSecret must also be set. + description: when specifying ClientID and ClientSecret then this field is also needed type: string cloudDNS: description: Use the Google Cloud DNS API to manage DNS01 challenge records. @@ -5898,55 +3225,37 @@ spec: - project properties: hostedZoneName: - description: |- - HostedZoneName is an optional field that tells cert-manager in which - Cloud DNS zone the challenge record has to be created. - If left empty cert-manager will automatically choose a zone. + description: HostedZoneName is an optional field that tells cert-manager in which Cloud DNS zone the challenge record has to be created. If left empty cert-manager will automatically choose a zone. type: string project: type: string serviceAccountSecretRef: - description: |- - A reference to a specific 'key' within a Secret resource. - In some instances, `key` is a required field. + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string cloudflare: description: Use the Cloudflare API to manage DNS01 challenge records. type: object properties: apiKeySecretRef: - description: |- - API key to use to authenticate with Cloudflare. - Note: using an API token to authenticate is now the recommended method - as it allows greater control of permissions. + description: 'API key to use to authenticate with Cloudflare. 
Note: using an API token to authenticate is now the recommended method as it allows greater control of permissions.' type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string apiTokenSecretRef: description: API token used to authenticate with Cloudflare. @@ -5955,23 +3264,16 @@ spec: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string email: description: Email of the account, only required when using API key based authentication. type: string cnameStrategy: - description: |- - CNAMEStrategy configures how the DNS01 provider should handle CNAME - records when found in DNS zones. + description: CNAMEStrategy configures how the DNS01 provider should handle CNAME records when found in DNS zones. type: string enum: - None @@ -5983,69 +3285,43 @@ spec: - tokenSecretRef properties: tokenSecretRef: - description: |- - A reference to a specific 'key' within a Secret resource. - In some instances, `key` is a required field. + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string rfc2136: - description: |- - Use RFC2136 ("Dynamic Updates in the Domain Name System") (https://datatracker.ietf.org/doc/rfc2136/) - to manage DNS01 challenge records. + description: Use RFC2136 ("Dynamic Updates in the Domain Name System") (https://datatracker.ietf.org/doc/rfc2136/) to manage DNS01 challenge records. type: object required: - nameserver properties: nameserver: - description: |- - The IP address or hostname of an authoritative DNS server supporting - RFC2136 in the form host:port. 
If the host is an IPv6 address it must be - enclosed in square brackets (e.g [2001:db8::1]) ; port is optional. - This field is required. + description: The IP address or hostname of an authoritative DNS server supporting RFC2136 in the form host:port. If the host is an IPv6 address it must be enclosed in square brackets (e.g [2001:db8::1]) ; port is optional. This field is required. type: string tsigAlgorithm: - description: |- - The TSIG Algorithm configured in the DNS supporting RFC2136. Used only - when ``tsigSecretSecretRef`` and ``tsigKeyName`` are defined. - Supported values are (case-insensitive): ``HMACMD5`` (default), - ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``. + description: 'The TSIG Algorithm configured in the DNS supporting RFC2136. Used only when ``tsigSecretSecretRef`` and ``tsigKeyName`` are defined. Supported values are (case-insensitive): ``HMACMD5`` (default), ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``.' type: string tsigKeyName: - description: |- - The TSIG Key name configured in the DNS. - If ``tsigSecretSecretRef`` is defined, this field is required. + description: The TSIG Key name configured in the DNS. If ``tsigSecretSecretRef`` is defined, this field is required. type: string tsigSecretSecretRef: - description: |- - The name of the secret containing the TSIG value. - If ``tsigKeyName`` is defined, this field is required. + description: The name of the secret containing the TSIG value. If ``tsigKeyName`` is defined, this field is required. type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string route53: description: Use the AWS Route53 API to manage DNS01 challenge records. @@ -6054,71 +3330,20 @@ spec: - region properties: accessKeyID: - description: |- - The AccessKeyID is used for authentication. - Cannot be set when SecretAccessKeyID is set. - If neither the Access Key nor Key ID are set, we fall-back to using env - vars, shared credentials file or AWS Instance metadata, - see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials + description: 'The AccessKeyID is used for authentication. Cannot be set when SecretAccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' type: string accessKeyIDSecretRef: - description: |- - The SecretAccessKey is used for authentication. If set, pull the AWS - access key ID from a key within a Kubernetes Secret. - Cannot be set when AccessKeyID is set. 
- If neither the Access Key nor Key ID are set, we fall-back to using env - vars, shared credentials file or AWS Instance metadata, - see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials + description: 'The SecretAccessKey is used for authentication. If set, pull the AWS access key ID from a key within a Kubernetes Secret. Cannot be set when AccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string - auth: - description: Auth configures how cert-manager authenticates. - type: object - required: - - kubernetes - properties: - kubernetes: - description: |- - Kubernetes authenticates with Route53 using AssumeRoleWithWebIdentity - by passing a bound ServiceAccount token. - type: object - required: - - serviceAccountRef - properties: - serviceAccountRef: - description: |- - A reference to a service account that will be used to request a bound - token (also known as "projected token"). To use this field, you must - configure an RBAC rule to let cert-manager request a token. - type: object - required: - - name - properties: - audiences: - description: |- - TokenAudiences is an optional list of audiences to include in the - token passed to AWS. The default token consisting of the issuer's namespace - and name is always included. - If unset the audience defaults to `sts.amazonaws.com`. - type: array - items: - type: string - name: - description: Name of the ServiceAccount used to request a token. - type: string hostedZoneID: description: If set, the provider will manage only this zone in Route53 and will not do an lookup using the route53:ListHostedZonesByName api call. type: string @@ -6126,301 +3351,113 @@ spec: description: Always set the region when using AccessKeyID and SecretAccessKey type: string role: - description: |- - Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey - or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata + description: Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata type: string secretAccessKeySecretRef: - description: |- - The SecretAccessKey is used for authentication. 
- If neither the Access Key nor Key ID are set, we fall-back to using env - vars, shared credentials file or AWS Instance metadata, - see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials + description: 'The SecretAccessKey is used for authentication. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string webhook: - description: |- - Configure an external webhook based DNS01 challenge solver to manage - DNS01 challenge records. + description: Configure an external webhook based DNS01 challenge solver to manage DNS01 challenge records. type: object required: - groupName - solverName properties: config: - description: |- - Additional configuration that should be passed to the webhook apiserver - when challenges are processed. - This can contain arbitrary JSON data. - Secret values should not be specified in this stanza. - If secret values are needed (e.g. credentials for a DNS service), you - should use a SecretKeySelector to reference a Secret resource. - For details on the schema of this field, consult the webhook provider - implementation's documentation. + description: Additional configuration that should be passed to the webhook apiserver when challenges are processed. This can contain arbitrary JSON data. Secret values should not be specified in this stanza. If secret values are needed (e.g. credentials for a DNS service), you should use a SecretKeySelector to reference a Secret resource. For details on the schema of this field, consult the webhook provider implementation's documentation. x-kubernetes-preserve-unknown-fields: true groupName: - description: |- - The API group name that should be used when POSTing ChallengePayload - resources to the webhook apiserver. - This should be the same as the GroupName specified in the webhook - provider implementation. + description: The API group name that should be used when POSTing ChallengePayload resources to the webhook apiserver. This should be the same as the GroupName specified in the webhook provider implementation. type: string solverName: - description: |- - The name of the solver to use, as defined in the webhook provider - implementation. - This will typically be the name of the provider, e.g. 'cloudflare'. + description: The name of the solver to use, as defined in the webhook provider implementation. This will typically be the name of the provider, e.g. 'cloudflare'. type: string http01: - description: |- - Configures cert-manager to attempt to complete authorizations by - performing the HTTP01 challenge flow. - It is not possible to obtain certificates for wildcard domain names - (e.g. 
`*.example.com`) using the HTTP01 challenge mechanism. + description: Configures cert-manager to attempt to complete authorizations by performing the HTTP01 challenge flow. It is not possible to obtain certificates for wildcard domain names (e.g. `*.example.com`) using the HTTP01 challenge mechanism. type: object properties: gatewayHTTPRoute: - description: |- - The Gateway API is a sig-network community API that models service networking - in Kubernetes (https://gateway-api.sigs.k8s.io/). The Gateway solver will - create HTTPRoutes with the specified labels in the same namespace as the challenge. - This solver is experimental, and fields / behaviour may change in the future. + description: The Gateway API is a sig-network community API that models service networking in Kubernetes (https://gateway-api.sigs.k8s.io/). The Gateway solver will create HTTPRoutes with the specified labels in the same namespace as the challenge. This solver is experimental, and fields / behaviour may change in the future. type: object properties: labels: - description: |- - Custom labels that will be applied to HTTPRoutes created by cert-manager - while solving HTTP-01 challenges. + description: Custom labels that will be applied to HTTPRoutes created by cert-manager while solving HTTP-01 challenges. type: object additionalProperties: type: string parentRefs: - description: |- - When solving an HTTP-01 challenge, cert-manager creates an HTTPRoute. - cert-manager needs to know which parentRefs should be used when creating - the HTTPRoute. Usually, the parentRef references a Gateway. See: - https://gateway-api.sigs.k8s.io/api-types/httproute/#attaching-to-gateways + description: 'When solving an HTTP-01 challenge, cert-manager creates an HTTPRoute. cert-manager needs to know which parentRefs should be used when creating the HTTPRoute. Usually, the parentRef references a Gateway. See: https://gateway-api.sigs.k8s.io/api-types/httproute/#attaching-to-gateways' type: array items: - description: |- - ParentReference identifies an API object (usually a Gateway) that can be considered - a parent of this resource (usually a route). There are two kinds of parent resources - with "Core" support: - - - * Gateway (Gateway conformance profile) - * Service (Mesh conformance profile, ClusterIP Services only) - - - This API may be extended in the future to support additional kinds of parent - resources. - - - The API object must be valid in the cluster; the Group and Kind must - be registered in the cluster for this reference to be valid. + description: "ParentReference identifies an API object (usually a Gateway) that can be considered a parent of this resource (usually a route). The only kind of parent resource with \"Core\" support is Gateway. This API may be extended in the future to support additional kinds of parent resources, such as HTTPRoute. \n The API object must be valid in the cluster; the Group and Kind must be registered in the cluster for this reference to be valid." type: object required: - name properties: group: - description: |- - Group is the group of the referent. - When unspecified, "gateway.networking.k8s.io" is inferred. - To set the core API group (such as for a "Service" kind referent), - Group must be explicitly set to "" (empty string). - - - Support: Core + description: "Group is the group of the referent. When unspecified, \"gateway.networking.k8s.io\" is inferred. To set the core API group (such as for a \"Service\" kind referent), Group must be explicitly set to \"\" (empty string). 
\n Support: Core" type: string default: gateway.networking.k8s.io maxLength: 253 pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ kind: - description: |- - Kind is kind of the referent. - - - There are two kinds of parent resources with "Core" support: - - - * Gateway (Gateway conformance profile) - * Service (Mesh conformance profile, ClusterIP Services only) - - - Support for other resources is Implementation-Specific. + description: "Kind is kind of the referent. \n Support: Core (Gateway) \n Support: Implementation-specific (Other Resources)" type: string default: Gateway maxLength: 63 minLength: 1 pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ name: - description: |- - Name is the name of the referent. - - - Support: Core + description: "Name is the name of the referent. \n Support: Core" type: string maxLength: 253 minLength: 1 namespace: - description: |- - Namespace is the namespace of the referent. When unspecified, this refers - to the local namespace of the Route. - - - Note that there are specific rules for ParentRefs which cross namespace - boundaries. Cross-namespace references are only valid if they are explicitly - allowed by something in the namespace they are referring to. For example: - Gateway has the AllowedRoutes field, and ReferenceGrant provides a - generic way to enable any other kind of cross-namespace reference. - - - - ParentRefs from a Route to a Service in the same namespace are "producer" - routes, which apply default routing rules to inbound connections from - any namespace to the Service. - - - ParentRefs from a Route to a Service in a different namespace are - "consumer" routes, and these routing rules are only applied to outbound - connections originating from the same namespace as the Route, for which - the intended destination of the connections are a Service targeted as a - ParentRef of the Route. - - - - Support: Core + description: "Namespace is the namespace of the referent. When unspecified, this refers to the local namespace of the Route. \n Note that there are specific rules for ParentRefs which cross namespace boundaries. Cross-namespace references are only valid if they are explicitly allowed by something in the namespace they are referring to. For example: Gateway has the AllowedRoutes field, and ReferenceGrant provides a generic way to enable any other kind of cross-namespace reference. \n Support: Core" type: string maxLength: 63 minLength: 1 pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ port: - description: |- - Port is the network port this Route targets. It can be interpreted - differently based on the type of parent resource. - - - When the parent resource is a Gateway, this targets all listeners - listening on the specified port that also support this kind of Route(and - select this Route). It's not recommended to set `Port` unless the - networking behaviors specified in a Route must apply to a specific port - as opposed to a listener(s) whose port(s) may be changed. When both Port - and SectionName are specified, the name and port of the selected listener - must match both specified values. - - - - When the parent resource is a Service, this targets a specific port in the - Service spec. When both Port (experimental) and SectionName are specified, - the name and port of the selected port must match both specified values. - - - - Implementations MAY choose to support other parent resources. - Implementations supporting other types of parent resources MUST clearly - document how/if Port is interpreted. 
- - - For the purpose of status, an attachment is considered successful as - long as the parent resource accepts it partially. For example, Gateway - listeners can restrict which Routes can attach to them by Route kind, - namespace, or hostname. If 1 of 2 Gateway listeners accept attachment - from the referencing Route, the Route MUST be considered successfully - attached. If no Gateway listeners accept attachment from this Route, - the Route MUST be considered detached from the Gateway. - - - Support: Extended + description: "Port is the network port this Route targets. It can be interpreted differently based on the type of parent resource. \n When the parent resource is a Gateway, this targets all listeners listening on the specified port that also support this kind of Route(and select this Route). It's not recommended to set `Port` unless the networking behaviors specified in a Route must apply to a specific port as opposed to a listener(s) whose port(s) may be changed. When both Port and SectionName are specified, the name and port of the selected listener must match both specified values. \n Implementations MAY choose to support other parent resources. Implementations supporting other types of parent resources MUST clearly document how/if Port is interpreted. \n For the purpose of status, an attachment is considered successful as long as the parent resource accepts it partially. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Extended \n " type: integer format: int32 maximum: 65535 minimum: 1 sectionName: - description: |- - SectionName is the name of a section within the target resource. In the - following resources, SectionName is interpreted as the following: - - - * Gateway: Listener name. When both Port (experimental) and SectionName - are specified, the name and port of the selected listener must match - both specified values. - * Service: Port name. When both Port (experimental) and SectionName - are specified, the name and port of the selected listener must match - both specified values. - - - Implementations MAY choose to support attaching Routes to other resources. - If that is the case, they MUST clearly document how SectionName is - interpreted. - - - When unspecified (empty string), this will reference the entire resource. - For the purpose of status, an attachment is considered successful if at - least one section in the parent resource accepts it. For example, Gateway - listeners can restrict which Routes can attach to them by Route kind, - namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from - the referencing Route, the Route MUST be considered successfully - attached. If no Gateway listeners accept attachment from this Route, the - Route MUST be considered detached from the Gateway. - - - Support: Core + description: "SectionName is the name of a section within the target resource. In the following resources, SectionName is interpreted as the following: \n * Gateway: Listener Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. \n Implementations MAY choose to support attaching Routes to other resources. 
If that is the case, they MUST clearly document how SectionName is interpreted. \n When unspecified (empty string), this will reference the entire resource. For the purpose of status, an attachment is considered successful if at least one section in the parent resource accepts it. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Core" type: string maxLength: 253 minLength: 1 pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ serviceType: - description: |- - Optional service type for Kubernetes solver service. Supported values - are NodePort or ClusterIP. If unset, defaults to NodePort. + description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. type: string ingress: - description: |- - The ingress based HTTP01 challenge solver will solve challenges by - creating or modifying Ingress resources in order to route requests for - '/.well-known/acme-challenge/XYZ' to 'challenge solver' pods that are - provisioned by cert-manager for each Challenge to be completed. + description: The ingress based HTTP01 challenge solver will solve challenges by creating or modifying Ingress resources in order to route requests for '/.well-known/acme-challenge/XYZ' to 'challenge solver' pods that are provisioned by cert-manager for each Challenge to be completed. type: object properties: class: - description: |- - This field configures the annotation `kubernetes.io/ingress.class` when - creating Ingress resources to solve ACME challenges that use this - challenge solver. Only one of `class`, `name` or `ingressClassName` may - be specified. + description: This field configures the annotation `kubernetes.io/ingress.class` when creating Ingress resources to solve ACME challenges that use this challenge solver. Only one of `class`, `name` or `ingressClassName` may be specified. type: string ingressClassName: - description: |- - This field configures the field `ingressClassName` on the created Ingress - resources used to solve ACME challenges that use this challenge solver. - This is the recommended way of configuring the ingress class. Only one of - `class`, `name` or `ingressClassName` may be specified. + description: This field configures the field `ingressClassName` on the created Ingress resources used to solve ACME challenges that use this challenge solver. This is the recommended way of configuring the ingress class. Only one of `class`, `name` or `ingressClassName` may be specified. type: string ingressTemplate: - description: |- - Optional ingress template used to configure the ACME challenge solver - ingress used for HTTP01 challenges. + description: Optional ingress template used to configure the ACME challenge solver ingress used for HTTP01 challenges. type: object properties: metadata: - description: |- - ObjectMeta overrides for the ingress used to solve HTTP01 challenges. - Only the 'labels' and 'annotations' fields may be set. - If labels or annotations overlap with in-built values, the values here - will override the in-built values. + description: ObjectMeta overrides for the ingress used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. 
If labels or annotations overlap with in-built values, the values here will override the in-built values. type: object properties: annotations: @@ -6434,26 +3471,14 @@ spec: additionalProperties: type: string name: - description: |- - The name of the ingress resource that should have ACME challenge solving - routes inserted into it in order to solve HTTP01 challenges. - This is typically used in conjunction with ingress controllers like - ingress-gce, which maintains a 1:1 mapping between external IPs and - ingress resources. Only one of `class`, `name` or `ingressClassName` may - be specified. + description: The name of the ingress resource that should have ACME challenge solving routes inserted into it in order to solve HTTP01 challenges. This is typically used in conjunction with ingress controllers like ingress-gce, which maintains a 1:1 mapping between external IPs and ingress resources. Only one of `class`, `name` or `ingressClassName` may be specified. type: string podTemplate: - description: |- - Optional pod template used to configure the ACME challenge solver pods - used for HTTP01 challenges. + description: Optional pod template used to configure the ACME challenge solver pods used for HTTP01 challenges. type: object properties: metadata: - description: |- - ObjectMeta overrides for the pod used to solve HTTP01 challenges. - Only the 'labels' and 'annotations' fields may be set. - If labels or annotations overlap with in-built values, the values here - will override the in-built values. + description: ObjectMeta overrides for the pod used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. type: object properties: annotations: @@ -6467,10 +3492,7 @@ spec: additionalProperties: type: string spec: - description: |- - PodSpec defines overrides for the HTTP01 challenge solver pod. - Check ACMEChallengeSolverHTTP01IngressPodSpec to find out currently supported fields. - All other fields will be ignored. + description: PodSpec defines overrides for the HTTP01 challenge solver pod. Check ACMEChallengeSolverHTTP01IngressPodSpec to find out currently supported fields. All other fields will be ignored. type: object properties: affinity: @@ -6482,21 +3504,10 @@ spec: type: object properties: preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. - for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node matches the corresponding matchExpressions; the - node(s) with the highest sum are the most preferred. + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. type: array items: - description: |- - An empty preferred scheduling term matches all objects with implicit weight 0 - (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). type: object required: - preference @@ -6510,9 +3521,7 @@ spec: description: A list of node selector requirements by node's labels. type: array items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. type: object required: - key @@ -6522,29 +3531,18 @@ spec: description: The label key that the selector applies to. type: string operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. type: array items: type: string - x-kubernetes-list-type: atomic - x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. type: array items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. type: object required: - key @@ -6554,35 +3552,20 @@ spec: description: The label key that the selector applies to. type: string operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. 
- This array is replaced during a strategic merge patch. + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. type: array items: type: string - x-kubernetes-list-type: atomic - x-kubernetes-list-type: atomic x-kubernetes-map-type: atomic weight: description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. type: integer format: int32 - x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. - If the affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. due to an update), the system - may or may not try to eventually evict the pod from its node. + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. type: object required: - nodeSelectorTerms @@ -6591,19 +3574,14 @@ spec: description: Required. A list of node selector terms. The terms are ORed. type: array items: - description: |- - A null or empty node selector term matches no objects. The requirements of - them are ANDed. - The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. type: object properties: matchExpressions: description: A list of node selector requirements by node's labels. type: array items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. type: object required: - key @@ -6613,29 +3591,18 @@ spec: description: The label key that the selector applies to. type: string operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. 
This array is replaced during a strategic merge patch. type: array items: type: string - x-kubernetes-list-type: atomic - x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. type: array items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. type: object required: - key @@ -6645,40 +3612,21 @@ spec: description: The label key that the selector applies to. type: string operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. type: array items: type: string - x-kubernetes-list-type: atomic - x-kubernetes-list-type: atomic x-kubernetes-map-type: atomic - x-kubernetes-list-type: atomic x-kubernetes-map-type: atomic podAffinity: description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). type: object properties: preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. - for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. 
type: array items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) @@ -6694,18 +3642,14 @@ spec: - topologyKey properties: labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. + description: A label query over a set of resources, in this case pods. type: object properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. type: array items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. type: object required: - key @@ -6715,76 +3659,28 @@ spec: description: key is the label key that the selector applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. type: array items: type: string - x-kubernetes-list-type: atomic - x-kubernetes-list-type: atomic matchLabels: - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. type: object additionalProperties: type: string x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both matchLabelKeys and labelSelector. - Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. - type: array - items: - type: string - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. 
The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. - type: array - items: - type: string - x-kubernetes-list-type: atomic namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. type: object properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. type: array items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. type: object required: - key @@ -6794,90 +3690,49 @@ spec: description: key is the label key that the selector applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. type: array items: type: string - x-kubernetes-list-type: atomic - x-kubernetes-list-type: atomic matchLabels: - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
type: object additionalProperties: type: string x-kubernetes-map-type: atomic namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". type: array items: type: string - x-kubernetes-list-type: atomic topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. type: string weight: - description: |- - weight associated with matching the corresponding podAffinityTerm, - in the range 1-100. + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. type: integer format: int32 - x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. - If the affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. due to a pod label update), the - system may or may not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes corresponding to each - podAffinityTerm are intersected, i.e. all terms must be satisfied. + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. 
type: array items: - description: |- - Defines a set of pods (namely those matching the labelSelector - relative to the given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node whose value of - the label with key matches that of any node on which - a pod of the set of pods is running + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running type: object required: - topologyKey properties: labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. + description: A label query over a set of resources, in this case pods. type: object properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. type: array items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. type: object required: - key @@ -6887,76 +3742,28 @@ spec: description: key is the label key that the selector applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. type: array items: type: string - x-kubernetes-list-type: atomic - x-kubernetes-list-type: atomic matchLabels: - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. type: object additionalProperties: type: string x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. 
Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both matchLabelKeys and labelSelector. - Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. - type: array - items: - type: string - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. - type: array - items: - type: string - x-kubernetes-list-type: atomic namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. type: object properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. type: array items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. type: object required: - key @@ -6966,64 +3773,33 @@ spec: description: key is the label key that the selector applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. type: array items: type: string - x-kubernetes-list-type: atomic - x-kubernetes-list-type: atomic matchLabels: - description: |- - matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. type: object additionalProperties: type: string x-kubernetes-map-type: atomic namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". type: array items: type: string - x-kubernetes-list-type: atomic topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. type: string - x-kubernetes-list-type: atomic podAntiAffinity: description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). type: object properties: preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the anti-affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. - for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. type: array items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) @@ -7039,18 +3815,14 @@ spec: - topologyKey properties: labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. + description: A label query over a set of resources, in this case pods. type: object properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. type: array items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. type: object required: - key @@ -7060,76 +3832,28 @@ spec: description: key is the label key that the selector applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. type: array items: type: string - x-kubernetes-list-type: atomic - x-kubernetes-list-type: atomic matchLabels: - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. type: object additionalProperties: type: string x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both matchLabelKeys and labelSelector. 
- Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. - type: array - items: - type: string - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. - type: array - items: - type: string - x-kubernetes-list-type: atomic namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. type: object properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. type: array items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. type: object required: - key @@ -7139,90 +3863,49 @@ spec: description: key is the label key that the selector applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. type: array items: type: string - x-kubernetes-list-type: atomic - x-kubernetes-list-type: atomic matchLabels: - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. type: object additionalProperties: type: string x-kubernetes-map-type: atomic namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". type: array items: type: string - x-kubernetes-list-type: atomic topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. type: string weight: - description: |- - weight associated with matching the corresponding podAffinityTerm, - in the range 1-100. + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. type: integer format: int32 - x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the anti-affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. - If the anti-affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. due to a pod label update), the - system may or may not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes corresponding to each - podAffinityTerm are intersected, i.e. all terms must be satisfied. + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. 
type: array items: - description: |- - Defines a set of pods (namely those matching the labelSelector - relative to the given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node whose value of - the label with key matches that of any node on which - a pod of the set of pods is running + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running type: object required: - topologyKey properties: labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. + description: A label query over a set of resources, in this case pods. type: object properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. type: array items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. type: object required: - key @@ -7232,76 +3915,28 @@ spec: description: key is the label key that the selector applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. type: array items: type: string - x-kubernetes-list-type: atomic - x-kubernetes-list-type: atomic matchLabels: - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. type: object additionalProperties: type: string x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. 
Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both matchLabelKeys and labelSelector. - Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. - type: array - items: - type: string - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. - type: array - items: - type: string - x-kubernetes-list-type: atomic namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. type: object properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. type: array items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. type: object required: - key @@ -7311,75 +3946,40 @@ spec: description: key is the label key that the selector applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. type: array items: type: string - x-kubernetes-list-type: atomic - x-kubernetes-list-type: atomic matchLabels: - description: |- - matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. type: object additionalProperties: type: string x-kubernetes-map-type: atomic namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". type: array items: type: string - x-kubernetes-list-type: atomic topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. type: string - x-kubernetes-list-type: atomic imagePullSecrets: description: If specified, the pod's imagePullSecrets type: array items: - description: |- - LocalObjectReference contains enough information to let you locate the - referenced object inside the same namespace. + description: LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. type: object properties: name: - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - TODO: Add other useful fields. apiVersion, kind, uid? - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' type: string - default: "" x-kubernetes-map-type: atomic nodeSelector: - description: |- - NodeSelector is a selector which must be true for the pod to fit on a node. - Selector which must match a node's labels for the pod to be scheduled on that node. - More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + description: 'NodeSelector is a selector which must be true for the pod to fit on a node. 
Selector which must match a node''s labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' type: object additionalProperties: type: string @@ -7393,146 +3993,77 @@ spec: description: If specified, the pod's tolerations. type: array items: - description: |- - The pod this Toleration is attached to tolerates any taint that matches - the triple using the matching operator . + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . type: object properties: effect: - description: |- - Effect indicates the taint effect to match. Empty means match all taint effects. - When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. type: string key: - description: |- - Key is the taint key that the toleration applies to. Empty means match all taint keys. - If the key is empty, operator must be Exists; this combination means to match all values and all keys. + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. type: string operator: - description: |- - Operator represents a key's relationship to the value. - Valid operators are Exists and Equal. Defaults to Equal. - Exists is equivalent to wildcard for value, so that a pod can - tolerate all taints of a particular category. + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. type: string tolerationSeconds: - description: |- - TolerationSeconds represents the period of time the toleration (which must be - of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, - it is not set, which means tolerate the taint forever (do not evict). Zero and - negative values will be treated as 0 (evict immediately) by the system. + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. type: integer format: int64 value: - description: |- - Value is the taint value the toleration matches to. - If the operator is Exists, the value should be empty, otherwise just a regular string. + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. type: string serviceType: - description: |- - Optional service type for Kubernetes solver service. Supported values - are NodePort or ClusterIP. If unset, defaults to NodePort. + description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. type: string selector: - description: |- - Selector selects a set of DNSNames on the Certificate resource that - should be solved using this challenge solver. - If not specified, the solver will be treated as the 'default' solver - with the lowest priority, i.e. 
if any other solver has a more specific - match, it will be used instead. + description: Selector selects a set of DNSNames on the Certificate resource that should be solved using this challenge solver. If not specified, the solver will be treated as the 'default' solver with the lowest priority, i.e. if any other solver has a more specific match, it will be used instead. type: object properties: dnsNames: - description: |- - List of DNSNames that this solver will be used to solve. - If specified and a match is found, a dnsNames selector will take - precedence over a dnsZones selector. - If multiple solvers match with the same dnsNames value, the solver - with the most matching labels in matchLabels will be selected. - If neither has more matches, the solver defined earlier in the list - will be selected. + description: List of DNSNames that this solver will be used to solve. If specified and a match is found, a dnsNames selector will take precedence over a dnsZones selector. If multiple solvers match with the same dnsNames value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. type: array items: type: string dnsZones: - description: |- - List of DNSZones that this solver will be used to solve. - The most specific DNS zone match specified here will take precedence - over other DNS zone matches, so a solver specifying sys.example.com - will be selected over one specifying example.com for the domain - www.sys.example.com. - If multiple solvers match with the same dnsZones value, the solver - with the most matching labels in matchLabels will be selected. - If neither has more matches, the solver defined earlier in the list - will be selected. + description: List of DNSZones that this solver will be used to solve. The most specific DNS zone match specified here will take precedence over other DNS zone matches, so a solver specifying sys.example.com will be selected over one specifying example.com for the domain www.sys.example.com. If multiple solvers match with the same dnsZones value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. type: array items: type: string matchLabels: - description: |- - A label selector that is used to refine the set of certificate's that - this challenge solver will apply to. + description: A label selector that is used to refine the set of certificate's that this challenge solver will apply to. type: object additionalProperties: type: string ca: - description: |- - CA configures this issuer to sign certificates using a signing CA keypair - stored in a Secret resource. - This is used to build internal PKIs that are managed by cert-manager. + description: CA configures this issuer to sign certificates using a signing CA keypair stored in a Secret resource. This is used to build internal PKIs that are managed by cert-manager. type: object required: - secretName properties: crlDistributionPoints: - description: |- - The CRL distribution points is an X.509 v3 certificate extension which identifies - the location of the CRL from which the revocation of this certificate can be checked. - If not set, certificates will be issued without distribution points set. - type: array - items: - type: string - issuingCertificateURLs: - description: |- - IssuingCertificateURLs is a list of URLs which this issuer should embed into certificates - it creates. 
See https://www.rfc-editor.org/rfc/rfc5280#section-4.2.2.1 for more details. - As an example, such a URL might be "http://ca.domain.com/ca.crt". + description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set, certificates will be issued without distribution points set. type: array items: type: string ocspServers: - description: |- - The OCSP server list is an X.509 v3 extension that defines a list of - URLs of OCSP responders. The OCSP responders can be queried for the - revocation status of an issued certificate. If not set, the - certificate will be issued with no OCSP servers set. For example, an - OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org". + description: The OCSP server list is an X.509 v3 extension that defines a list of URLs of OCSP responders. The OCSP responders can be queried for the revocation status of an issued certificate. If not set, the certificate will be issued with no OCSP servers set. For example, an OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org". type: array items: type: string secretName: - description: |- - SecretName is the name of the secret used to sign Certificates issued - by this Issuer. + description: SecretName is the name of the secret used to sign Certificates issued by this Issuer. type: string selfSigned: - description: |- - SelfSigned configures this issuer to 'self sign' certificates using the - private key used to create the CertificateRequest object. + description: SelfSigned configures this issuer to 'self sign' certificates using the private key used to create the CertificateRequest object. type: object properties: crlDistributionPoints: - description: |- - The CRL distribution points is an X.509 v3 certificate extension which identifies - the location of the CRL from which the revocation of this certificate can be checked. - If not set certificate will be issued without CDP. Values are strings. + description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set certificate will be issued without CDP. Values are strings. type: array items: type: string vault: - description: |- - Vault configures this issuer to sign certificates using a HashiCorp Vault - PKI backend. + description: Vault configures this issuer to sign certificates using a HashiCorp Vault PKI backend. type: object required: - auth @@ -7544,9 +4075,7 @@ spec: type: object properties: appRole: - description: |- - AppRole authenticates with Vault using the App Role auth mechanism, - with the role and secret stored in a Kubernetes Secret resource. + description: AppRole authenticates with Vault using the App Role auth mechanism, with the role and secret stored in a Kubernetes Secret resource. type: object required: - path @@ -7554,94 +4083,53 @@ spec: - secretRef properties: path: - description: |- - Path where the App Role authentication backend is mounted in Vault, e.g: - "approle" + description: 'Path where the App Role authentication backend is mounted in Vault, e.g: "approle"' type: string roleId: - description: |- - RoleID configured in the App Role authentication backend when setting - up the authentication backend in Vault. + description: RoleID configured in the App Role authentication backend when setting up the authentication backend in Vault. 
type: string secretRef: - description: |- - Reference to a key in a Secret that contains the App Role secret used - to authenticate with Vault. - The `key` field must be specified and denotes which entry within the Secret - resource is used as the app role secret. + description: Reference to a key in a Secret that contains the App Role secret used to authenticate with Vault. The `key` field must be specified and denotes which entry within the Secret resource is used as the app role secret. type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string kubernetes: - description: |- - Kubernetes authenticates with Vault by passing the ServiceAccount - token stored in the named Secret resource to the Vault server. + description: Kubernetes authenticates with Vault by passing the ServiceAccount token stored in the named Secret resource to the Vault server. type: object required: - role properties: mountPath: - description: |- - The Vault mountPath here is the mount path to use when authenticating with - Vault. For example, setting a value to `/v1/auth/foo`, will use the path - `/v1/auth/foo/login` to authenticate with Vault. If unspecified, the - default value "/v1/auth/kubernetes" will be used. + description: The Vault mountPath here is the mount path to use when authenticating with Vault. For example, setting a value to `/v1/auth/foo`, will use the path `/v1/auth/foo/login` to authenticate with Vault. If unspecified, the default value "/v1/auth/kubernetes" will be used. type: string role: - description: |- - A required field containing the Vault Role to assume. A Role binds a - Kubernetes ServiceAccount with a set of Vault policies. + description: A required field containing the Vault Role to assume. A Role binds a Kubernetes ServiceAccount with a set of Vault policies. type: string secretRef: - description: |- - The required Secret field containing a Kubernetes ServiceAccount JWT used - for authenticating with Vault. Use of 'ambient credentials' is not - supported. + description: The required Secret field containing a Kubernetes ServiceAccount JWT used for authenticating with Vault. Use of 'ambient credentials' is not supported. type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string serviceAccountRef: - description: |- - A reference to a service account that will be used to request a bound - token (also known as "projected token"). Compared to using "secretRef", - using this field means that you don't rely on statically bound tokens. To - use this field, you must configure an RBAC rule to let cert-manager - request a token. + description: A reference to a service account that will be used to request a bound token (also known as "projected token"). Compared to using "secretRef", using this field means that you don't rely on statically bound tokens. To use this field, you must configure an RBAC rule to let cert-manager request a token. type: object required: - name properties: - audiences: - description: |- - TokenAudiences is an optional list of extra audiences to include in the token passed to Vault. The default token - consisting of the issuer's namespace and name is always included. - type: array - items: - type: string name: description: Name of the ServiceAccount used to request a token. type: string @@ -7652,112 +4140,44 @@ spec: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string caBundle: - description: |- - Base64-encoded bundle of PEM CAs which will be used to validate the certificate - chain presented by Vault. Only used if using HTTPS to connect to Vault and - ignored for HTTP connections. - Mutually exclusive with CABundleSecretRef. - If neither CABundle nor CABundleSecretRef are defined, the certificate bundle in - the cert-manager controller container is used to validate the TLS connection. + description: Base64-encoded bundle of PEM CAs which will be used to validate the certificate chain presented by Vault. Only used if using HTTPS to connect to Vault and ignored for HTTP connections. Mutually exclusive with CABundleSecretRef. If neither CABundle nor CABundleSecretRef are defined, the certificate bundle in the cert-manager controller container is used to validate the TLS connection. type: string format: byte caBundleSecretRef: - description: |- - Reference to a Secret containing a bundle of PEM-encoded CAs to use when - verifying the certificate chain presented by Vault when using HTTPS. - Mutually exclusive with CABundle. - If neither CABundle nor CABundleSecretRef are defined, the certificate bundle in - the cert-manager controller container is used to validate the TLS connection. - If no key for the Secret is specified, cert-manager will default to 'ca.crt'. + description: Reference to a Secret containing a bundle of PEM-encoded CAs to use when verifying the certificate chain presented by Vault when using HTTPS. Mutually exclusive with CABundle. If neither CABundle nor CABundleSecretRef are defined, the certificate bundle in the cert-manager controller container is used to validate the TLS connection. 
If no key for the Secret is specified, cert-manager will default to 'ca.crt'. type: object required: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - clientCertSecretRef: - description: |- - Reference to a Secret containing a PEM-encoded Client Certificate to use when the - Vault server requires mTLS. - type: object - required: - - name - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. - type: string - name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - clientKeySecretRef: - description: |- - Reference to a Secret containing a PEM-encoded Client Private Key to use when the - Vault server requires mTLS. - type: object - required: - - name - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. - type: string - name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string namespace: - description: |- - Name of the vault namespace. Namespaces is a set of features within Vault Enterprise that allows Vault environments to support Secure Multi-tenancy. e.g: "ns1" - More about namespaces can be found here https://www.vaultproject.io/docs/enterprise/namespaces + description: 'Name of the vault namespace. Namespaces is a set of features within Vault Enterprise that allows Vault environments to support Secure Multi-tenancy. e.g: "ns1" More about namespaces can be found here https://www.vaultproject.io/docs/enterprise/namespaces' type: string path: - description: |- - Path is the mount path of the Vault PKI backend's `sign` endpoint, e.g: - "my_pki_mount/sign/my-role-name". + description: 'Path is the mount path of the Vault PKI backend''s `sign` endpoint, e.g: "my_pki_mount/sign/my-role-name".' type: string server: description: 'Server is the connection address for the Vault server, e.g: "https://vault.example.com:8200".' type: string venafi: - description: |- - Venafi configures this issuer to sign certificates using a Venafi TPP - or Venafi Cloud policy zone. + description: Venafi configures this issuer to sign certificates using a Venafi TPP or Venafi Cloud policy zone. type: object required: - zone properties: cloud: - description: |- - Cloud specifies the Venafi cloud configuration settings. - Only one of TPP or Cloud may be specified. + description: Cloud specifies the Venafi cloud configuration settings. Only one of TPP or Cloud may be specified. 
type: object required: - apiTokenSecretRef @@ -7769,96 +4189,59 @@ spec: - name properties: key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. - Some instances of this field may be defaulted, in others it may be - required. + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. type: string name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string url: - description: |- - URL is the base URL for Venafi Cloud. - Defaults to "https://api.venafi.cloud/v1". + description: URL is the base URL for Venafi Cloud. Defaults to "https://api.venafi.cloud/v1". type: string tpp: - description: |- - TPP specifies Trust Protection Platform configuration settings. - Only one of TPP or Cloud may be specified. + description: TPP specifies Trust Protection Platform configuration settings. Only one of TPP or Cloud may be specified. type: object required: - credentialsRef - url properties: caBundle: - description: |- - Base64-encoded bundle of PEM CAs which will be used to validate the certificate - chain presented by the TPP server. Only used if using HTTPS; ignored for HTTP. - If undefined, the certificate bundle in the cert-manager controller container - is used to validate the chain. + description: Base64-encoded bundle of PEM CAs which will be used to validate the certificate chain presented by the TPP server. Only used if using HTTPS; ignored for HTTP. If undefined, the certificate bundle in the cert-manager controller container is used to validate the chain. type: string format: byte credentialsRef: - description: |- - CredentialsRef is a reference to a Secret containing the username and - password for the TPP server. - The secret must contain two keys, 'username' and 'password'. + description: CredentialsRef is a reference to a Secret containing the username and password for the TPP server. The secret must contain two keys, 'username' and 'password'. type: object required: - name properties: name: - description: |- - Name of the resource being referred to. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string url: - description: |- - URL is the base URL for the vedsdk endpoint of the Venafi TPP instance, - for example: "https://tpp.example.com/vedsdk". + description: 'URL is the base URL for the vedsdk endpoint of the Venafi TPP instance, for example: "https://tpp.example.com/vedsdk".' type: string zone: - description: |- - Zone is the Venafi Policy Zone to use for this issuer. - All requests made to the Venafi platform will be restricted by the named - zone policy. - This field is required. + description: Zone is the Venafi Policy Zone to use for this issuer. All requests made to the Venafi platform will be restricted by the named zone policy. This field is required. type: string status: description: Status of the Issuer. This is set and managed automatically. type: object properties: acme: - description: |- - ACME specific status options. 
- This field should only be set if the Issuer is configured to use an ACME - server to issue certificates. + description: ACME specific status options. This field should only be set if the Issuer is configured to use an ACME server to issue certificates. type: object properties: lastPrivateKeyHash: - description: |- - LastPrivateKeyHash is a hash of the private key associated with the latest - registered ACME account, in order to track changes made to registered account - associated with the Issuer + description: LastPrivateKeyHash is a hash of the private key associated with the latest registered ACME account, in order to track changes made to registered account associated with the Issuer type: string lastRegisteredEmail: - description: |- - LastRegisteredEmail is the email associated with the latest registered - ACME account, in order to track changes made to registered account - associated with the Issuer + description: LastRegisteredEmail is the email associated with the latest registered ACME account, in order to track changes made to registered account associated with the Issuer type: string uri: - description: |- - URI is the unique account identifier, which can also be used to retrieve - account details from the CA + description: URI is the unique account identifier, which can also be used to retrieve account details from the CA type: string conditions: - description: |- - List of status conditions to indicate the status of a CertificateRequest. - Known condition types are `Ready`. + description: List of status conditions to indicate the status of a CertificateRequest. Known condition types are `Ready`. type: array items: description: IssuerCondition contains condition information for an Issuer. @@ -7868,29 +4251,18 @@ spec: - type properties: lastTransitionTime: - description: |- - LastTransitionTime is the timestamp corresponding to the last status - change of this condition. + description: LastTransitionTime is the timestamp corresponding to the last status change of this condition. type: string format: date-time message: - description: |- - Message is a human readable description of the details of the last - transition, complementing reason. + description: Message is a human readable description of the details of the last transition, complementing reason. type: string observedGeneration: - description: |- - If set, this represents the .metadata.generation that the condition was - set based upon. - For instance, if .metadata.generation is currently 12, but the - .status.condition[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the Issuer. + description: If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the Issuer. type: integer format: int64 reason: - description: |- - Reason is a brief machine readable explanation for the condition's last - transition. + description: Reason is a brief machine readable explanation for the condition's last transition. type: string status: description: Status of the condition, one of (`True`, `False`, `Unknown`). 
@@ -7907,24 +4279,15 @@ spec: x-kubernetes-list-type: map served: true storage: true - -# END crd {{- end }} - --- -# START crd {{- if or .Values.crds.enabled .Values.installCRDs }} apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: orders.acme.cert-manager.io - # START annotations {{- if .Values.crds.keep }} - annotations: - helm.sh/resource-policy: keep - # END annotations {{- end }} labels: app: '{{ template "cert-manager.name" . }}' app.kubernetes.io/name: '{{ template "cert-manager.name" . }}' app.kubernetes.io/instance: '{{ .Release.Name }}' - app.kubernetes.io/component: "crds" # Generated labels {{- include "labels" . | nindent 4 }} spec: group: acme.cert-manager.io @@ -7966,19 +4329,10 @@ spec: - spec properties: apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object @@ -7989,39 +4343,23 @@ spec: - request properties: commonName: - description: |- - CommonName is the common name as specified on the DER encoded CSR. - If specified, this value must also be present in `dnsNames` or `ipAddresses`. - This field must match the corresponding field on the DER encoded CSR. + description: CommonName is the common name as specified on the DER encoded CSR. If specified, this value must also be present in `dnsNames` or `ipAddresses`. This field must match the corresponding field on the DER encoded CSR. type: string dnsNames: - description: |- - DNSNames is a list of DNS names that should be included as part of the Order - validation process. - This field must match the corresponding field on the DER encoded CSR. + description: DNSNames is a list of DNS names that should be included as part of the Order validation process. This field must match the corresponding field on the DER encoded CSR. type: array items: type: string duration: - description: |- - Duration is the duration for the not after date for the requested certificate. - this is set on order creation as pe the ACME spec. + description: Duration is the duration for the not after date for the requested certificate. this is set on order creation as pe the ACME spec. type: string ipAddresses: - description: |- - IPAddresses is a list of IP addresses that should be included as part of the Order - validation process. 
- This field must match the corresponding field on the DER encoded CSR. + description: IPAddresses is a list of IP addresses that should be included as part of the Order validation process. This field must match the corresponding field on the DER encoded CSR. type: array items: type: string issuerRef: - description: |- - IssuerRef references a properly configured ACME-type Issuer which should - be used to create this Order. - If the Issuer does not exist, processing will be retried. - If the Issuer is not an 'ACME' Issuer, an error will be returned and the - Order will be marked as failed. + description: IssuerRef references a properly configured ACME-type Issuer which should be used to create this Order. If the Issuer does not exist, processing will be retried. If the Issuer is not an 'ACME' Issuer, an error will be returned and the Order will be marked as failed. type: object required: - name @@ -8036,42 +4374,26 @@ spec: description: Name of the resource being referred to. type: string request: - description: |- - Certificate signing request bytes in DER encoding. - This will be used when finalizing the order. - This field must be set on the order. + description: Certificate signing request bytes in DER encoding. This will be used when finalizing the order. This field must be set on the order. type: string format: byte status: type: object properties: authorizations: - description: |- - Authorizations contains data returned from the ACME server on what - authorizations must be completed in order to validate the DNS names - specified on the Order. + description: Authorizations contains data returned from the ACME server on what authorizations must be completed in order to validate the DNS names specified on the Order. type: array items: - description: |- - ACMEAuthorization contains data returned from the ACME server on an - authorization that must be completed in order validate a DNS name on an ACME - Order resource. + description: ACMEAuthorization contains data returned from the ACME server on an authorization that must be completed in order validate a DNS name on an ACME Order resource. type: object required: - url properties: challenges: - description: |- - Challenges specifies the challenge types offered by the ACME server. - One of these challenge types will be selected when validating the DNS - name and an appropriate Challenge resource will be created to perform - the ACME challenge process. + description: Challenges specifies the challenge types offered by the ACME server. One of these challenge types will be selected when validating the DNS name and an appropriate Challenge resource will be created to perform the ACME challenge process. type: array items: - description: |- - Challenge specifies a challenge offered by the ACME server for an Order. - An appropriate Challenge resource can be created to perform the ACME - challenge process. + description: Challenge specifies a challenge offered by the ACME server for an Order. An appropriate Challenge resource can be created to perform the ACME challenge process. type: object required: - token @@ -8079,36 +4401,19 @@ spec: - url properties: token: - description: |- - Token is the token that must be presented for this challenge. - This is used to compute the 'key' that must also be presented. + description: Token is the token that must be presented for this challenge. This is used to compute the 'key' that must also be presented. type: string type: - description: |- - Type is the type of challenge being offered, e.g. 
'http-01', 'dns-01', - 'tls-sni-01', etc. - This is the raw value retrieved from the ACME server. - Only 'http-01' and 'dns-01' are supported by cert-manager, other values - will be ignored. + description: Type is the type of challenge being offered, e.g. 'http-01', 'dns-01', 'tls-sni-01', etc. This is the raw value retrieved from the ACME server. Only 'http-01' and 'dns-01' are supported by cert-manager, other values will be ignored. type: string url: - description: |- - URL is the URL of this challenge. It can be used to retrieve additional - metadata about the Challenge from the ACME server. + description: URL is the URL of this challenge. It can be used to retrieve additional metadata about the Challenge from the ACME server. type: string identifier: description: Identifier is the DNS name to be validated as part of this authorization type: string initialState: - description: |- - InitialState is the initial state of the ACME authorization when first - fetched from the ACME server. - If an Authorization is already 'valid', the Order controller will not - create a Challenge resource for the authorization. This will occur when - working with an ACME server that enables 'authz reuse' (such as Let's - Encrypt's production endpoint). - If not set and 'identifier' is set, the state is assumed to be pending - and a Challenge will be created. + description: InitialState is the initial state of the ACME authorization when first fetched from the ACME server. If an Authorization is already 'valid', the Order controller will not create a Challenge resource for the authorization. This will occur when working with an ACME server that enables 'authz reuse' (such as Let's Encrypt's production endpoint). If not set and 'identifier' is set, the state is assumed to be pending and a Challenge will be created. type: string enum: - valid @@ -8122,41 +4427,24 @@ spec: description: URL is the URL of the Authorization that must be completed type: string wildcard: - description: |- - Wildcard will be true if this authorization is for a wildcard DNS name. - If this is true, the identifier will be the *non-wildcard* version of - the DNS name. - For example, if '*.example.com' is the DNS name being validated, this - field will be 'true' and the 'identifier' field will be 'example.com'. + description: Wildcard will be true if this authorization is for a wildcard DNS name. If this is true, the identifier will be the *non-wildcard* version of the DNS name. For example, if '*.example.com' is the DNS name being validated, this field will be 'true' and the 'identifier' field will be 'example.com'. type: boolean certificate: - description: |- - Certificate is a copy of the PEM encoded certificate for this Order. - This field will be populated after the order has been successfully - finalized with the ACME server, and the order has transitioned to the - 'valid' state. + description: Certificate is a copy of the PEM encoded certificate for this Order. This field will be populated after the order has been successfully finalized with the ACME server, and the order has transitioned to the 'valid' state. type: string format: byte failureTime: - description: |- - FailureTime stores the time that this order failed. - This is used to influence garbage collection and back-off. + description: FailureTime stores the time that this order failed. This is used to influence garbage collection and back-off. type: string format: date-time finalizeURL: - description: |- - FinalizeURL of the Order. 
- This is used to obtain certificates for this order once it has been completed. + description: FinalizeURL of the Order. This is used to obtain certificates for this order once it has been completed. type: string reason: - description: |- - Reason optionally provides more information about a why the order is in - the current state. + description: Reason optionally provides more information about a why the order is in the current state. type: string state: - description: |- - State contains the current state of this Order resource. - States 'success' and 'expired' are 'final' + description: State contains the current state of this Order resource. States 'success' and 'expired' are 'final' type: string enum: - valid @@ -8167,13 +4455,8 @@ spec: - expired - errored url: - description: |- - URL of the Order. - This will initially be empty when the resource is first created. - The Order controller will populate this field when the Order is first processed. - This field will be immutable after it is initially set. + description: URL of the Order. This will initially be empty when the resource is first created. The Order controller will populate this field when the Order is first processed. This field will be immutable after it is initially set. type: string served: true storage: true - -# END crd {{- end }} +{{- end }} diff --git a/internal/constellation/helm/charts/cert-manager/templates/deployment.yaml b/internal/constellation/helm/charts/cert-manager/templates/deployment.yaml index e6f3f681e..aea5736c0 100644 --- a/internal/constellation/helm/charts/cert-manager/templates/deployment.yaml +++ b/internal/constellation/helm/charts/cert-manager/templates/deployment.yaml @@ -15,10 +15,6 @@ metadata: {{- end }} spec: replicas: {{ .Values.replicaCount }} - {{- /* The if statement below is equivalent to {{- if $value }} but will also return true for 0. */ -}} - {{- if not (has (quote .Values.global.revisionHistoryLimit) (list "" (quote ""))) }} - revisionHistoryLimit: {{ .Values.global.revisionHistoryLimit }} - {{- end }} selector: matchLabels: app.kubernetes.io/name: {{ template "cert-manager.name" . }} @@ -43,7 +39,7 @@ spec: annotations: {{- toYaml . | nindent 8 }} {{- end }} - {{- if and .Values.prometheus.enabled (not (or .Values.prometheus.servicemonitor.enabled .Values.prometheus.podmonitor.enabled)) }} + {{- if and .Values.prometheus.enabled (not .Values.prometheus.servicemonitor.enabled) }} {{- if not .Values.podAnnotations }} annotations: {{- end }} @@ -56,7 +52,6 @@ spec: {{- if hasKey .Values "automountServiceAccountToken" }} automountServiceAccountToken: {{ .Values.automountServiceAccountToken }} {{- end }} - enableServiceLinks: {{ .Values.enableServiceLinks }} {{- with .Values.global.priorityClassName }} priorityClassName: {{ . | quote }} {{- end }} @@ -64,30 +59,20 @@ spec: securityContext: {{- toYaml . | nindent 8 }} {{- end }} - {{- if or .Values.volumes .Values.config}} + {{- with .Values.volumes }} volumes: - {{- if .Values.config }} - - name: config - configMap: - name: {{ include "cert-manager.fullname" . }} - {{- end }} - {{ with .Values.volumes }} {{- toYaml . 
| nindent 8 }} - {{- end }} {{- end }} containers: - name: {{ .Chart.Name }}-controller - image: "{{ template "image" (tuple .Values.image $.Chart.AppVersion) }}" + {{- with .Values.image }} + image: "{{- if .registry -}}{{ .registry }}/{{- end -}}{{ .repository }}{{- if (.digest) -}} @{{ .digest }}{{- else -}}:{{ default $.Chart.AppVersion .tag }} {{- end -}}" + {{- end }} imagePullPolicy: {{ .Values.image.pullPolicy }} args: - {{- /* The if statement below is equivalent to {{- if $value }} but will also return true for 0. */ -}} - {{- if not (has (quote .Values.global.logLevel) (list "" (quote ""))) }} + {{- if .Values.global.logLevel }} - --v={{ .Values.global.logLevel }} {{- end }} - {{- if .Values.config }} - - --config=/var/cert-manager/config/config.yaml - {{- end }} - {{- $config := default .Values.config "" }} {{- if .Values.clusterResourceNamespace }} - --cluster-resource-namespace={{ .Values.clusterResourceNamespace }} {{- else }} @@ -137,9 +122,6 @@ spec: {{- with .Values.dns01RecursiveNameservers }} - --dns01-recursive-nameservers={{ . }} {{- end }} - {{- if .Values.disableAutoApproval }} - - --controllers=-certificaterequests-approver - {{- end }} ports: - containerPort: 9402 name: http-metrics @@ -151,15 +133,9 @@ spec: securityContext: {{- toYaml . | nindent 12 }} {{- end }} - {{- if or .Values.config .Values.volumeMounts }} + {{- with .Values.volumeMounts }} volumeMounts: - {{- if .Values.config }} - - name: config - mountPath: /var/cert-manager/config - {{- end }} - {{- with .Values.volumeMounts }} {{- toYaml . | nindent 12 }} - {{- end }} {{- end }} env: - name: POD_NAMESPACE @@ -226,6 +202,3 @@ spec: dnsConfig: {{- toYaml . | nindent 8 }} {{- end }} - {{- with .Values.hostAliases }} - hostAliases: {{ toYaml . | nindent 8 }} - {{- end }} \ No newline at end of file diff --git a/internal/constellation/helm/charts/cert-manager/templates/extras-objects.yaml b/internal/constellation/helm/charts/cert-manager/templates/extras-objects.yaml deleted file mode 100644 index 9ec3a7e9b..000000000 --- a/internal/constellation/helm/charts/cert-manager/templates/extras-objects.yaml +++ /dev/null @@ -1,4 +0,0 @@ -{{ range .Values.extraObjects }} ---- -{{ tpl . $ }} -{{ end }} diff --git a/internal/constellation/helm/charts/cert-manager/templates/networkpolicy-egress.yaml b/internal/constellation/helm/charts/cert-manager/templates/networkpolicy-egress.yaml index 37f90bd2e..09712009d 100644 --- a/internal/constellation/helm/charts/cert-manager/templates/networkpolicy-egress.yaml +++ b/internal/constellation/helm/charts/cert-manager/templates/networkpolicy-egress.yaml @@ -11,9 +11,13 @@ spec: {{- end }} podSelector: matchLabels: + app: {{ include "webhook.name" . }} app.kubernetes.io/name: {{ include "webhook.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/component: "webhook" + {{- with .Values.webhook.podLabels }} + {{- toYaml . | nindent 6 }} + {{- end }} policyTypes: - Egress {{- end }} diff --git a/internal/constellation/helm/charts/cert-manager/templates/networkpolicy-webhooks.yaml b/internal/constellation/helm/charts/cert-manager/templates/networkpolicy-webhooks.yaml index 3a0ed7a70..349877a8b 100644 --- a/internal/constellation/helm/charts/cert-manager/templates/networkpolicy-webhooks.yaml +++ b/internal/constellation/helm/charts/cert-manager/templates/networkpolicy-webhooks.yaml @@ -12,9 +12,13 @@ spec: {{- end }} podSelector: matchLabels: - app.kubernetes.io/name: {{ include "webhook.name" . 
}} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: "webhook" + app: {{ include "webhook.name" . }} + app.kubernetes.io/name: {{ include "webhook.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: "webhook" + {{- with .Values.webhook.podLabels }} + {{- toYaml . | nindent 6 }} + {{- end }} policyTypes: - Ingress diff --git a/internal/constellation/helm/charts/cert-manager/templates/poddisruptionbudget.yaml b/internal/constellation/helm/charts/cert-manager/templates/poddisruptionbudget.yaml index ae71eed29..dab75ce68 100644 --- a/internal/constellation/helm/charts/cert-manager/templates/poddisruptionbudget.yaml +++ b/internal/constellation/helm/charts/cert-manager/templates/poddisruptionbudget.yaml @@ -17,13 +17,10 @@ spec: app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/component: "controller" - {{- if not (or (hasKey .Values.podDisruptionBudget "minAvailable") (hasKey .Values.podDisruptionBudget "maxUnavailable")) }} - minAvailable: 1 # Default value because minAvailable and maxUnavailable are not set + {{- with .Values.podDisruptionBudget.minAvailable }} + minAvailable: {{ . }} {{- end }} - {{- if hasKey .Values.podDisruptionBudget "minAvailable" }} - minAvailable: {{ .Values.podDisruptionBudget.minAvailable }} - {{- end }} - {{- if hasKey .Values.podDisruptionBudget "maxUnavailable" }} - maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} + {{- with .Values.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ . }} {{- end }} {{- end }} diff --git a/internal/constellation/helm/charts/cert-manager/templates/podmonitor.yaml b/internal/constellation/helm/charts/cert-manager/templates/podmonitor.yaml deleted file mode 100644 index 1adc0609c..000000000 --- a/internal/constellation/helm/charts/cert-manager/templates/podmonitor.yaml +++ /dev/null @@ -1,50 +0,0 @@ -{{- if and .Values.prometheus.enabled (and .Values.prometheus.podmonitor.enabled .Values.prometheus.servicemonitor.enabled) }} -{{- fail "Either .Values.prometheus.podmonitor.enabled or .Values.prometheus.servicemonitor.enabled can be enabled at a time, but not both." }} -{{- else if and .Values.prometheus.enabled .Values.prometheus.podmonitor.enabled }} -apiVersion: monitoring.coreos.com/v1 -kind: PodMonitor -metadata: - name: {{ template "cert-manager.fullname" . }} -{{- if .Values.prometheus.podmonitor.namespace }} - namespace: {{ .Values.prometheus.podmonitor.namespace }} -{{- else }} - namespace: {{ include "cert-manager.namespace" . }} -{{- end }} - labels: - app: {{ include "cert-manager.name" . }} - app.kubernetes.io/name: {{ include "cert-manager.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: "controller" - {{- include "labels" . | nindent 4 }} - prometheus: {{ .Values.prometheus.podmonitor.prometheusInstance }} - {{- with .Values.prometheus.podmonitor.labels }} - {{- toYaml . | nindent 4 }} - {{- end }} -{{- if .Values.prometheus.podmonitor.annotations }} - annotations: - {{- with .Values.prometheus.podmonitor.annotations }} - {{- toYaml . | nindent 4 }} - {{- end }} -{{- end }} -spec: - jobLabel: {{ template "cert-manager.fullname" . }} - selector: - matchLabels: - app.kubernetes.io/name: {{ template "cert-manager.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: "controller" -{{- if .Values.prometheus.podmonitor.namespace }} - namespaceSelector: - matchNames: - - {{ include "cert-manager.namespace" . 
}} -{{- end }} - podMetricsEndpoints: - - port: http-metrics - path: {{ .Values.prometheus.podmonitor.path }} - interval: {{ .Values.prometheus.podmonitor.interval }} - scrapeTimeout: {{ .Values.prometheus.podmonitor.scrapeTimeout }} - honorLabels: {{ .Values.prometheus.podmonitor.honorLabels }} - {{- with .Values.prometheus.servicemonitor.endpointAdditionalProperties }} - {{- toYaml . | nindent 4 }} - {{- end }} -{{- end }} diff --git a/internal/constellation/helm/charts/cert-manager/templates/rbac.yaml b/internal/constellation/helm/charts/cert-manager/templates/rbac.yaml index 7a27d4f7a..830e37285 100644 --- a/internal/constellation/helm/charts/cert-manager/templates/rbac.yaml +++ b/internal/constellation/helm/charts/cert-manager/templates/rbac.yaml @@ -398,26 +398,6 @@ subjects: namespace: {{ include "cert-manager.namespace" . }} kind: ServiceAccount -{{- if .Values.global.rbac.aggregateClusterRoles }} ---- - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ template "cert-manager.fullname" . }}-cluster-view - labels: - app: {{ include "cert-manager.name" . }} - app.kubernetes.io/name: {{ include "cert-manager.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: "controller" - {{- include "labels" . | nindent 4 }} - rbac.authorization.k8s.io/aggregate-to-cluster-reader: "true" -rules: - - apiGroups: ["cert-manager.io"] - resources: ["clusterissuers"] - verbs: ["get", "list", "watch"] - -{{- end }} --- apiVersion: rbac.authorization.k8s.io/v1 @@ -434,7 +414,6 @@ metadata: rbac.authorization.k8s.io/aggregate-to-view: "true" rbac.authorization.k8s.io/aggregate-to-edit: "true" rbac.authorization.k8s.io/aggregate-to-admin: "true" - rbac.authorization.k8s.io/aggregate-to-cluster-reader: "true" {{- end }} rules: - apiGroups: ["cert-manager.io"] @@ -474,8 +453,6 @@ rules: --- -{{- if not .Values.disableAutoApproval -}} - # Permission to approve CertificateRequests referencing cert-manager.io Issuers and ClusterIssuers apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole @@ -491,12 +468,7 @@ rules: - apiGroups: ["cert-manager.io"] resources: ["signers"] verbs: ["approve"] - {{- with .Values.approveSignerNames }} - resourceNames: - {{- range . }} - - {{ . 
| quote }} - {{- end }} - {{- end }} + resourceNames: ["issuers.cert-manager.io/*", "clusterissuers.cert-manager.io/*"] --- @@ -521,8 +493,6 @@ subjects: --- -{{- end -}} - # Permission to: # - Update and sign CertificatSigningeRequests referencing cert-manager.io Issuers and ClusterIssuers # - Perform SubjectAccessReviews to test whether users are able to reference Namespaced Issuers diff --git a/internal/constellation/helm/charts/cert-manager/templates/service.yaml b/internal/constellation/helm/charts/cert-manager/templates/service.yaml index 360ec645e..ec34d5878 100644 --- a/internal/constellation/helm/charts/cert-manager/templates/service.yaml +++ b/internal/constellation/helm/charts/cert-manager/templates/service.yaml @@ -1,4 +1,4 @@ -{{- if and .Values.prometheus.enabled (not .Values.prometheus.podmonitor.enabled) }} +{{- if .Values.prometheus.enabled }} apiVersion: v1 kind: Service metadata: @@ -19,12 +19,6 @@ metadata: {{- end }} spec: type: ClusterIP - {{- if .Values.serviceIPFamilyPolicy }} - ipFamilyPolicy: {{ .Values.serviceIPFamilyPolicy }} - {{- end }} - {{- if .Values.serviceIPFamilies }} - ipFamilies: {{ .Values.serviceIPFamilies | toYaml | nindent 2 }} - {{- end }} ports: - protocol: TCP port: 9402 diff --git a/internal/constellation/helm/charts/cert-manager/templates/serviceaccount.yaml b/internal/constellation/helm/charts/cert-manager/templates/serviceaccount.yaml index 87fc00ea7..6026842ff 100644 --- a/internal/constellation/helm/charts/cert-manager/templates/serviceaccount.yaml +++ b/internal/constellation/helm/charts/cert-manager/templates/serviceaccount.yaml @@ -20,6 +20,6 @@ metadata: app.kubernetes.io/component: "controller" {{- include "labels" . | nindent 4 }} {{- with .Values.serviceAccount.labels }} - {{- toYaml . | nindent 4 }} + {{ toYaml . | nindent 4 }} {{- end }} {{- end }} diff --git a/internal/constellation/helm/charts/cert-manager/templates/servicemonitor.yaml b/internal/constellation/helm/charts/cert-manager/templates/servicemonitor.yaml index b63886077..9d9e89992 100644 --- a/internal/constellation/helm/charts/cert-manager/templates/servicemonitor.yaml +++ b/internal/constellation/helm/charts/cert-manager/templates/servicemonitor.yaml @@ -1,6 +1,4 @@ -{{- if and .Values.prometheus.enabled (and .Values.prometheus.podmonitor.enabled .Values.prometheus.servicemonitor.enabled) }} -{{- fail "Either .Values.prometheus.podmonitor.enabled or .Values.prometheus.servicemonitor.enabled can be enabled at a time, but not both." }} -{{- else if and .Values.prometheus.enabled .Values.prometheus.servicemonitor.enabled }} +{{- if and .Values.prometheus.enabled .Values.prometheus.servicemonitor.enabled }} apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: @@ -44,7 +42,4 @@ spec: interval: {{ .Values.prometheus.servicemonitor.interval }} scrapeTimeout: {{ .Values.prometheus.servicemonitor.scrapeTimeout }} honorLabels: {{ .Values.prometheus.servicemonitor.honorLabels }} - {{- with .Values.prometheus.servicemonitor.endpointAdditionalProperties }} - {{- toYaml . 
| nindent 4 }} - {{- end }} {{- end }} diff --git a/internal/constellation/helm/charts/cert-manager/templates/startupapicheck-job.yaml b/internal/constellation/helm/charts/cert-manager/templates/startupapicheck-job.yaml index 311b4c48e..a9b965e18 100644 --- a/internal/constellation/helm/charts/cert-manager/templates/startupapicheck-job.yaml +++ b/internal/constellation/helm/charts/cert-manager/templates/startupapicheck-job.yaml @@ -37,7 +37,6 @@ spec: {{- if hasKey .Values.startupapicheck "automountServiceAccountToken" }} automountServiceAccountToken: {{ .Values.startupapicheck.automountServiceAccountToken }} {{- end }} - enableServiceLinks: {{ .Values.startupapicheck.enableServiceLinks }} {{- with .Values.global.priorityClassName }} priorityClassName: {{ . | quote }} {{- end }} @@ -47,7 +46,9 @@ spec: {{- end }} containers: - name: {{ .Chart.Name }}-startupapicheck - image: "{{ template "image" (tuple .Values.startupapicheck.image $.Chart.AppVersion) }}" + {{- with .Values.startupapicheck.image }} + image: "{{- if .registry -}}{{ .registry }}/{{- end -}}{{ .repository }}{{- if (.digest) -}} @{{ .digest }}{{- else -}}:{{ default $.Chart.AppVersion .tag }} {{- end -}}" + {{- end }} imagePullPolicy: {{ .Values.startupapicheck.image.pullPolicy }} args: - check diff --git a/internal/constellation/helm/charts/cert-manager/templates/webhook-config.yaml b/internal/constellation/helm/charts/cert-manager/templates/webhook-config.yaml index 8f3ce20c3..f3f72f02e 100644 --- a/internal/constellation/helm/charts/cert-manager/templates/webhook-config.yaml +++ b/internal/constellation/helm/charts/cert-manager/templates/webhook-config.yaml @@ -1,6 +1,12 @@ {{- if .Values.webhook.config -}} -{{- $_ := .Values.webhook.config.apiVersion | required ".Values.webhook.config.apiVersion must be set !" -}} -{{- $_ := .Values.webhook.config.kind | required ".Values.webhook.config.kind must be set !" -}} + {{- if not .Values.webhook.config.apiVersion -}} + {{- fail "webhook.config.apiVersion must be set" -}} + {{- end -}} + + {{- if not .Values.webhook.config.kind -}} + {{- fail "webhook.config.kind must be set" -}} + {{- end -}} +{{- end -}} apiVersion: v1 kind: ConfigMap metadata: @@ -13,6 +19,7 @@ metadata: app.kubernetes.io/component: "webhook" {{- include "labels" . | nindent 4 }} data: + {{- if .Values.webhook.config }} config.yaml: | - {{- .Values.webhook.config | toYaml | nindent 4 }} -{{- end -}} \ No newline at end of file + {{ .Values.webhook.config | toYaml | nindent 4 }} + {{- end }} diff --git a/internal/constellation/helm/charts/cert-manager/templates/webhook-deployment.yaml b/internal/constellation/helm/charts/cert-manager/templates/webhook-deployment.yaml index ae5399e90..043c4b150 100644 --- a/internal/constellation/helm/charts/cert-manager/templates/webhook-deployment.yaml +++ b/internal/constellation/helm/charts/cert-manager/templates/webhook-deployment.yaml @@ -15,10 +15,6 @@ metadata: {{- end }} spec: replicas: {{ .Values.webhook.replicaCount }} - {{- /* The if statement below is equivalent to {{- if $value }} but will also return true for 0. */ -}} - {{- if not (has (quote .Values.global.revisionHistoryLimit) (list "" (quote ""))) }} - revisionHistoryLimit: {{ .Values.global.revisionHistoryLimit }} - {{- end }} selector: matchLabels: app.kubernetes.io/name: {{ include "webhook.name" . 
}} @@ -48,7 +44,6 @@ spec: {{- if hasKey .Values.webhook "automountServiceAccountToken" }} automountServiceAccountToken: {{ .Values.webhook.automountServiceAccountToken }} {{- end }} - enableServiceLinks: {{ .Values.webhook.enableServiceLinks }} {{- with .Values.global.priorityClassName }} priorityClassName: {{ . | quote }} {{- end }} @@ -59,16 +54,14 @@ spec: {{- if .Values.webhook.hostNetwork }} hostNetwork: true {{- end }} - {{- if .Values.webhook.hostNetwork }} - dnsPolicy: ClusterFirstWithHostNet - {{- end }} containers: - name: {{ .Chart.Name }}-webhook - image: "{{ template "image" (tuple .Values.webhook.image $.Chart.AppVersion) }}" + {{- with .Values.webhook.image }} + image: "{{- if .registry -}}{{ .registry }}/{{- end -}}{{ .repository }}{{- if (.digest) -}} @{{ .digest }}{{- else -}}:{{ default $.Chart.AppVersion .tag }} {{- end -}}" + {{- end }} imagePullPolicy: {{ .Values.webhook.image.pullPolicy }} args: - {{- /* The if statement below is equivalent to {{- if $value }} but will also return true for 0. */ -}} - {{- if not (has (quote .Values.global.logLevel) (list "" (quote ""))) }} + {{- if .Values.global.logLevel }} - --v={{ .Values.global.logLevel }} {{- end }} {{- if .Values.webhook.config }} @@ -78,8 +71,8 @@ spec: {{ if not $config.securePort -}} - --secure-port={{ .Values.webhook.securePort }} {{- end }} - {{- if .Values.webhook.featureGates }} - - --feature-gates={{ .Values.webhook.featureGates }} + {{- if .Values.featureGates }} + - --feature-gates={{ .Values.featureGates }} {{- end }} {{- $tlsConfig := default $config.tlsConfig "" }} {{ if or (not $config.tlsConfig) (and (not $tlsConfig.dynamic) (not $tlsConfig.filesystem) ) -}} @@ -159,8 +152,8 @@ spec: - name: config mountPath: /var/cert-manager/config {{- end }} - {{- with .Values.webhook.volumeMounts }} - {{- toYaml . | nindent 12 }} + {{- if .Values.webhook.volumeMounts }} + {{- toYaml .Values.webhook.volumeMounts | nindent 12 }} {{- end }} {{- end }} {{- with .Values.webhook.nodeSelector }} @@ -186,7 +179,7 @@ spec: configMap: name: {{ include "webhook.fullname" . }} {{- end }} - {{- with .Values.webhook.volumes }} - {{- toYaml . | nindent 8 }} + {{- if .Values.webhook.volumes }} + {{- toYaml .Values.webhook.volumes | nindent 8 }} {{- end }} {{- end }} diff --git a/internal/constellation/helm/charts/cert-manager/templates/webhook-mutating-webhook.yaml b/internal/constellation/helm/charts/cert-manager/templates/webhook-mutating-webhook.yaml index 9ea29777d..f3db011ef 100644 --- a/internal/constellation/helm/charts/cert-manager/templates/webhook-mutating-webhook.yaml +++ b/internal/constellation/helm/charts/cert-manager/templates/webhook-mutating-webhook.yaml @@ -15,19 +15,17 @@ metadata: {{- end }} webhooks: - name: webhook.cert-manager.io - {{- with .Values.webhook.mutatingWebhookConfiguration.namespaceSelector }} - namespaceSelector: - {{- toYaml . | nindent 6 }} - {{- end }} rules: - apiGroups: - "cert-manager.io" + - "acme.cert-manager.io" apiVersions: - "v1" operations: - CREATE + - UPDATE resources: - - "certificaterequests" + - "*/*" admissionReviewVersions: ["v1"] # This webhook only accepts v1 cert-manager resources. # Equivalent matchPolicy ensures that non-v1 resource requests are sent to @@ -45,4 +43,4 @@ webhooks: name: {{ template "webhook.fullname" . }} namespace: {{ include "cert-manager.namespace" . 
}} path: /mutate - {{- end }} \ No newline at end of file + {{- end }} diff --git a/internal/constellation/helm/charts/cert-manager/templates/webhook-poddisruptionbudget.yaml b/internal/constellation/helm/charts/cert-manager/templates/webhook-poddisruptionbudget.yaml index ab2a48109..c8a357cb1 100644 --- a/internal/constellation/helm/charts/cert-manager/templates/webhook-poddisruptionbudget.yaml +++ b/internal/constellation/helm/charts/cert-manager/templates/webhook-poddisruptionbudget.yaml @@ -17,13 +17,10 @@ spec: app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/component: "webhook" - {{- if not (or (hasKey .Values.webhook.podDisruptionBudget "minAvailable") (hasKey .Values.webhook.podDisruptionBudget "maxUnavailable")) }} - minAvailable: 1 # Default value because minAvailable and maxUnavailable are not set + {{- with .Values.webhook.podDisruptionBudget.minAvailable }} + minAvailable: {{ . }} {{- end }} - {{- if hasKey .Values.webhook.podDisruptionBudget "minAvailable" }} - minAvailable: {{ .Values.webhook.podDisruptionBudget.minAvailable }} - {{- end }} - {{- if hasKey .Values.webhook.podDisruptionBudget "maxUnavailable" }} - maxUnavailable: {{ .Values.webhook.podDisruptionBudget.maxUnavailable }} + {{- with .Values.webhook.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ . }} {{- end }} {{- end }} diff --git a/internal/constellation/helm/charts/cert-manager/templates/webhook-service.yaml b/internal/constellation/helm/charts/cert-manager/templates/webhook-service.yaml index 86d47f164..5f9395049 100644 --- a/internal/constellation/helm/charts/cert-manager/templates/webhook-service.yaml +++ b/internal/constellation/helm/charts/cert-manager/templates/webhook-service.yaml @@ -18,12 +18,6 @@ metadata: {{- end }} spec: type: {{ .Values.webhook.serviceType }} - {{- if .Values.webhook.serviceIPFamilyPolicy }} - ipFamilyPolicy: {{ .Values.webhook.serviceIPFamilyPolicy }} - {{- end }} - {{- if .Values.webhook.serviceIPFamilies }} - ipFamilies: {{ .Values.webhook.serviceIPFamilies | toYaml | nindent 2 }} - {{- end }} {{- with .Values.webhook.loadBalancerIP }} loadBalancerIP: {{ . }} {{- end }} diff --git a/internal/constellation/helm/charts/cert-manager/templates/webhook-validating-webhook.yaml b/internal/constellation/helm/charts/cert-manager/templates/webhook-validating-webhook.yaml index 76235fdee..a5d168e29 100644 --- a/internal/constellation/helm/charts/cert-manager/templates/webhook-validating-webhook.yaml +++ b/internal/constellation/helm/charts/cert-manager/templates/webhook-validating-webhook.yaml @@ -15,10 +15,16 @@ metadata: {{- end }} webhooks: - name: webhook.cert-manager.io - {{- with .Values.webhook.validatingWebhookConfiguration.namespaceSelector }} namespaceSelector: - {{- toYaml . | nindent 6 }} - {{- end }} + matchExpressions: + - key: "cert-manager.io/disable-validation" + operator: "NotIn" + values: + - "true" + - key: "name" + operator: "NotIn" + values: + - {{ include "cert-manager.namespace" . }} rules: - apiGroups: - "cert-manager.io" diff --git a/internal/constellation/helm/charts/cert-manager/values.yaml b/internal/constellation/helm/charts/cert-manager/values.yaml index ae304af7b..0870178c4 100644 --- a/internal/constellation/helm/charts/cert-manager/values.yaml +++ b/internal/constellation/helm/charts/cert-manager/values.yaml @@ -1,376 +1,199 @@ -# +docs:section=Global - # Default values for cert-manager. # This is a YAML-formatted file. # Declare variables to be passed into your templates. 
global: - # Reference to one or more secrets to be used when pulling images. - # For more information, see [Pull an Image from a Private Registry](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/). - # - # For example: - # imagePullSecrets: - # - name: "image-pull-secret" + # Reference to one or more secrets to be used when pulling images + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ imagePullSecrets: [] - # Labels to apply to all resources. + # - name: "image-pull-secret" + + # Labels to apply to all resources # Please note that this does not add labels to the resources created dynamically by the controllers. # For these resources, you have to add the labels in the template in the cert-manager custom resource: - # For example, podTemplate/ ingressTemplate in ACMEChallengeSolverHTTP01Ingress - # For more information, see the [cert-manager documentation](https://cert-manager.io/docs/reference/api-docs/#acme.cert-manager.io/v1.ACMEChallengeSolverHTTP01Ingress). - # For example, secretTemplate in CertificateSpec - # For more information, see the [cert-manager documentation](https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.CertificateSpec). + # eg. podTemplate/ ingressTemplate in ACMEChallengeSolverHTTP01Ingress + # ref: https://cert-manager.io/docs/reference/api-docs/#acme.cert-manager.io/v1.ACMEChallengeSolverHTTP01Ingress + # eg. secretTemplate in CertificateSpec + # ref: https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.CertificateSpec commonLabels: {} - # The number of old ReplicaSets to retain to allow rollback (if not set, the default Kubernetes value is set to 10). - # +docs:property - # revisionHistoryLimit: 1 + # team_name: dev - # The optional priority class to be used for the cert-manager pods. + # Optional priority class to be used for the cert-manager pods priorityClassName: "" rbac: - # Create required ClusterRoles and ClusterRoleBindings for cert-manager. create: true - # Aggregate ClusterRoles to Kubernetes default user-facing roles. For more information, see [User-facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) + # Aggregate ClusterRoles to Kubernetes default user-facing roles. Ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles aggregateClusterRoles: true podSecurityPolicy: - # Create PodSecurityPolicy for cert-manager. - # - # Note that PodSecurityPolicy was deprecated in Kubernetes 1.21 and removed in Kubernetes 1.25. enabled: false - # Configure the PodSecurityPolicy to use AppArmor. useAppArmor: true - # Set the verbosity of cert-manager. A range of 0 - 6, with 6 being the most verbose. + # Set the verbosity of cert-manager. Range of 0 - 6 with 6 being the most verbose. logLevel: 2 leaderElection: - # Override the namespace used for the leader election lease. + # Override the namespace used for the leader election lease namespace: "kube-system" # The duration that non-leader candidates will wait after observing a # leadership renewal until attempting to acquire leadership of a led but # unrenewed leader slot. This is effectively the maximum duration that a # leader can be stopped before it is replaced by another candidate. - # +docs:property # leaseDuration: 60s # The interval between attempts by the acting master to renew a leadership # slot before it stops leading. This must be less than or equal to the # lease duration. 
-# +docs:property # renewDeadline: 40s # The duration the clients should wait between attempting acquisition and # renewal of a leadership. -# +docs:property # retryPeriod: 15s - -# This option is equivalent to setting crds.enabled=true and crds.keep=true. -# Deprecated: use crds.enabled and crds.keep instead. installCRDs: false -crds: - # This option decides if the CRDs should be installed - # as part of the Helm installation. - enabled: false - # This option makes it so that the "helm.sh/resource-policy": keep - # annotation is added to the CRD. This will prevent Helm from uninstalling - # the CRD when the Helm release is uninstalled. - # WARNING: when the CRDs are removed, all cert-manager custom resources - # (Certificates, Issuers, ...) will be removed too by the garbage collector. - keep: true -# +docs:section=Controller - -# The number of replicas of the cert-manager controller to run. -# -# The default is 1, but in production set this to 2 or 3 to provide high -# availability. -# -# If `replicas > 1`, consider setting `podDisruptionBudget.enabled=true`. -# -# Note that cert-manager uses leader election to ensure that there can -# only be a single instance active at a time. replicaCount: 1 -# Deployment update strategy for the cert-manager controller deployment. -# For more information, see the [Kubernetes documentation](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy). -# -# For example: -# strategy: -# type: RollingUpdate -# rollingUpdate: -# maxSurge: 0 -# maxUnavailable: 1 strategy: {} -podDisruptionBudget: - # Enable or disable the PodDisruptionBudget resource. - # - # This prevents downtime during voluntary disruptions such as during a Node upgrade. - # For example, the PodDisruptionBudget will block `kubectl drain` - # if it is used on the Node where the only remaining cert-manager - # Pod is currently running. - enabled: false - # This configures the minimum available pods for disruptions. It can either be set to - # an integer (e.g. 1) or a percentage value (e.g. 25%). - # It cannot be used if `maxUnavailable` is set. - # +docs:property - # minAvailable: 1 -# This configures the maximum unavailable pods for disruptions. It can either be set to -# an integer (e.g. 1) or a percentage value (e.g. 25%). -# it cannot be used if `minAvailable` is set. -# +docs:property -# maxUnavailable: 1 +# type: RollingUpdate +# rollingUpdate: +# maxSurge: 0 +# maxUnavailable: 1 -# A comma-separated list of feature gates that should be enabled on the -# controller pod. +podDisruptionBudget: + enabled: false + minAvailable: 1 + # maxUnavailable: 1 +# minAvailable and maxUnavailable can either be set to an integer (e.g. 1) +# or a percentage value (e.g. 25%) + +# Comma separated list of feature gates that should be enabled on the controller +# Note: do not use this field to pass feature gate values into webhook +# component as this behaviour relies on a bug that will be fixed in cert-manager 1.13 +# https://github.com/cert-manager/cert-manager/pull/6093 +# Use webhook.extraArgs to pass --feature-gates flag directly instead. featureGates: "" -# The maximum number of challenges that can be scheduled as 'processing' at once. +# The maximum number of challenges that can be scheduled as 'processing' at once maxConcurrentChallenges: 60 image: - # The container registry to pull the manager image from. - # +docs:property - # registry: quay.io - - # The container image for the cert-manager controller. 
- # +docs:property repository: quay.io/jetstack/cert-manager-controller + # You can manage a registry with + # registry: quay.io + # repository: jetstack/cert-manager-controller + # Override the image tag to deploy by setting this variable. - # If no value is set, the chart's appVersion is used. - # +docs:property - # tag: vX.Y.Z + # If no value is set, the chart's appVersion will be used. + # tag: canary - # Setting a digest will override any tag. - # +docs:property + # Setting a digest will override any tag # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 - - # Kubernetes imagePullPolicy on Deployment. pullPolicy: IfNotPresent - digest: sha256:9b5d5e9c0fd4944221d059921cc05f388c9a5fc0b02a60b47f0eccfcd8243331 + digest: sha256:fb2546fe51e49206dbf72bb0d6f909a0018eda0c2b024547b03d3f3d604e4c5e # Override the namespace used to store DNS provider credentials etc. for ClusterIssuer # resources. By default, the same namespace as cert-manager is deployed within is # used. This namespace will not be automatically created by the Helm chart. clusterResourceNamespace: "" -# This namespace allows you to define where the services are installed into. -# If not set then they use the namespace of the release. -# This is helpful when installing cert manager as a chart dependency (sub chart). +# This namespace allows you to define where the services will be installed into +# if not set then they will use the namespace of the release +# This is helpful when installing cert manager as a chart dependency (sub chart) namespace: "" serviceAccount: - # Specifies whether a service account should be created. + # Specifies whether a service account should be created create: true # The name of the service account to use. - # If not set and create is true, a name is generated using the fullname template. - # +docs:property + # If not set and create is true, a name is generated using the fullname template # name: "" - - # Optional additional annotations to add to the controller's Service Account. - # +docs:property + # Optional additional annotations to add to the controller's ServiceAccount # annotations: {} - - # Optional additional labels to add to the controller's Service Account. - # +docs:property - # labels: {} - # Automount API credentials for a Service Account. + # Optional additional labels to add to the controller's ServiceAccount + # labels: {} automountServiceAccountToken: true -# Automounting API credentials for a particular pod. -# +docs:property +# Automounting API credentials for a particular pod # automountServiceAccountToken: true -# When this flag is enabled, secrets will be automatically removed when the certificate resource is deleted. +# When this flag is enabled, secrets will be automatically removed when the certificate resource is deleted enableCertificateOwnerRef: false -# This property is used to configure options for the controller pod. -# This allows setting options that would usually be provided using flags. -# An APIVersion and Kind must be specified in your values.yaml file. -# Flags will override options that are set here. 
-# -# For example: -# config: -# apiVersion: controller.config.cert-manager.io/v1alpha1 -# kind: ControllerConfiguration -# logging: -# verbosity: 2 -# format: text -# leaderElectionConfig: -# namespace: kube-system -# kubernetesAPIQPS: 9000 -# kubernetesAPIBurst: 9000 -# numberOfConcurrentWorkers: 200 -# featureGates: -# AdditionalCertificateOutputFormats: true -# DisallowInsecureCSRUsageDefinition: true -# ExperimentalCertificateSigningRequestControllers: true -# ExperimentalGatewayAPISupport: true -# LiteralCertificateSubject: true -# SecretsFilteredCaching: true -# ServerSideApply: true -# StableCertificateRequestName: true -# UseCertificateRequestBasicConstraints: true -# ValidateCAA: true -# metricsTLSConfig: -# dynamic: -# secretNamespace: "cert-manager" -# secretName: "cert-manager-metrics-ca" -# dnsNames: -# - cert-manager-metrics -# - cert-manager-metrics.cert-manager -# - cert-manager-metrics.cert-manager.svc -config: {} -# Setting Nameservers for DNS01 Self Check. -# For more information, see the [cert-manager documentation](https://cert-manager.io/docs/configuration/acme/dns01/#setting-nameservers-for-dns01-self-check). +# Setting Nameservers for DNS01 Self Check +# See: https://cert-manager.io/docs/configuration/acme/dns01/#setting-nameservers-for-dns01-self-check -# A comma-separated string with the host and port of the recursive nameservers cert-manager should query. +# Comma separated string with host and port of the recursive nameservers cert-manager should query dns01RecursiveNameservers: "" -# Forces cert-manager to use only the recursive nameservers for verification. -# Enabling this option could cause the DNS01 self check to take longer owing to caching performed by the recursive nameservers. +# Forces cert-manager to only use the recursive nameservers for verification. +# Enabling this option could cause the DNS01 self check to take longer due to caching performed by the recursive nameservers dns01RecursiveNameserversOnly: false -# Option to disable cert-manager's build-in auto-approver. The auto-approver -# approves all CertificateRequests that reference issuers matching the 'approveSignerNames' -# option. This 'disableAutoApproval' option is useful when you want to make all approval decisions -# using a different approver (like approver-policy - https://github.com/cert-manager/approver-policy). -disableAutoApproval: false -# List of signer names that cert-manager will approve by default. CertificateRequests -# referencing these signer names will be auto-approved by cert-manager. Defaults to just -# approving the cert-manager.io Issuer and ClusterIssuer issuers. When set to an empty -# array, ALL issuers will be auto-approved by cert-manager. To disable the auto-approval, -# because eg. you are using approver-policy, you can enable 'disableAutoApproval'. -# ref: https://cert-manager.io/docs/concepts/certificaterequest/#approval -# +docs:property -approveSignerNames: - - issuers.cert-manager.io/* - - clusterissuers.cert-manager.io/* # Additional command line flags to pass to cert-manager controller binary. -# To see all available flags run `docker run quay.io/jetstack/cert-manager-controller: --help`. -# -# Use this flag to enable or disable arbitrary controllers. For example, to disable the CertificiateRequests approver. 
-# -# For example: -# extraArgs: -# - --controllers=*,-certificaterequests-approver +# To see all available flags run docker run quay.io/jetstack/cert-manager-controller: --help extraArgs: [] -# Additional environment variables to pass to cert-manager controller binary. +# Use this flag to enable or disable arbitrary controllers, for example, disable the CertificiateRequests approver +# - --controllers=*,-certificaterequests-approver + extraEnv: [] # - name: SOME_VAR # value: 'some value' -# Resources to provide to the cert-manager controller pod. -# -# For example: -# requests: -# cpu: 10m -# memory: 32Mi -# -# For more information, see [Resource Management for Pods and Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). resources: {} -# Pod Security Context. -# For more information, see [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/). -# +docs:property +# requests: +# cpu: 10m +# memory: 32Mi + +# Pod Security Context +# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ securityContext: runAsNonRoot: true seccompProfile: type: RuntimeDefault -# Container Security Context to be set on the controller component container. -# For more information, see [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/). -# +docs:property +# Container Security Context to be set on the controller component container +# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ containerSecurityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL - readOnlyRootFilesystem: true -# Additional volumes to add to the cert-manager controller pod. + # readOnlyRootFilesystem: true + # runAsNonRoot: true volumes: [] -# Additional volume mounts to add to the cert-manager controller container. volumeMounts: [] -# Optional additional annotations to add to the controller Deployment. -# +docs:property +# Optional additional annotations to add to the controller Deployment # deploymentAnnotations: {} -# Optional additional annotations to add to the controller Pods. -# +docs:property +# Optional additional annotations to add to the controller Pods # podAnnotations: {} - -# Optional additional labels to add to the controller Pods. podLabels: {} -# Optional annotations to add to the controller Service. -# +docs:property +# Optional annotations to add to the controller Service # serviceAnnotations: {} -# Optional additional labels to add to the controller Service. -# +docs:property +# Optional additional labels to add to the controller Service # serviceLabels: {} -# Optionally set the IP family policy for the controller Service to configure dual-stack; see [Configure dual-stack](https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services). -# +docs:property -# serviceIPFamilyPolicy: "" - -# Optionally set the IP families for the controller Service that should be supported, in the order in which they should be applied to ClusterIP. Can be IPv4 and/or IPv6. -# +docs:property -# serviceIPFamilies: [] - -# Optional DNS settings. These are useful if you have a public and private DNS zone for -# the same domain on Route 53. The following is an example of ensuring +# Optional DNS settings, useful if you have a public and private DNS zone for +# the same domain on Route 53. 
What follows is an example of ensuring # cert-manager can access an ingress or DNS TXT records at all times. -# Note that this requires Kubernetes 1.10 or `CustomPodDNS` feature gate enabled for +# NOTE: This requires Kubernetes 1.10 or `CustomPodDNS` feature gate enabled for # the cluster to work. - -# Pod DNS policy. -# For more information, see [Pod's DNS Policy](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy). -# +docs:property # podDnsPolicy: "None" - -# Pod DNS configuration. The podDnsConfig field is optional and can work with any podDnsPolicy -# settings. However, when a Pod's dnsPolicy is set to "None", the dnsConfig field has to be specified. -# For more information, see [Pod's DNS Config](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config). -# +docs:property # podDnsConfig: # nameservers: # - "1.1.1.1" # - "8.8.8.8" - -# Optional hostAliases for cert-manager-controller pods. May be useful when performing ACME DNS-01 self checks. -hostAliases: [] -# - ip: 127.0.0.1 -# hostnames: -# - foo.local -# - bar.local -# - ip: 10.1.2.3 -# hostnames: -# - foo.remote -# - bar.remote - -# The nodeSelector on Pods tells Kubernetes to schedule Pods on the nodes with -# matching labels. -# For more information, see [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/). -# -# This default ensures that Pods are only scheduled to Linux nodes. -# It prevents Pods being scheduled to Windows nodes in a mixed OS cluster. -# +docs:property nodeSelector: kubernetes.io/os: linux -# +docs:ignore ingressShim: {} -# Optional default issuer to use for ingress resources. -# +docs:property=ingressShim.defaultIssuerName # defaultIssuerName: "" - -# Optional default issuer kind to use for ingress resources. -# +docs:property=ingressShim.defaultIssuerKind # defaultIssuerKind: "" - -# Optional default issuer group to use for ingress resources. -# +docs:property=ingressShim.defaultIssuerGroup # defaultIssuerGroup: "" -# Use these variables to configure the HTTP_PROXY environment variables. - -# Configures the HTTP_PROXY environment variable where a HTTP proxy is required. -# +docs:property +prometheus: + enabled: true + servicemonitor: + enabled: false + prometheusInstance: default + targetPort: 9402 + path: /metrics + interval: 60s + scrapeTimeout: 30s + labels: {} + annotations: {} + honorLabels: false +# Use these variables to configure the HTTP_PROXY environment variables # http_proxy: "http://proxy:8080" - -# Configures the HTTPS_PROXY environment variable where a HTTP proxy is required. -# +docs:property # https_proxy: "https://proxy:8080" - -# Configures the NO_PROXY environment variable where a HTTP proxy is required, -# but certain domains should be excluded. -# +docs:property # no_proxy: 127.0.0.1,localhost -# A Kubernetes Affinity, if required. For more information, see [Affinity v1 core](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#affinity-v1-core). -# -# For example: +# A Kubernetes Affinty, if required; see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#affinity-v1-core +# for example: # affinity: # nodeAffinity: # requiredDuringSchedulingIgnoredDuringExecution: @@ -381,18 +204,16 @@ ingressShim: {} # values: # - master affinity: {} -# A list of Kubernetes Tolerations, if required. For more information, see [Toleration v1 core](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#toleration-v1-core). 
-# -# For example: +# A list of Kubernetes Tolerations, if required; see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#toleration-v1-core +# for example: # tolerations: # - key: foo.bar.com/role # operator: Equal # value: master # effect: NoSchedule tolerations: [] -# A list of Kubernetes TopologySpreadConstraints, if required. For more information, see [Topology spread constraint v1 core](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#topologyspreadconstraint-v1-core -# -# For example: +# A list of Kubernetes TopologySpreadConstraints, if required; see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#topologyspreadconstraint-v1-core +# for example: # topologySpreadConstraints: # - maxSkew: 2 # topologyKey: topology.kubernetes.io/zone @@ -404,361 +225,150 @@ tolerations: [] topologySpreadConstraints: [] # LivenessProbe settings for the controller container of the controller Pod. # -# This is enabled by default, in order to enable the clock-skew liveness probe that -# restarts the controller in case of a skew between the system clock and the monotonic clock. +# Disabled by default, because the controller has a leader election mechanism +# which should cause it to exit if it is unable to renew its leader election +# record. # LivenessProbe durations and thresholds are based on those used for the Kubernetes -# controller-manager. For more information see the following on the -# [Kubernetes GitHub repository](https://github.com/kubernetes/kubernetes/blob/806b30170c61a38fedd54cc9ede4cd6275a1ad3b/cmd/kubeadm/app/util/staticpod/utils.go#L241-L245) -# +docs:property +# controller-manager. See: +# https://github.com/kubernetes/kubernetes/blob/806b30170c61a38fedd54cc9ede4cd6275a1ad3b/cmd/kubeadm/app/util/staticpod/utils.go#L241-L245 livenessProbe: - enabled: true + enabled: false initialDelaySeconds: 10 periodSeconds: 10 timeoutSeconds: 15 successThreshold: 1 failureThreshold: 8 -# enableServiceLinks indicates whether information about services should be -# injected into the pod's environment variables, matching the syntax of Docker -# links. -enableServiceLinks: false -# +docs:section=Prometheus -prometheus: - # Enable Prometheus monitoring for the cert-manager controller to use with the - # Prometheus Operator. If this option is enabled without enabling `prometheus.servicemonitor.enabled` or - # `prometheus.podmonitor.enabled`, 'prometheus.io' annotations are added to the cert-manager Deployment - # resources. Additionally, a service is created which can be used together - # with your own ServiceMonitor (managed outside of this Helm chart). - # Otherwise, a ServiceMonitor/ PodMonitor is created. - enabled: true - servicemonitor: - # Create a ServiceMonitor to add cert-manager to Prometheus. - enabled: false - # Specifies the `prometheus` label on the created ServiceMonitor. This is - # used when different Prometheus instances have label selectors matching - # different ServiceMonitors. - prometheusInstance: default - # The target port to set on the ServiceMonitor. This must match the port that the - # cert-manager controller is listening on for metrics. - targetPort: 9402 - # The path to scrape for metrics. - path: /metrics - # The interval to scrape metrics. - interval: 60s - # The timeout before a metrics scrape fails. - scrapeTimeout: 30s - # Additional labels to add to the ServiceMonitor. - labels: {} - # Additional annotations to add to the ServiceMonitor. 
- annotations: {} - # Keep labels from scraped data, overriding server-side labels. - honorLabels: false - # EndpointAdditionalProperties allows setting additional properties on the - # endpoint such as relabelings, metricRelabelings etc. - # - # For example: - # endpointAdditionalProperties: - # relabelings: - # - action: replace - # sourceLabels: - # - __meta_kubernetes_pod_node_name - # targetLabel: instance - # - # +docs:property - endpointAdditionalProperties: {} - # Note that you can not enable both PodMonitor and ServiceMonitor as they are mutually exclusive. Enabling both will result in a error. - podmonitor: - # Create a PodMonitor to add cert-manager to Prometheus. - enabled: false - # Specifies the `prometheus` label on the created PodMonitor. This is - # used when different Prometheus instances have label selectors matching - # different PodMonitors. - prometheusInstance: default - # The path to scrape for metrics. - path: /metrics - # The interval to scrape metrics. - interval: 60s - # The timeout before a metrics scrape fails. - scrapeTimeout: 30s - # Additional labels to add to the PodMonitor. - labels: {} - # Additional annotations to add to the PodMonitor. - annotations: {} - # Keep labels from scraped data, overriding server-side labels. - honorLabels: false - # EndpointAdditionalProperties allows setting additional properties on the - # endpoint such as relabelings, metricRelabelings etc. - # - # For example: - # endpointAdditionalProperties: - # relabelings: - # - action: replace - # sourceLabels: - # - __meta_kubernetes_pod_node_name - # targetLabel: instance - # - # +docs:property - endpointAdditionalProperties: {} -# +docs:section=Webhook webhook: - # Number of replicas of the cert-manager webhook to run. - # - # The default is 1, but in production set this to 2 or 3 to provide high - # availability. - # - # If `replicas > 1`, consider setting `webhook.podDisruptionBudget.enabled=true`. replicaCount: 1 - # The number of seconds the API server should wait for the webhook to respond before treating the call as a failure. - # The value must be between 1 and 30 seconds. For more information, see - # [Validating webhook configuration v1](https://kubernetes.io/docs/reference/kubernetes-api/extend-resources/validating-webhook-configuration-v1/). - # - # The default is set to the maximum value of 30 seconds as - # users sometimes report that the connection between the K8S API server and - # the cert-manager webhook server times out. - # If *this* timeout is reached, the error message will be "context deadline exceeded", - # which doesn't help the user diagnose what phase of the HTTPS connection timed out. - # For example, it could be during DNS resolution, TCP connection, TLS - # negotiation, HTTP negotiation, or slow HTTP response from the webhook - # server. - # By setting this timeout to its maximum value the underlying timeout error - # message has more chance of being returned to the end user. - timeoutSeconds: 30 - # This is used to configure options for the webhook pod. - # This allows setting options that would usually be provided using flags. + timeoutSeconds: 10 + # Used to configure options for the webhook pod. + # This allows setting options that'd usually be provided via flags. # An APIVersion and Kind must be specified in your values.yaml file. - # Flags override options that are set here. - # - # For example: - # apiVersion: webhook.config.cert-manager.io/v1alpha1 - # kind: WebhookConfiguration - # # The port that the webhook listens on for requests. 
- # # In GKE private clusters, by default Kubernetes apiservers are allowed to - # # talk to the cluster nodes only on 443 and 10250. Configuring - # # securePort: 10250 therefore will work out-of-the-box without needing to add firewall - # # rules or requiring NET_BIND_SERVICE capabilities to bind port numbers < 1000. - # # This should be uncommented and set as a default by the chart once - # # the apiVersion of WebhookConfiguration graduates beyond v1alpha1. - # securePort: 10250 - config: {} - # The update strategy for the cert-manager webhook deployment. - # For more information, see the [Kubernetes documentation](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy) - # - # For example: - # strategy: - # type: RollingUpdate - # rollingUpdate: - # maxSurge: 0 - # maxUnavailable: 1 + # Flags will override options that are set here. + config: + # apiVersion: webhook.config.cert-manager.io/v1alpha1 + # kind: WebhookConfiguration + + # The port that the webhook should listen on for requests. + # In GKE private clusters, by default kubernetes apiservers are allowed to + # talk to the cluster nodes only on 443 and 10250. so configuring + # securePort: 10250, will work out of the box without needing to add firewall + # rules or requiring NET_BIND_SERVICE capabilities to bind port numbers <1000. + # This should be uncommented and set as a default by the chart once we graduate + # the apiVersion of WebhookConfiguration past v1alpha1. + # securePort: 10250 strategy: {} - # Pod Security Context to be set on the webhook component Pod. - # For more information, see [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/). - # +docs:property + # type: RollingUpdate + # rollingUpdate: + # maxSurge: 0 + # maxUnavailable: 1 + + # Pod Security Context to be set on the webhook component Pod + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ securityContext: runAsNonRoot: true seccompProfile: type: RuntimeDefault - # Container Security Context to be set on the webhook component container. - # For more information, see [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/). - # +docs:property + podDisruptionBudget: + enabled: false + minAvailable: 1 + # maxUnavailable: 1 + # minAvailable and maxUnavailable can either be set to an integer (e.g. 1) + # or a percentage value (e.g. 25%) + + # Container Security Context to be set on the webhook component container + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ containerSecurityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL - readOnlyRootFilesystem: true - podDisruptionBudget: - # Enable or disable the PodDisruptionBudget resource. - # - # This prevents downtime during voluntary disruptions such as during a Node upgrade. - # For example, the PodDisruptionBudget will block `kubectl drain` - # if it is used on the Node where the only remaining cert-manager - # Pod is currently running. - enabled: false - # This property configures the minimum available pods for disruptions. Can either be set to - # an integer (e.g. 1) or a percentage value (e.g. 25%). - # It cannot be used if `maxUnavailable` is set. - # +docs:property - # minAvailable: 1 - # This property configures the maximum unavailable pods for disruptions. Can either be set to - # an integer (e.g. 1) or a percentage value (e.g. 25%). 
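As a sketch of the GKE private-cluster case described in the comments above, a values override could set the webhook listen port through the structured `WebhookConfiguration`. This is illustrative only; per the comments, flags passed via `extraArgs` take precedence over options set here.

```yaml
# Illustrative override; uses the v1alpha1 WebhookConfiguration named in the comments above.
webhook:
  config:
    apiVersion: webhook.config.cert-manager.io/v1alpha1
    kind: WebhookConfiguration
    securePort: 10250   # port that GKE private-cluster API servers can already reach
  securePort: 10250     # keep the chart's Service target port in sync with the config
```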
- # It cannot be used if `minAvailable` is set. - # +docs:property - # maxUnavailable: 1 - - # Optional additional annotations to add to the webhook Deployment. - # +docs:property + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # Optional additional annotations to add to the webhook Deployment # deploymentAnnotations: {} - # Optional additional annotations to add to the webhook Pods. - # +docs:property + # Optional additional annotations to add to the webhook Pods # podAnnotations: {} - # Optional additional annotations to add to the webhook Service. - # +docs:property + # Optional additional annotations to add to the webhook Service # serviceAnnotations: {} - # Optional additional annotations to add to the webhook MutatingWebhookConfiguration. - # +docs:property + # Optional additional annotations to add to the webhook MutatingWebhookConfiguration # mutatingWebhookConfigurationAnnotations: {} - # Optional additional annotations to add to the webhook ValidatingWebhookConfiguration. - # +docs:property + # Optional additional annotations to add to the webhook ValidatingWebhookConfiguration # validatingWebhookConfigurationAnnotations: {} - validatingWebhookConfiguration: - # Configure spec.namespaceSelector for validating webhooks. - # +docs:property - namespaceSelector: - matchExpressions: - - key: "cert-manager.io/disable-validation" - operator: "NotIn" - values: - - "true" - mutatingWebhookConfiguration: - # Configure spec.namespaceSelector for mutating webhooks. - # +docs:property - namespaceSelector: {} - # matchLabels: - # key: value - # matchExpressions: - # - key: kubernetes.io/metadata.name - # operator: NotIn - # values: - # - kube-system + # Additional command line flags to pass to cert-manager webhook binary. - # To see all available flags run `docker run quay.io/jetstack/cert-manager-webhook: --help`. + # To see all available flags run docker run quay.io/jetstack/cert-manager-webhook: --help extraArgs: [] - # Path to a file containing a WebhookConfiguration object used to configure the webhook. + # Path to a file containing a WebhookConfiguration object used to configure the webhook # - --config= - # Comma separated list of feature gates that should be enabled on the - # webhook pod. - featureGates: "" - # Resources to provide to the cert-manager webhook pod. - # - # For example: - # requests: - # cpu: 10m - # memory: 32Mi - # - # For more information, see [Resource Management for Pods and Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). resources: {} - # Liveness probe values. - # For more information, see [Container probes](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes). - # - # +docs:property + # requests: + # cpu: 10m + # memory: 32Mi + + ## Liveness and readiness probe values + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## livenessProbe: failureThreshold: 3 initialDelaySeconds: 60 periodSeconds: 10 successThreshold: 1 timeoutSeconds: 1 - # Readiness probe values. - # For more information, see [Container probes](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes). - # - # +docs:property readinessProbe: failureThreshold: 3 initialDelaySeconds: 5 periodSeconds: 5 successThreshold: 1 timeoutSeconds: 1 - # The nodeSelector on Pods tells Kubernetes to schedule Pods on the nodes with - # matching labels. 
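A small `resources` override for the webhook, expanding the commented hints above; the numbers are placeholders to be tuned per cluster, not chart defaults.

```yaml
# Example only; request/limit values are placeholders.
webhook:
  resources:
    requests:
      cpu: 10m
      memory: 32Mi
    limits:
      memory: 64Mi   # example ceiling, not a chart default
```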
- # For more information, see [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/). - # - # This default ensures that Pods are only scheduled to Linux nodes. - # It prevents Pods being scheduled to Windows nodes in a mixed OS cluster. - # +docs:property nodeSelector: kubernetes.io/os: linux - # A Kubernetes Affinity, if required. For more information, see [Affinity v1 core](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#affinity-v1-core). - # - # For example: - # affinity: - # nodeAffinity: - # requiredDuringSchedulingIgnoredDuringExecution: - # nodeSelectorTerms: - # - matchExpressions: - # - key: foo.bar.com/role - # operator: In - # values: - # - master affinity: {} - # A list of Kubernetes Tolerations, if required. For more information, see [Toleration v1 core](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#toleration-v1-core). - # - # For example: - # tolerations: - # - key: foo.bar.com/role - # operator: Equal - # value: master - # effect: NoSchedule tolerations: [] - # A list of Kubernetes TopologySpreadConstraints, if required. For more information, see [Topology spread constraint v1 core](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#topologyspreadconstraint-v1-core). - # - # For example: - # topologySpreadConstraints: - # - maxSkew: 2 - # topologyKey: topology.kubernetes.io/zone - # whenUnsatisfiable: ScheduleAnyway - # labelSelector: - # matchLabels: - # app.kubernetes.io/instance: cert-manager - # app.kubernetes.io/component: controller topologySpreadConstraints: [] - # Optional additional labels to add to the Webhook Pods. + # Optional additional labels to add to the Webhook Pods podLabels: {} - # Optional additional labels to add to the Webhook Service. + # Optional additional labels to add to the Webhook Service serviceLabels: {} - # Optionally set the IP family policy for the controller Service to configure dual-stack; see [Configure dual-stack](https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services). - serviceIPFamilyPolicy: "" - # Optionally set the IP families for the controller Service that should be supported, in the order in which they should be applied to ClusterIP. Can be IPv4 and/or IPv6. - serviceIPFamilies: [] image: - # The container registry to pull the webhook image from. - # +docs:property - # registry: quay.io - - # The container image for the cert-manager webhook - # +docs:property repository: quay.io/jetstack/cert-manager-webhook + # You can manage a registry with + # registry: quay.io + # repository: jetstack/cert-manager-webhook + # Override the image tag to deploy by setting this variable. # If no value is set, the chart's appVersion will be used. - # +docs:property - # tag: vX.Y.Z + # tag: canary # Setting a digest will override any tag - # +docs:property # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 - - # Kubernetes imagePullPolicy on Deployment. pullPolicy: IfNotPresent - digest: sha256:85df7b64a3d66de3cd7995ae0f2151b54fd18db424cb7cf84d3bd6d4a39d975f + digest: sha256:db0bb8c02c0b82f3055315fbc52ad41b90fbe94f82431a0d76666f7c6beeb7f0 serviceAccount: - # Specifies whether a service account should be created. + # Specifies whether a service account should be created create: true # The name of the service account to use. - # If not set and create is true, a name is generated using the fullname template. 
- # +docs:property + # If not set and create is true, a name is generated using the fullname template # name: "" - - # Optional additional annotations to add to the controller's Service Account. - # +docs:property + # Optional additional annotations to add to the controller's ServiceAccount # annotations: {} - - # Optional additional labels to add to the webhook's Service Account. - # +docs:property + # Optional additional labels to add to the webhook's ServiceAccount # labels: {} - # Automount API credentials for a Service Account. automountServiceAccountToken: true - # Automounting API credentials for a particular pod. - # +docs:property + # Automounting API credentials for a particular pod # automountServiceAccountToken: true - # The port that the webhook listens on for requests. - # In GKE private clusters, by default Kubernetes apiservers are allowed to - # talk to the cluster nodes only on 443 and 10250. Configuring - # securePort: 10250, therefore will work out-of-the-box without needing to add firewall - # rules or requiring NET_BIND_SERVICE capabilities to bind port numbers <1000. + # The port that the webhook should listen on for requests. + # In GKE private clusters, by default kubernetes apiservers are allowed to + # talk to the cluster nodes only on 443 and 10250. so configuring + # securePort: 10250, will work out of the box without needing to add firewall + # rules or requiring NET_BIND_SERVICE capabilities to bind port numbers <1000 securePort: 10250 # Specifies if the webhook should be started in hostNetwork mode. # @@ -771,12 +381,10 @@ webhook: # running in hostNetwork mode. hostNetwork: false # Specifies how the service should be handled. Useful if you want to expose the - # webhook outside of the cluster. In some cases, the control plane cannot + # webhook to outside of the cluster. In some cases, the control plane cannot # reach internal services. serviceType: ClusterIP - # Specify the load balancer IP for the created service. - # +docs:property - # loadBalancerIP: "10.10.10.10" + # loadBalancerIP: # Overrides the mutating webhook and validating webhook so they reach the webhook # service using the `url` field instead of a service. @@ -785,18 +393,11 @@ webhook: # Enables default network policies for webhooks. networkPolicy: - # Create network policies for the webhooks. enabled: false - # Ingress rule for the webhook network policy. By default, it allows all - # inbound traffic. - # +docs:property ingress: - from: - ipBlock: cidr: 0.0.0.0/0 - # Egress rule for the webhook network policy. By default, it allows all - # outbound traffic to ports 80 and 443, as well as DNS ports. - # +docs:property egress: - ports: - port: 80 @@ -807,393 +408,202 @@ webhook: protocol: TCP - port: 53 protocol: UDP - # On OpenShift and OKD, the Kubernetes API server listens on. + # On OpenShift and OKD, the Kubernetes API server listens on # port 6443. - port: 6443 protocol: TCP to: - ipBlock: cidr: 0.0.0.0/0 - # Additional volumes to add to the cert-manager controller pod. volumes: [] - # Additional volume mounts to add to the cert-manager controller container. volumeMounts: [] - # enableServiceLinks indicates whether information about services should be - # injected into the pod's environment variables, matching the syntax of Docker - # links. - enableServiceLinks: false -# +docs:section=CA Injector cainjector: - # Create the CA Injector deployment enabled: true - # The number of replicas of the cert-manager cainjector to run. 
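If the webhook network policy shown above is enabled but a narrower ingress rule than `0.0.0.0/0` is wanted, one illustrative override (the CIDR is purely an example) is:

```yaml
# Illustrative override of the webhook network policy values shown above.
webhook:
  networkPolicy:
    enabled: true
    ingress:
      - from:
          - ipBlock:
              cidr: 10.0.0.0/8   # example: replace 0.0.0.0/0 with your cluster/VPC range
```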
- # - # The default is 1, but in production set this to 2 or 3 to provide high - # availability. - # - # If `replicas > 1`, consider setting `cainjector.podDisruptionBudget.enabled=true`. - # - # Note that cert-manager uses leader election to ensure that there can - # only be a single instance active at a time. replicaCount: 1 - # This is used to configure options for the cainjector pod. - # It allows setting options that are usually provided via flags. - # An APIVersion and Kind must be specified in your values.yaml file. - # Flags override options that are set here. - # - # For example: - # apiVersion: cainjector.config.cert-manager.io/v1alpha1 - # kind: CAInjectorConfiguration - # logging: - # verbosity: 2 - # format: text - # leaderElectionConfig: - # namespace: kube-system - config: {} - # Deployment update strategy for the cert-manager cainjector deployment. - # For more information, see the [Kubernetes documentation](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy). - # - # For example: - # strategy: - # type: RollingUpdate - # rollingUpdate: - # maxSurge: 0 - # maxUnavailable: 1 strategy: {} + # type: RollingUpdate + # rollingUpdate: + # maxSurge: 0 + # maxUnavailable: 1 + # Pod Security Context to be set on the cainjector component Pod - # For more information, see [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/). - # +docs:property + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ securityContext: runAsNonRoot: true seccompProfile: type: RuntimeDefault + podDisruptionBudget: + enabled: false + minAvailable: 1 + # maxUnavailable: 1 + # minAvailable and maxUnavailable can either be set to an integer (e.g. 1) + # or a percentage value (e.g. 25%) + # Container Security Context to be set on the cainjector component container - # For more information, see [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/). - # +docs:property + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ containerSecurityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL - readOnlyRootFilesystem: true - podDisruptionBudget: - # Enable or disable the PodDisruptionBudget resource. - # - # This prevents downtime during voluntary disruptions such as during a Node upgrade. - # For example, the PodDisruptionBudget will block `kubectl drain` - # if it is used on the Node where the only remaining cert-manager - # Pod is currently running. - enabled: false - # `minAvailable` configures the minimum available pods for disruptions. It can either be set to - # an integer (e.g. 1) or a percentage value (e.g. 25%). - # Cannot be used if `maxUnavailable` is set. - # +docs:property - # minAvailable: 1 - # `maxUnavailable` configures the maximum unavailable pods for disruptions. It can either be set to - # an integer (e.g. 1) or a percentage value (e.g. 25%). - # Cannot be used if `minAvailable` is set. - # +docs:property - # maxUnavailable: 1 - - # Optional additional annotations to add to the cainjector Deployment. - # +docs:property + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # Optional additional annotations to add to the cainjector Deployment # deploymentAnnotations: {} - # Optional additional annotations to add to the cainjector Pods. 
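For a more highly available cainjector, the values above can be combined roughly as follows. This is a sketch only; cert-manager's leader election still keeps a single cainjector instance active at a time.

```yaml
# Illustrative HA settings for cainjector.
cainjector:
  replicaCount: 2
  podDisruptionBudget:
    enabled: true
    minAvailable: 1   # set either minAvailable or maxUnavailable, not both
```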
- # +docs:property + # Optional additional annotations to add to the cainjector Pods # podAnnotations: {} # Additional command line flags to pass to cert-manager cainjector binary. - # To see all available flags run `docker run quay.io/jetstack/cert-manager-cainjector: --help`. + # To see all available flags run docker run quay.io/jetstack/cert-manager-cainjector: --help extraArgs: [] - # Enable profiling for cainjector. + # Enable profiling for cainjector # - --enable-profiling=true - # Comma separated list of feature gates that should be enabled on the - # cainjector pod. - featureGates: "" - # Resources to provide to the cert-manager cainjector pod. - # - # For example: - # requests: - # cpu: 10m - # memory: 32Mi - # - # For more information, see [Resource Management for Pods and Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). resources: {} - # The nodeSelector on Pods tells Kubernetes to schedule Pods on the nodes with - # matching labels. - # For more information, see [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/). - # - # This default ensures that Pods are only scheduled to Linux nodes. - # It prevents Pods being scheduled to Windows nodes in a mixed OS cluster. - # +docs:property + # requests: + # cpu: 10m + # memory: 32Mi + nodeSelector: kubernetes.io/os: linux - # A Kubernetes Affinity, if required. For more information, see [Affinity v1 core](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#affinity-v1-core). - # - # For example: - # affinity: - # nodeAffinity: - # requiredDuringSchedulingIgnoredDuringExecution: - # nodeSelectorTerms: - # - matchExpressions: - # - key: foo.bar.com/role - # operator: In - # values: - # - master affinity: {} - # A list of Kubernetes Tolerations, if required. For more information, see [Toleration v1 core](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#toleration-v1-core). - # - # For example: - # tolerations: - # - key: foo.bar.com/role - # operator: Equal - # value: master - # effect: NoSchedule tolerations: [] - # A list of Kubernetes TopologySpreadConstraints, if required. For more information, see [Topology spread constraint v1 core](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#topologyspreadconstraint-v1-core). - # - # For example: - # topologySpreadConstraints: - # - maxSkew: 2 - # topologyKey: topology.kubernetes.io/zone - # whenUnsatisfiable: ScheduleAnyway - # labelSelector: - # matchLabels: - # app.kubernetes.io/instance: cert-manager - # app.kubernetes.io/component: controller topologySpreadConstraints: [] - # Optional additional labels to add to the CA Injector Pods. + # Optional additional labels to add to the CA Injector Pods podLabels: {} image: - # The container registry to pull the cainjector image from. - # +docs:property - # registry: quay.io - - # The container image for the cert-manager cainjector - # +docs:property repository: quay.io/jetstack/cert-manager-cainjector + # You can manage a registry with + # registry: quay.io + # repository: jetstack/cert-manager-cainjector + # Override the image tag to deploy by setting this variable. # If no value is set, the chart's appVersion will be used. - # +docs:property - # tag: vX.Y.Z + # tag: canary - # Setting a digest will override any tag. 
- # +docs:property + # Setting a digest will override any tag # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 - - # Kubernetes imagePullPolicy on Deployment. pullPolicy: IfNotPresent - digest: sha256:edb1c1e0083ee4cd8e2ccb296ee0f436d2e465ecf90159f9d03141fc19bd3c23 + digest: sha256:2a70d9497a645101210d077874c35dc0431233d8c6e53a851835ca301523d64b serviceAccount: - # Specifies whether a service account should be created. + # Specifies whether a service account should be created create: true # The name of the service account to use. # If not set and create is true, a name is generated using the fullname template - # +docs:property # name: "" - - # Optional additional annotations to add to the controller's Service Account. - # +docs:property + # Optional additional annotations to add to the controller's ServiceAccount # annotations: {} - - # Optional additional labels to add to the cainjector's Service Account. - # +docs:property - # labels: {} - # Automount API credentials for a Service Account. + # Optional additional labels to add to the cainjector's ServiceAccount + # labels: {} automountServiceAccountToken: true - # Automounting API credentials for a particular pod. - # +docs:property + # Automounting API credentials for a particular pod # automountServiceAccountToken: true - - # Additional volumes to add to the cert-manager controller pod. volumes: [] - # Additional volume mounts to add to the cert-manager controller container. volumeMounts: [] - # enableServiceLinks indicates whether information about services should be - # injected into the pod's environment variables, matching the syntax of Docker - # links. - enableServiceLinks: false -# +docs:section=ACME Solver acmesolver: image: - # The container registry to pull the acmesolver image from. - # +docs:property - # registry: quay.io - - # The container image for the cert-manager acmesolver. - # +docs:property repository: quay.io/jetstack/cert-manager-acmesolver - # Override the image tag to deploy by setting this variable. - # If no value is set, the chart's appVersion is used. - # +docs:property - # tag: vX.Y.Z + # You can manage a registry with + # registry: quay.io + # repository: jetstack/cert-manager-acmesolver - # Setting a digest will override any tag. - # +docs:property - # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 + digest: sha256:12a62e54ba8defda94df71ef76f9c8fe68405d59370f665991734d6b692e35f2 +# Override the image tag to deploy by setting this variable. +# If no value is set, the chart's appVersion will be used. +# tag: canary + +# Setting a digest will override any tag +# digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 - # Kubernetes imagePullPolicy on Deployment. - pullPolicy: IfNotPresent - digest: sha256:99feb5d6cd8e8b4c6eb1ab14b317304141c25114b0bd3e5588b9f551f664cb8f -# +docs:section=Startup API Check # This startupapicheck is a Helm post-install hook that waits for the webhook # endpoints to become available. -# The check is implemented using a Kubernetes Job - if you are injecting mesh -# sidecar proxies into cert-manager pods, ensure that they -# are not injected into this Job's pod. Otherwise, the installation may time out -# owing to the Job never being completed because the sidecar proxy does not exit. -# For more information, see [this note](https://github.com/cert-manager/cert-manager/pull/4414). 
+# The check is implemented using a Kubernetes Job- if you are injecting mesh +# sidecar proxies into cert-manager pods, you probably want to ensure that they +# are not injected into this Job's pod. Otherwise the installation may time out +# due to the Job never being completed because the sidecar proxy does not exit. +# See https://github.com/cert-manager/cert-manager/pull/4414 for context. startupapicheck: - # Enables the startup api check. enabled: true - # Pod Security Context to be set on the startupapicheck component Pod. - # For more information, see [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/). - # +docs:property + # Pod Security Context to be set on the startupapicheck component Pod + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ securityContext: runAsNonRoot: true seccompProfile: type: RuntimeDefault - # Container Security Context to be set on the controller component container. - # For more information, see [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/). - # +docs:property + # Container Security Context to be set on the controller component container + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ containerSecurityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL - readOnlyRootFilesystem: true - # Timeout for 'kubectl check api' command. + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # Timeout for 'kubectl check api' command timeout: 1m # Job backoffLimit backoffLimit: 4 - # Optional additional annotations to add to the startupapicheck Job. - # +docs:property + # Optional additional annotations to add to the startupapicheck Job jobAnnotations: helm.sh/hook: post-install helm.sh/hook-weight: "1" helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded - # Optional additional annotations to add to the startupapicheck Pods. - # +docs:property + # Optional additional annotations to add to the startupapicheck Pods # podAnnotations: {} # Additional command line flags to pass to startupapicheck binary. - # To see all available flags run `docker run quay.io/jetstack/cert-manager-startupapicheck: --help`. - # - # Verbose logging is enabled by default so that if startupapicheck fails, you - # can know what exactly caused the failure. Verbose logs include details of - # the webhook URL, IP address and TCP connect errors for example. - # +docs:property - extraArgs: - - -v - # Resources to provide to the cert-manager controller pod. - # - # For example: - # requests: - # cpu: 10m - # memory: 32Mi - # - # For more information, see [Resource Management for Pods and Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). + # To see all available flags run docker run quay.io/jetstack/cert-manager-ctl: --help + extraArgs: [] resources: {} - # The nodeSelector on Pods tells Kubernetes to schedule Pods on the nodes with - # matching labels. - # For more information, see [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/). - # - # This default ensures that Pods are only scheduled to Linux nodes. - # It prevents Pods being scheduled to Windows nodes in a mixed OS cluster. - # +docs:property + # requests: + # cpu: 10m + # memory: 32Mi + nodeSelector: kubernetes.io/os: linux - # A Kubernetes Affinity, if required. 
For more information, see [Affinity v1 core](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#affinity-v1-core). - # For example: - # affinity: - # nodeAffinity: - # requiredDuringSchedulingIgnoredDuringExecution: - # nodeSelectorTerms: - # - matchExpressions: - # - key: foo.bar.com/role - # operator: In - # values: - # - master affinity: {} - # A list of Kubernetes Tolerations, if required. For more information, see [Toleration v1 core](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#toleration-v1-core). - # - # For example: - # tolerations: - # - key: foo.bar.com/role - # operator: Equal - # value: master - # effect: NoSchedule tolerations: [] - # Optional additional labels to add to the startupapicheck Pods. + # Optional additional labels to add to the startupapicheck Pods podLabels: {} image: - # The container registry to pull the startupapicheck image from. - # +docs:property + repository: quay.io/jetstack/cert-manager-ctl + # You can manage a registry with # registry: quay.io + # repository: jetstack/cert-manager-ctl - # The container image for the cert-manager startupapicheck. - # +docs:property - repository: quay.io/jetstack/cert-manager-startupapicheck # Override the image tag to deploy by setting this variable. - # If no value is set, the chart's appVersion is used. - # +docs:property - # tag: vX.Y.Z + # If no value is set, the chart's appVersion will be used. + # tag: canary - # Setting a digest will override any tag. - # +docs:property + # Setting a digest will override any tag # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 - - # Kubernetes imagePullPolicy on Deployment. pullPolicy: IfNotPresent - digest: sha256:6365e940a5a913a3aeca0ea519102236d9bec5f0e8f0011fa3498c26d18348e5 + digest: sha256:1b988a4a2ae83aae995d396fa67fdb4c90bc55bc91ea74679f17c6c347541406 rbac: - # annotations for the startup API Check job RBAC and PSP resources. - # +docs:property + # annotations for the startup API Check job RBAC and PSP resources annotations: helm.sh/hook: post-install helm.sh/hook-weight: "-5" helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded - # Automounting API credentials for a particular pod. - # +docs:property + # Automounting API credentials for a particular pod # automountServiceAccountToken: true serviceAccount: - # Specifies whether a service account should be created. + # Specifies whether a service account should be created create: true # The name of the service account to use. - # If not set and create is true, a name is generated using the fullname template. - # +docs:property + # If not set and create is true, a name is generated using the fullname template # name: "" - # Optional additional annotations to add to the Job's Service Account. - # +docs:property + # Optional additional annotations to add to the Job's ServiceAccount annotations: helm.sh/hook: post-install helm.sh/hook-weight: "-5" helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded # Automount API credentials for a Service Account. - # +docs:property automountServiceAccountToken: true - # Optional additional labels to add to the startupapicheck's Service Account. - # +docs:property + # Optional additional labels to add to the startupapicheck's ServiceAccount # labels: {} - # Additional volumes to add to the cert-manager controller pod. volumes: [] - # Additional volume mounts to add to the cert-manager controller container. 
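Because the startupapicheck Job described above must run to completion, mesh sidecar injection is usually excluded for its Pod. A hedged example using the chart's `podAnnotations` value (the annotation name depends on the mesh; Istio is shown, Linkerd would use `linkerd.io/inject: disabled`):

```yaml
# Illustrative mesh exclusion for the post-install check Job.
startupapicheck:
  enabled: true
  podAnnotations:
    sidecar.istio.io/inject: "false"   # Istio example; adjust for your mesh
```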
volumeMounts: [] - # enableServiceLinks indicates whether information about services should be - # injected into pod's environment variables, matching the syntax of Docker - # links. - enableServiceLinks: false -# Create dynamic manifests via values. -# -# For example: -# extraObjects: -# - | -# apiVersion: v1 -# kind: ConfigMap -# metadata: -# name: '{{ template "cert-manager.name" . }}-extra-configmap' -extraObjects: [] diff --git a/internal/constellation/helm/charts/cilium/Chart.yaml b/internal/constellation/helm/charts/cilium/Chart.yaml index cca1abd89..3ba2d273f 100644 --- a/internal/constellation/helm/charts/cilium/Chart.yaml +++ b/internal/constellation/helm/charts/cilium/Chart.yaml @@ -2,10 +2,10 @@ apiVersion: v2 name: cilium displayName: Cilium home: https://cilium.io/ -version: 1.15.19-edg.0 -appVersion: 1.15.19-edg.0 +version: 1.15.0-pre.3-edg.3 +appVersion: 1.15.0-pre.3-edg.3 kubeVersion: ">= 1.16.0-0" -icon: https://cdn.jsdelivr.net/gh/cilium/cilium@v1.15/Documentation/images/logo-solo.svg +icon: https://cdn.jsdelivr.net/gh/cilium/cilium@main/Documentation/images/logo-solo.svg description: eBPF-based Networking, Security, and Observability keywords: - BPF diff --git a/internal/constellation/helm/charts/cilium/README.md b/internal/constellation/helm/charts/cilium/README.md index 5e177569d..bdbca6edc 100644 --- a/internal/constellation/helm/charts/cilium/README.md +++ b/internal/constellation/helm/charts/cilium/README.md @@ -1,6 +1,6 @@ # cilium -![Version: 1.15.19](https://img.shields.io/badge/Version-1.15.19-informational?style=flat-square) ![AppVersion: 1.15.19](https://img.shields.io/badge/AppVersion-1.15.19-informational?style=flat-square) +![Version: 1.15.0-pre.3](https://img.shields.io/badge/Version-1.15.0--pre.3-informational?style=flat-square) ![AppVersion: 1.15.0-pre.3](https://img.shields.io/badge/AppVersion-1.15.0--pre.3-informational?style=flat-square) Cilium is open source software for providing and transparently securing network connectivity and loadbalancing between application workloads such as @@ -46,7 +46,7 @@ offer from the [Getting Started Guides page](https://docs.cilium.io/en/stable/ge ## Getting Help The best way to get help if you get stuck is to ask a question on the -[Cilium Slack channel](https://slack.cilium.io). With Cilium +[Cilium Slack channel](https://cilium.herokuapp.com/). With Cilium contributors across the globe, there is almost always someone available to help. ## Values @@ -73,17 +73,16 @@ contributors across the globe, there is almost always someone available to help. 
| authentication.mutual.spire.enabled | bool | `false` | Enable SPIRE integration (beta) | | authentication.mutual.spire.install.agent.affinity | object | `{}` | SPIRE agent affinity configuration | | authentication.mutual.spire.install.agent.annotations | object | `{}` | SPIRE agent annotations | -| authentication.mutual.spire.install.agent.image | object | `{"digest":"sha256:99405637647968245ff9fe215f8bd2bd0ea9807be9725f8bf19fe1b21471e52b","override":null,"pullPolicy":"IfNotPresent","repository":"ghcr.io/spiffe/spire-agent","tag":"1.8.5","useDigest":true}` | SPIRE agent image | +| authentication.mutual.spire.install.agent.image | object | `{"digest":"sha256:d489bc8470d7a0f292e0e3576c3e7025253343dc798241bcfd9061828e2a6bef","override":null,"pullPolicy":"IfNotPresent","repository":"ghcr.io/spiffe/spire-agent","tag":"1.8.4","useDigest":true}` | SPIRE agent image | | authentication.mutual.spire.install.agent.labels | object | `{}` | SPIRE agent labels | | authentication.mutual.spire.install.agent.nodeSelector | object | `{}` | SPIRE agent nodeSelector configuration ref: ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector | | authentication.mutual.spire.install.agent.podSecurityContext | object | `{}` | Security context to be added to spire agent pods. SecurityContext holds pod-level security attributes and common container settings. ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod | | authentication.mutual.spire.install.agent.securityContext | object | `{}` | Security context to be added to spire agent containers. SecurityContext holds pod-level security attributes and common container settings. ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container | | authentication.mutual.spire.install.agent.serviceAccount | object | `{"create":true,"name":"spire-agent"}` | SPIRE agent service account | | authentication.mutual.spire.install.agent.skipKubeletVerification | bool | `true` | SPIRE Workload Attestor kubelet verification. | -| authentication.mutual.spire.install.agent.tolerations | list | `[{"effect":"NoSchedule","key":"node.kubernetes.io/not-ready"},{"effect":"NoSchedule","key":"node-role.kubernetes.io/master"},{"effect":"NoSchedule","key":"node-role.kubernetes.io/control-plane"},{"effect":"NoSchedule","key":"node.cloudprovider.kubernetes.io/uninitialized","value":"true"},{"key":"CriticalAddonsOnly","operator":"Exists"}]` | SPIRE agent tolerations configuration By default it follows the same tolerations as the agent itself to allow the Cilium agent on this node to connect to SPIRE. ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ | +| authentication.mutual.spire.install.agent.tolerations | list | `[]` | SPIRE agent tolerations configuration ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ | | authentication.mutual.spire.install.enabled | bool | `true` | Enable SPIRE installation. This will only take effect only if authentication.mutual.spire.enabled is true | -| authentication.mutual.spire.install.existingNamespace | bool | `false` | SPIRE namespace already exists. Set to true if Helm should not create, manage, and import the SPIRE namespace. 
| -| authentication.mutual.spire.install.initImage | object | `{"digest":"sha256:7edf5efe6b86dbf01ccc3c76b32a37a8e23b84e6bad81ce8ae8c221fa456fda8","override":null,"pullPolicy":"IfNotPresent","repository":"docker.io/library/busybox","tag":"1.36.1","useDigest":true}` | init container image of SPIRE agent and server | +| authentication.mutual.spire.install.initImage | object | `{"digest":"sha256:223ae047b1065bd069aac01ae3ac8088b3ca4a527827e283b85112f29385fb1b","override":null,"pullPolicy":"IfNotPresent","repository":"docker.io/library/busybox","tag":"1.36.1","useDigest":true}` | init container image of SPIRE agent and server | | authentication.mutual.spire.install.namespace | string | `"cilium-spire"` | SPIRE namespace to install into | | authentication.mutual.spire.install.server.affinity | object | `{}` | SPIRE server affinity configuration | | authentication.mutual.spire.install.server.annotations | object | `{}` | SPIRE server annotations | @@ -93,7 +92,7 @@ contributors across the globe, there is almost always someone available to help. | authentication.mutual.spire.install.server.dataStorage.enabled | bool | `true` | Enable SPIRE server data storage | | authentication.mutual.spire.install.server.dataStorage.size | string | `"1Gi"` | Size of the SPIRE server data storage | | authentication.mutual.spire.install.server.dataStorage.storageClass | string | `nil` | StorageClass of the SPIRE server data storage | -| authentication.mutual.spire.install.server.image | object | `{"digest":"sha256:28269265882048dcf0fed32fe47663cd98613727210b8d1a55618826f9bf5428","override":null,"pullPolicy":"IfNotPresent","repository":"ghcr.io/spiffe/spire-server","tag":"1.8.5","useDigest":true}` | SPIRE server image | +| authentication.mutual.spire.install.server.image | object | `{"digest":"sha256:bf79e0a921f8b8aa92602f7ea335616e72f7e91f939848e7ccc52d5bddfe96a1","override":null,"pullPolicy":"IfNotPresent","repository":"ghcr.io/spiffe/spire-server","tag":"1.8.4","useDigest":true}` | SPIRE server image | | authentication.mutual.spire.install.server.initContainers | list | `[]` | SPIRE server init containers | | authentication.mutual.spire.install.server.labels | object | `{}` | SPIRE server labels | | authentication.mutual.spire.install.server.nodeSelector | object | `{}` | SPIRE server nodeSelector configuration ref: ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector | @@ -136,14 +135,13 @@ contributors across the globe, there is almost always someone available to help. | bpf.monitorInterval | string | `"5s"` | Configure the typical time between monitor notifications for active connections. | | bpf.natMax | int | `524288` | Configure the maximum number of entries for the NAT table. | | bpf.neighMax | int | `524288` | Configure the maximum number of entries for the neighbor table. | -| bpf.nodeMapMax | int | `nil` | Configures the maximum number of entries for the node table. | -| bpf.policyMapMax | int | `16384` | Configure the maximum number of entries in endpoint policy map (per endpoint). @schema type: [null, integer] @schema | +| bpf.policyMapMax | int | `16384` | Configure the maximum number of entries in endpoint policy map (per endpoint). | | bpf.preallocateMaps | bool | `false` | Enables pre-allocation of eBPF map values. This increases memory usage but can reduce latency. 
| | bpf.root | string | `"/sys/fs/bpf"` | Configure the mount point for the BPF filesystem | | bpf.tproxy | bool | `false` | Configure the eBPF-based TPROXY to reduce reliance on iptables rules for implementing Layer 7 policy. | | bpf.vlanBypass | list | `[]` | Configure explicitly allowed VLAN id's for bpf logic bypass. [0] will allow all VLAN id's without any filtering. | | bpfClockProbe | bool | `false` | Enable BPF clock source probing for more efficient tick retrieval. | -| certgen | object | `{"affinity":{},"annotations":{"cronJob":{},"job":{}},"extraVolumeMounts":[],"extraVolumes":[],"image":{"digest":"sha256:28511366bb5dc99b6ec424dc87399945714d57a586194658d9e2316ba3db4d04","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/certgen","tag":"v0.1.19","useDigest":true},"podLabels":{},"tolerations":[],"ttlSecondsAfterFinished":1800}` | Configure certificate generation for Hubble integration. If hubble.tls.auto.method=cronJob, these values are used for the Kubernetes CronJob which will be scheduled regularly to (re)generate any certificates not provided manually. | +| certgen | object | `{"affinity":{},"annotations":{"cronJob":{},"job":{}},"extraVolumeMounts":[],"extraVolumes":[],"image":{"digest":"sha256:89a0847753686444daabde9474b48340993bd19c7bea66a46e45b2974b82041f","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/certgen","tag":"v0.1.9","useDigest":true},"podLabels":{},"tolerations":[],"ttlSecondsAfterFinished":1800}` | Configure certificate generation for Hubble integration. If hubble.tls.auto.method=cronJob, these values are used for the Kubernetes CronJob which will be scheduled regularly to (re)generate any certificates not provided manually. | | certgen.affinity | object | `{}` | Affinity for certgen | | certgen.annotations | object | `{"cronJob":{},"job":{}}` | Annotations to be added to the hubble-certgen initial Job and CronJob | | certgen.extraVolumeMounts | list | `[]` | Additional certgen volumeMounts. | @@ -171,7 +169,7 @@ contributors across the globe, there is almost always someone available to help. | clustermesh.apiserver.extraEnv | list | `[]` | Additional clustermesh-apiserver environment variables. | | clustermesh.apiserver.extraVolumeMounts | list | `[]` | Additional clustermesh-apiserver volumeMounts. | | clustermesh.apiserver.extraVolumes | list | `[]` | Additional clustermesh-apiserver volumes. | -| clustermesh.apiserver.image | object | `{"digest":"","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/clustermesh-apiserver","tag":"v1.15.19","useDigest":false}` | Clustermesh API server image. | +| clustermesh.apiserver.image | object | `{"digest":"","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/clustermesh-apiserver","tag":"v1.15.0-pre.3","useDigest":false}` | Clustermesh API server image. | | clustermesh.apiserver.kvstoremesh.enabled | bool | `false` | Enable KVStoreMesh. KVStoreMesh caches the information retrieved from the remote clusters in the local etcd instance. | | clustermesh.apiserver.kvstoremesh.extraArgs | list | `[]` | Additional KVStoreMesh arguments. | | clustermesh.apiserver.kvstoremesh.extraEnv | list | `[]` | Additional KVStoreMesh environment variables. | @@ -213,8 +211,6 @@ contributors across the globe, there is almost always someone available to help. 
| clustermesh.apiserver.service.annotations | object | `{}` | Annotations for the clustermesh-apiserver For GKE LoadBalancer, use annotation cloud.google.com/load-balancer-type: "Internal" For EKS LoadBalancer, use annotation service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 | | clustermesh.apiserver.service.externalTrafficPolicy | string | `nil` | The externalTrafficPolicy of service used for apiserver access. | | clustermesh.apiserver.service.internalTrafficPolicy | string | `nil` | The internalTrafficPolicy of service used for apiserver access. | -| clustermesh.apiserver.service.loadBalancerClass | string | `nil` | Configure a loadBalancerClass. Allows to configure the loadBalancerClass on the clustermesh-apiserver LB service in case the Service type is set to LoadBalancer (requires Kubernetes 1.24+). | -| clustermesh.apiserver.service.loadBalancerIP | string | `nil` | Configure a specific loadBalancerIP. Allows to configure a specific loadBalancerIP on the clustermesh-apiserver LB service in case the Service type is set to LoadBalancer. | | clustermesh.apiserver.service.nodePort | int | `32379` | Optional port to use as the node port for apiserver access. WARNING: make sure to configure a different NodePort in each cluster if kube-proxy replacement is enabled, as Cilium is currently affected by a known bug (#24692) when NodePorts are handled by the KPR implementation. If a service with the same NodePort exists both in the local and the remote cluster, all traffic originating from inside the cluster and targeting the corresponding NodePort will be redirected to a local backend, regardless of whether the destination node belongs to the local or the remote cluster. | | clustermesh.apiserver.service.type | string | `"NodePort"` | The type of service used for apiserver access. | | clustermesh.apiserver.terminationGracePeriodSeconds | int | `30` | terminationGracePeriodSeconds for the clustermesh-apiserver deployment | @@ -276,7 +272,6 @@ contributors across the globe, there is almost always someone available to help. | dnsProxy.preCache | string | `""` | DNS cache data at this path is preloaded on agent startup. | | dnsProxy.proxyPort | int | `0` | Global port on which the in-agent DNS proxy should listen. Default 0 is a OS-assigned port. | | dnsProxy.proxyResponseMaxDelay | string | `"100ms"` | The maximum time the DNS proxy holds an allowed DNS response before sending it along. Responses are sent as soon as the datapath is updated with the new IP information. | -| dnsProxy.socketLingerTimeout | int | `10` | Timeout (in seconds) when closing the connection between the DNS proxy and the upstream server. If set to 0, the connection is closed immediately (with TCP RST). If set to -1, the connection is closed asynchronously in the background. | | egressGateway.enabled | bool | `false` | Enables egress gateway to redirect and SNAT the traffic that leaves the cluster. | | egressGateway.installRoutes | bool | `false` | Deprecated without a replacement necessary. | | egressGateway.reconciliationTriggerInterval | string | `"1s"` | Time between triggers of egress gateway state reconciliations | @@ -302,11 +297,10 @@ contributors across the globe, there is almost always someone available to help. | encryption.mountPath | string | `"/etc/ipsec"` | Deprecated in favor of encryption.ipsec.mountPath. To be removed in 1.15. Path to mount the secret inside the Cilium pod. This option is only effective when encryption.type is set to ipsec. 
| | encryption.nodeEncryption | bool | `false` | Enable encryption for pure node to node traffic. This option is only effective when encryption.type is set to "wireguard". | | encryption.secretName | string | `"cilium-ipsec-keys"` | Deprecated in favor of encryption.ipsec.secretName. To be removed in 1.15. Name of the Kubernetes secret containing the encryption keys. This option is only effective when encryption.type is set to ipsec. | -| encryption.strictMode | object | `{"allowRemoteNodeIdentities":true,"enabled":false,"nodeCIDRList":[],"podCIDRList":[]}` | Configure the WireGuard strict mode. | -| encryption.strictMode.allowRemoteNodeIdentities | bool | `true` | Allow dynamic lookup of remote node identities. This is required when tunneling is used or direct routing is used and the node CIDR and pod CIDR overlap. This is also required when control-plane nodes are exempted from node-to-node encryption. | -| encryption.strictMode.enabled | bool | `false` | Enable WireGuard strict mode. | -| encryption.strictMode.nodeCIDRList | list | `[]` | nodeCIDRList for the WireGuard strict mode. | -| encryption.strictMode.podCIDRList | list | `[]` | podCIDRList for the WireGuard strict mode. | +| encryption.strictMode | object | `{"allowRemoteNodeIdentities":false,"cidr":"","enabled":false}` | Configure the WireGuard Pod2Pod strict mode. | +| encryption.strictMode.allowRemoteNodeIdentities | bool | `false` | Allow dynamic lookup of remote node identities. This is required when tunneling is used or direct routing is used and the node CIDR and pod CIDR overlap. | +| encryption.strictMode.cidr | string | `""` | CIDR for the WireGuard Pod2Pod strict mode. | +| encryption.strictMode.enabled | bool | `false` | Enable WireGuard Pod2Pod strict mode. | | encryption.type | string | `"ipsec"` | Encryption method. Can be either ipsec or wireguard. | | encryption.wireguard.persistentKeepalive | string | `"0s"` | Controls Wireguard PersistentKeepalive option. Set 0s to disable. | | encryption.wireguard.userspaceFallback | bool | `false` | Enables the fallback to the user-space implementation. | @@ -325,7 +319,7 @@ contributors across the globe, there is almost always someone available to help. | eni.subnetIDsFilter | list | `[]` | Filter via subnet IDs which will dictate which subnets are going to be used to create new ENIs Important note: This requires that each instance has an ENI with a matching subnet attached when Cilium is deployed. If you only want to control subnets for ENIs attached by Cilium, use the CNI configuration file settings (cni.customConf) instead. | | eni.subnetTagsFilter | list | `[]` | Filter via tags (k=v) which will dictate which subnets are going to be used to create new ENIs Important note: This requires that each instance has an ENI with a matching subnet attached when Cilium is deployed. If you only want to control subnets for ENIs attached by Cilium, use the CNI configuration file settings (cni.customConf) instead. 
| | eni.updateEC2AdapterLimitViaAPI | bool | `true` | Update ENI Adapter limits from the EC2 API | -| envoy.affinity | object | `{"nodeAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":{"nodeSelectorTerms":[{"matchExpressions":[{"key":"cilium.io/no-schedule","operator":"NotIn","values":["true"]}]}]}},"podAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchLabels":{"k8s-app":"cilium"}},"topologyKey":"kubernetes.io/hostname"}]},"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchLabels":{"k8s-app":"cilium-envoy"}},"topologyKey":"kubernetes.io/hostname"}]}}` | Affinity for cilium-envoy. | +| envoy.affinity | object | `{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchLabels":{"k8s-app":"cilium-envoy"}},"topologyKey":"kubernetes.io/hostname"}]}}` | Affinity for cilium-envoy. | | envoy.annotations | object | `{}` | Annotations to be added to all top-level cilium-envoy objects (resources under templates/cilium-envoy) | | envoy.connectTimeoutSeconds | int | `2` | Time in seconds after which a TCP connection attempt times out | | envoy.dnsPolicy | string | `nil` | DNS policy for Cilium envoy pods. Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy | @@ -338,7 +332,7 @@ contributors across the globe, there is almost always someone available to help. | envoy.extraVolumes | list | `[]` | Additional envoy volumes. | | envoy.healthPort | int | `9878` | TCP port for the health API. | | envoy.idleTimeoutDurationSeconds | int | `60` | Set Envoy upstream HTTP idle connection timeout seconds. Does not apply to connections with pending requests. Default 60s | -| envoy.image | object | `{"digest":"sha256:318eff387835ca2717baab42a84f35a83a5f9e7d519253df87269f80b9ff0171","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium-envoy","tag":"v1.33.4-1752151664-7c2edb0b44cf95f326d628b837fcdd845102ba68","useDigest":true}` | Envoy container image. | +| envoy.image | object | `{"digest":"sha256:80de27c1d16ab92923cc0cd1fff90f2e7047a9abf3906fda712268d9cbc5b950","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium-envoy","tag":"v1.27.2-f19708f3d0188fe39b7e024b4525b75a9eeee61f","useDigest":true}` | Envoy container image. | | envoy.livenessProbe.failureThreshold | int | `10` | failure threshold of liveness probe | | envoy.livenessProbe.periodSeconds | int | `30` | interval between checks of the liveness probe | | envoy.log.format | string | `"[%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v"` | The format string to use for laying out the log message metadata of Envoy. | @@ -348,18 +342,16 @@ contributors across the globe, there is almost always someone available to help. | envoy.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node selector for cilium-envoy. | | envoy.podAnnotations | object | `{}` | Annotations to be added to envoy pods | | envoy.podLabels | object | `{}` | Labels to be added to envoy pods | -| envoy.podSecurityContext | object | `{"appArmorProfile":{"type":"Unconfined"}}` | Security Context for cilium-envoy pods. | -| envoy.podSecurityContext.appArmorProfile | object | `{"type":"Unconfined"}` | AppArmorProfile options for the `cilium-agent` and init containers | +| envoy.podSecurityContext | object | `{}` | Security Context for cilium-envoy pods. | | envoy.priorityClassName | string | `nil` | The priority class to use for cilium-envoy. 
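The `envoy.image` change above shows the per-component image object these values use. A hedged sketch of pinning the Envoy image by digest, reusing the repository, tag, and digest from the `+` row; treating `override` as a full-image-reference escape hatch is an assumption based on the object's shape, not something stated in this excerpt:

```yaml
envoy:
  image:
    override: null           # assumption: when set, used verbatim instead of the fields below
    repository: quay.io/cilium/cilium-envoy
    tag: v1.27.2-f19708f3d0188fe39b7e024b4525b75a9eeee61f
    digest: "sha256:80de27c1d16ab92923cc0cd1fff90f2e7047a9abf3906fda712268d9cbc5b950"
    useDigest: true          # resolve the image by digest rather than by tag
    pullPolicy: IfNotPresent
```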
| -| envoy.prometheus | object | `{"enabled":true,"port":"9964","serviceMonitor":{"annotations":{},"enabled":false,"interval":"10s","labels":{},"metricRelabelings":null,"relabelings":[{"replacement":"${1}","sourceLabels":["__meta_kubernetes_pod_node_name"],"targetLabel":"node"}]}}` | Configure Cilium Envoy Prometheus options. Note that some of these apply to either cilium-agent or cilium-envoy. | | envoy.prometheus.enabled | bool | `true` | Enable prometheus metrics for cilium-envoy | | envoy.prometheus.port | string | `"9964"` | Serve prometheus metrics for cilium-envoy on the configured port | | envoy.prometheus.serviceMonitor.annotations | object | `{}` | Annotations to add to ServiceMonitor cilium-envoy | -| envoy.prometheus.serviceMonitor.enabled | bool | `false` | Enable service monitors. This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) Note that this setting applies to both cilium-envoy _and_ cilium-agent with Envoy enabled. | +| envoy.prometheus.serviceMonitor.enabled | bool | `false` | Enable service monitors. This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) | | envoy.prometheus.serviceMonitor.interval | string | `"10s"` | Interval for scrape metrics. | | envoy.prometheus.serviceMonitor.labels | object | `{}` | Labels to add to ServiceMonitor cilium-envoy | -| envoy.prometheus.serviceMonitor.metricRelabelings | string | `nil` | Metrics relabeling configs for the ServiceMonitor cilium-envoy or for cilium-agent with Envoy configured. | -| envoy.prometheus.serviceMonitor.relabelings | list | `[{"replacement":"${1}","sourceLabels":["__meta_kubernetes_pod_node_name"],"targetLabel":"node"}]` | Relabeling configs for the ServiceMonitor cilium-envoy or for cilium-agent with Envoy configured. | +| envoy.prometheus.serviceMonitor.metricRelabelings | string | `nil` | Metrics relabeling configs for the ServiceMonitor cilium-envoy | +| envoy.prometheus.serviceMonitor.relabelings | list | `[{"replacement":"${1}","sourceLabels":["__meta_kubernetes_pod_node_name"],"targetLabel":"node"}]` | Relabeling configs for the ServiceMonitor cilium-envoy | | envoy.readinessProbe.failureThreshold | int | `3` | failure threshold of readiness probe | | envoy.readinessProbe.periodSeconds | int | `30` | interval between checks of the readiness probe | | envoy.resources | object | `{}` | Envoy resource limits & requests ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | @@ -372,8 +364,6 @@ contributors across the globe, there is almost always someone available to help. | envoy.terminationGracePeriodSeconds | int | `1` | Configure termination grace period for cilium-envoy DaemonSet. 
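The Envoy Prometheus rows above describe the metrics endpoint and its optional ServiceMonitor. A minimal `values.yaml` sketch, assuming the Prometheus Operator CRDs are installed; the `release` label is purely illustrative (match it to your Prometheus selector), not a chart default:

```yaml
envoy:
  prometheus:
    enabled: true            # serve Envoy metrics (default port 9964 per the rows above)
    port: "9964"
    serviceMonitor:
      enabled: true          # requires the prometheus-operator ServiceMonitor CRD
      interval: "10s"
      labels:
        release: monitoring  # illustrative selector label; adjust or drop
```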
| | envoy.tolerations | list | `[{"operator":"Exists"}]` | Node tolerations for envoy scheduling to nodes with taints ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ | | envoy.updateStrategy | object | `{"rollingUpdate":{"maxUnavailable":2},"type":"RollingUpdate"}` | cilium-envoy update strategy ref: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/#updating-a-daemonset | -| envoy.xffNumTrustedHopsL7PolicyEgress | int | `0` | Number of trusted hops regarding the x-forwarded-for and related HTTP headers for the egress L7 policy enforcement Envoy listeners. | -| envoy.xffNumTrustedHopsL7PolicyIngress | int | `0` | Number of trusted hops regarding the x-forwarded-for and related HTTP headers for the ingress L7 policy enforcement Envoy listeners. | | envoyConfig.enabled | bool | `false` | Enable CiliumEnvoyConfig CRD CiliumEnvoyConfig CRD can also be implicitly enabled by other options. | | envoyConfig.secretsNamespace | object | `{"create":true,"name":"cilium-secrets"}` | SecretsNamespace is the namespace in which envoy SDS will retrieve secrets from. | | envoyConfig.secretsNamespace.create | bool | `true` | Create secrets namespace for CiliumEnvoyConfig CRDs. | @@ -451,22 +441,20 @@ contributors across the globe, there is almost always someone available to help. | hubble.peerService.clusterDomain | string | `"cluster.local"` | The cluster domain to use to query the Hubble Peer service. It should be the local cluster. | | hubble.peerService.targetPort | int | `4244` | Target Port for the Peer service, must match the hubble.listenAddress' port. | | hubble.preferIpv6 | bool | `false` | Whether Hubble should prefer to announce IPv6 or IPv4 addresses if both are available. | -| hubble.redact | object | `{"enabled":false,"http":{"headers":{"allow":[],"deny":[]},"urlQuery":false,"userInfo":true},"kafka":{"apiKey":true}}` | Enables redacting sensitive information present in Layer 7 flows. | +| hubble.redact | object | `{"enabled":false,"http":{"headers":{"allow":[],"deny":[]},"urlQuery":false,"userInfo":true},"kafka":{"apiKey":false}}` | Enables redacting sensitive information present in Layer 7 flows. | | hubble.redact.http.headers.allow | list | `[]` | List of HTTP headers to allow: headers not matching will be redacted. Note: `allow` and `deny` lists cannot be used both at the same time, only one can be present. Example: redact: enabled: true http: headers: allow: - traceparent - tracestate - Cache-Control You can specify the options from the helm CLI: --set hubble.redact.enabled="true" --set hubble.redact.http.headers.allow="traceparent,tracestate,Cache-Control" | | hubble.redact.http.headers.deny | list | `[]` | List of HTTP headers to deny: matching headers will be redacted. Note: `allow` and `deny` lists cannot be used both at the same time, only one can be present. Example: redact: enabled: true http: headers: deny: - Authorization - Proxy-Authorization You can specify the options from the helm CLI: --set hubble.redact.enabled="true" --set hubble.redact.http.headers.deny="Authorization,Proxy-Authorization" | | hubble.redact.http.urlQuery | bool | `false` | Enables redacting URL query (GET) parameters. Example: redact: enabled: true http: urlQuery: true You can specify the options from the helm CLI: --set hubble.redact.enabled="true" --set hubble.redact.http.urlQuery="true" | | hubble.redact.http.userInfo | bool | `true` | Enables redacting user info, e.g., password when basic auth is used. 
Example: redact: enabled: true http: userInfo: true You can specify the options from the helm CLI: --set hubble.redact.enabled="true" --set hubble.redact.http.userInfo="true" | -| hubble.redact.kafka.apiKey | bool | `true` | Enables redacting Kafka's API key. Example: redact: enabled: true kafka: apiKey: true You can specify the options from the helm CLI: --set hubble.redact.enabled="true" --set hubble.redact.kafka.apiKey="true" | +| hubble.redact.kafka.apiKey | bool | `false` | Enables redacting Kafka's API key. Example: redact: enabled: true kafka: apiKey: true You can specify the options from the helm CLI: --set hubble.redact.enabled="true" --set hubble.redact.kafka.apiKey="true" | | hubble.relay.affinity | object | `{"podAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchLabels":{"k8s-app":"cilium"}},"topologyKey":"kubernetes.io/hostname"}]}}` | Affinity for hubble-replay | | hubble.relay.annotations | object | `{}` | Annotations to be added to all top-level hubble-relay objects (resources under templates/hubble-relay) | | hubble.relay.dialTimeout | string | `nil` | Dial timeout to connect to the local hubble instance to receive peer information (e.g. "30s"). | | hubble.relay.enabled | bool | `false` | Enable Hubble Relay (requires hubble.enabled=true) | | hubble.relay.extraEnv | list | `[]` | Additional hubble-relay environment variables. | -| hubble.relay.extraVolumeMounts | list | `[]` | Additional hubble-relay volumeMounts. | -| hubble.relay.extraVolumes | list | `[]` | Additional hubble-relay volumes. | | hubble.relay.gops.enabled | bool | `true` | Enable gops for hubble-relay | | hubble.relay.gops.port | int | `9893` | Configure gops listen port for hubble-relay | -| hubble.relay.image | object | `{"digest":"","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-relay","tag":"v1.15.19","useDigest":false}` | Hubble-relay container image. | +| hubble.relay.image | object | `{"digest":"","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-relay","tag":"v1.15.0-pre.3","useDigest":false}` | Hubble-relay container image. | | hubble.relay.listenHost | string | `""` | Host to listen to. Specify an empty string to bind to all the interfaces. | | hubble.relay.listenPort | string | `"4245"` | Port to listen to. | | hubble.relay.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector | @@ -524,7 +512,7 @@ contributors across the globe, there is almost always someone available to help. | hubble.ui.backend.extraEnv | list | `[]` | Additional hubble-ui backend environment variables. | | hubble.ui.backend.extraVolumeMounts | list | `[]` | Additional hubble-ui backend volumeMounts. | | hubble.ui.backend.extraVolumes | list | `[]` | Additional hubble-ui backend volumes. | -| hubble.ui.backend.image | object | `{"digest":"sha256:a034b7e98e6ea796ed26df8f4e71f83fc16465a19d166eff67a03b822c0bfa15","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-ui-backend","tag":"v0.13.2","useDigest":true}` | Hubble-ui backend image. | +| hubble.ui.backend.image | object | `{"digest":"sha256:1f86f3400827a0451e6332262467f894eeb7caf0eb8779bd951e2caa9d027cbe","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-ui-backend","tag":"v0.12.1","useDigest":true}` | Hubble-ui backend image. 
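The `hubble.redact` rows above give their examples as `--set` flags; the same settings as a `values.yaml` sketch (header names taken from the row's own deny-list example, and Hubble itself is assumed to be enabled elsewhere):

```yaml
hubble:
  redact:
    enabled: true
    http:
      urlQuery: true         # redact GET query parameters
      userInfo: true         # redact e.g. basic-auth user info
      headers:
        deny:                # `allow` and `deny` cannot both be set
          - Authorization
          - Proxy-Authorization
    kafka:
      apiKey: true           # redact the Kafka API key
```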
| | hubble.ui.backend.livenessProbe.enabled | bool | `false` | Enable liveness probe for Hubble-ui backend (requires Hubble-ui 0.12+) | | hubble.ui.backend.readinessProbe.enabled | bool | `false` | Enable readiness probe for Hubble-ui backend (requires Hubble-ui 0.12+) | | hubble.ui.backend.resources | object | `{}` | Resource requests and limits for the 'backend' container of the 'hubble-ui' deployment. | @@ -534,7 +522,7 @@ contributors across the globe, there is almost always someone available to help. | hubble.ui.frontend.extraEnv | list | `[]` | Additional hubble-ui frontend environment variables. | | hubble.ui.frontend.extraVolumeMounts | list | `[]` | Additional hubble-ui frontend volumeMounts. | | hubble.ui.frontend.extraVolumes | list | `[]` | Additional hubble-ui frontend volumes. | -| hubble.ui.frontend.image | object | `{"digest":"sha256:9e37c1296b802830834cc87342a9182ccbb71ffebb711971e849221bd9d59392","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-ui","tag":"v0.13.2","useDigest":true}` | Hubble-ui frontend image. | +| hubble.ui.frontend.image | object | `{"digest":"sha256:9e5f81ee747866480ea1ac4630eb6975ff9227f9782b7c93919c081c33f38267","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-ui","tag":"v0.12.1","useDigest":true}` | Hubble-ui frontend image. | | hubble.ui.frontend.resources | object | `{}` | Resource requests and limits for the 'frontend' container of the 'hubble-ui' deployment. | | hubble.ui.frontend.securityContext | object | `{}` | Hubble-ui frontend security context. | | hubble.ui.frontend.server.ipv6 | object | `{"enabled":true}` | Controls server listener for ipv6 | @@ -561,7 +549,7 @@ contributors across the globe, there is almost always someone available to help. | hubble.ui.updateStrategy | object | `{"rollingUpdate":{"maxUnavailable":1},"type":"RollingUpdate"}` | hubble-ui update strategy. | | identityAllocationMode | string | `"crd"` | Method to use for identity allocation (`crd` or `kvstore`). | | identityChangeGracePeriod | string | `"5s"` | Time to wait before using new identity on endpoint identity change. | -| image | object | `{"digest":"","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.15.19","useDigest":false}` | Agent container image. | +| image | object | `{"digest":"","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.15.0-pre.3","useDigest":false}` | Agent container image. | | imagePullSecrets | string | `nil` | Configure image pull secrets for pulling container images | | ingressController.default | bool | `false` | Set cilium ingress controller to be the default ingress controller This will let cilium ingress controller route entries without ingress class set | | ingressController.defaultSecretName | string | `nil` | Default secret name for ingresses without .spec.tls[].secretName set. | @@ -585,7 +573,6 @@ contributors across the globe, there is almost always someone available to help. 
| ingressController.service.name | string | `"cilium-ingress"` | Service name | | ingressController.service.secureNodePort | string | `nil` | Configure a specific nodePort for secure HTTPS traffic on the shared LB service | | ingressController.service.type | string | `"LoadBalancer"` | Service type for the shared LB service | -| initResources | object | `{}` | resources & limits for the agent init containers | | installNoConntrackIptablesRules | bool | `false` | Install Iptables rules to skip netfilter connection tracking on all pod traffic. This option is only effective when Cilium is running in direct routing and full KPR mode. Moreover, this option cannot be enabled when Cilium is running in a managed Kubernetes environment or in a chained CNI setup. | | ipMasqAgent | object | `{"enabled":false}` | Configure the eBPF-based ip-masq-agent | | ipam.ciliumNodeUpdateRate | string | `"15s"` | Maximum rate at which the CiliumNode custom resource is updated. | @@ -650,12 +637,10 @@ contributors across the globe, there is almost always someone available to help. | nodeinit.extraEnv | list | `[]` | Additional nodeinit environment variables. | | nodeinit.extraVolumeMounts | list | `[]` | Additional nodeinit volumeMounts. | | nodeinit.extraVolumes | list | `[]` | Additional nodeinit volumes. | -| nodeinit.image | object | `{"digest":"sha256:8d7b41c4ca45860254b3c19e20210462ef89479bb6331d6760c4e609d651b29c","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/startup-script","tag":"c54c7edeab7fde4da68e59acd319ab24af242c3f","useDigest":true}` | node-init image. | +| nodeinit.image | object | `{"override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/startup-script","tag":"62093c5c233ea914bfa26a10ba41f8780d9b737f"}` | node-init image. | | nodeinit.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for nodeinit pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector | | nodeinit.podAnnotations | object | `{}` | Annotations to be added to node-init pods. | | nodeinit.podLabels | object | `{}` | Labels to be added to node-init pods. | -| nodeinit.podSecurityContext | object | `{"appArmorProfile":{"type":"Unconfined"}}` | Security Context for cilium-node-init pods. | -| nodeinit.podSecurityContext.appArmorProfile | object | `{"type":"Unconfined"}` | AppArmorProfile options for the `cilium-node-init` and init containers | | nodeinit.prestop | object | `{"postScript":"","preScript":""}` | prestop offers way to customize prestop nodeinit script (pre and post position) | | nodeinit.priorityClassName | string | `""` | The priority class to use for the nodeinit pod. | | nodeinit.resources | object | `{"requests":{"cpu":"100m","memory":"100Mi"}}` | nodeinit resource limits & requests ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | @@ -676,7 +661,7 @@ contributors across the globe, there is almost always someone available to help. | operator.extraVolumes | list | `[]` | Additional cilium-operator volumes. | | operator.identityGCInterval | string | `"15m0s"` | Interval for identity garbage collection. | | operator.identityHeartbeatTimeout | string | `"30m0s"` | Timeout for identity heartbeats. | -| operator.image | object | `{"alibabacloudDigest":"","awsDigest":"","azureDigest":"","genericDigest":"","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/operator","suffix":"","tag":"v1.15.19","useDigest":false}` | cilium-operator image. 
| +| operator.image | object | `{"alibabacloudDigest":"","awsDigest":"","azureDigest":"","genericDigest":"","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/operator","suffix":"","tag":"v1.15.0-pre.3","useDigest":false}` | cilium-operator image. | | operator.nodeGCInterval | string | `"5m0s"` | Interval for cilium node garbage collection. | | operator.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for cilium-operator pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector | | operator.podAnnotations | object | `{}` | Annotations to be added to cilium-operator pods | @@ -714,8 +699,7 @@ contributors across the globe, there is almost always someone available to help. | pmtuDiscovery.enabled | bool | `false` | Enable path MTU discovery to send ICMP fragmentation-needed replies to the client. | | podAnnotations | object | `{}` | Annotations to be added to agent pods | | podLabels | object | `{}` | Labels to be added to agent pods | -| podSecurityContext | object | `{"appArmorProfile":{"type":"Unconfined"}}` | Security Context for cilium-agent pods. | -| podSecurityContext.appArmorProfile | object | `{"type":"Unconfined"}` | AppArmorProfile options for the `cilium-agent` and init containers | +| podSecurityContext | object | `{}` | Security Context for cilium-agent pods. | | policyCIDRMatchMode | string | `nil` | policyCIDRMatchMode is a list of entities that may be selected by CIDR selector. The possible value is "nodes". | | policyEnforcementMode | string | `"default"` | The agent can be put into one of the three policy enforcement modes: default, always and never. ref: https://docs.cilium.io/en/stable/security/policy/intro/#policy-enforcement-modes | | pprof.address | string | `"localhost"` | Configure pprof listen address for cilium-agent | @@ -727,7 +711,7 @@ contributors across the globe, there is almost always someone available to help. | preflight.extraEnv | list | `[]` | Additional preflight environment variables. | | preflight.extraVolumeMounts | list | `[]` | Additional preflight volumeMounts. | | preflight.extraVolumes | list | `[]` | Additional preflight volumes. | -| preflight.image | object | `{"digest":"","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.15.19","useDigest":false}` | Cilium pre-flight image. | +| preflight.image | object | `{"digest":"","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.15.0-pre.3","useDigest":false}` | Cilium pre-flight image. | | preflight.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for preflight pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector | | preflight.podAnnotations | object | `{}` | Annotations to be added to preflight pods | | preflight.podDisruptionBudget.enabled | bool | `false` | enable PodDisruptionBudget ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ | @@ -762,7 +746,7 @@ contributors across the globe, there is almost always someone available to help. | rbac.create | bool | `true` | Enable creation of Resource-Based Access Control configuration. | | readinessProbe.failureThreshold | int | `3` | failure threshold of readiness probe | | readinessProbe.periodSeconds | int | `30` | interval between checks of the readiness probe | -| remoteNodeIdentity | bool | `true` | Enable use of the remote node identity. 
ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity Deprecated without replacement in 1.15. To be removed in 1.16. | +| remoteNodeIdentity | bool | `true` | Enable use of the remote node identity. ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity | | resourceQuotas | object | `{"cilium":{"hard":{"pods":"10k"}},"enabled":false,"operator":{"hard":{"pods":"15"}}}` | Enable resource quotas for priority classes used in the cluster. | | resources | object | `{}` | Agent resource limits & requests ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | | rollOutCiliumPods | bool | `false` | Roll out cilium agent pods automatically when configmap is updated. | @@ -787,8 +771,6 @@ contributors across the globe, there is almost always someone available to help. | startupProbe.periodSeconds | int | `2` | interval between checks of the startup probe | | svcSourceRangeCheck | bool | `true` | Enable check of service source ranges (currently, only for LoadBalancer). | | synchronizeK8sNodes | bool | `true` | Synchronize Kubernetes nodes to kvstore and perform CNP GC. | -| sysctlfix | object | `{"enabled":true}` | Configure sysctl override described in #20072. | -| sysctlfix.enabled | bool | `true` | Enable the sysctl override. When enabled, the init container will mount the /proc of the host so that the `sysctlfix` utility can execute. | | terminationGracePeriodSeconds | int | `1` | Configure termination grace period for cilium-agent DaemonSet. | | tls | object | `{"ca":{"cert":"","certValidityDuration":1095,"key":""},"caBundle":{"enabled":false,"key":"ca.crt","name":"cilium-root-ca.crt","useSecret":false},"secretsBackend":"local"}` | Configure TLS configuration in the agent. | | tls.ca | object | `{"cert":"","certValidityDuration":1095,"key":""}` | Base64 encoded PEM values for the CA certificate and private key. This can be used as common CA to generate certificates used by hubble and clustermesh components. It is neither required nor used when cert-manager is used to generate the certificates. | diff --git a/internal/constellation/helm/charts/cilium/README.md.gotmpl b/internal/constellation/helm/charts/cilium/README.md.gotmpl index 4aa7da8f9..db2d81b74 100644 --- a/internal/constellation/helm/charts/cilium/README.md.gotmpl +++ b/internal/constellation/helm/charts/cilium/README.md.gotmpl @@ -48,7 +48,7 @@ offer from the [Getting Started Guides page](https://docs.cilium.io/en/stable/ge ## Getting Help The best way to get help if you get stuck is to ask a question on the -[Cilium Slack channel](https://slack.cilium.io). With Cilium +[Cilium Slack channel](https://cilium.herokuapp.com/). With Cilium contributors across the globe, there is almost always someone available to help. {{ template "chart.valuesSection" . }} diff --git a/internal/constellation/helm/charts/cilium/files/agent/poststart-eni.bash b/internal/constellation/helm/charts/cilium/files/agent/poststart-eni.bash index a57d89682..66fccf457 100644 --- a/internal/constellation/helm/charts/cilium/files/agent/poststart-eni.bash +++ b/internal/constellation/helm/charts/cilium/files/agent/poststart-eni.bash @@ -11,9 +11,9 @@ set -o nounset # dependencies on anything that is part of the startup script # itself, and can be safely run multiple times per node (e.g. in # case of a restart). 
-if [[ "$(iptables-save | grep -E -c 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN')" != "0" ]]; +if [[ "$(iptables-save | grep -c 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN')" != "0" ]]; then echo 'Deleting iptables rules created by the AWS CNI VPC plugin' - iptables-save | grep -E -v 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN' | iptables-restore + iptables-save | grep -v 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN' | iptables-restore fi echo 'Done!' diff --git a/internal/constellation/helm/charts/cilium/files/cilium-agent/dashboards/cilium-dashboard.json b/internal/constellation/helm/charts/cilium/files/cilium-agent/dashboards/cilium-dashboard.json index 94af2eac3..992c8524a 100644 --- a/internal/constellation/helm/charts/cilium/files/cilium-agent/dashboards/cilium-dashboard.json +++ b/internal/constellation/helm/charts/cilium/files/cilium-agent/dashboards/cilium-dashboard.json @@ -5823,7 +5823,7 @@ "refId": "C" }, { - "expr": "sum(cilium_policy_change_total{k8s_app=\"cilium\", pod=~\"$pod\", outcome=\"fail\"}) by (pod)", + "expr": "sum(cilium_policy_change_total{k8s_app=\"cilium\", pod=~\"$pod\"}, outcome=\"fail\") by (pod)", "format": "time_series", "intervalFactor": 1, "legendFormat": "policy change errors", diff --git a/internal/constellation/helm/charts/cilium/files/cilium-envoy/configmap/bootstrap-config.yaml b/internal/constellation/helm/charts/cilium/files/cilium-envoy/configmap/bootstrap-config.yaml deleted file mode 100644 index 920837268..000000000 --- a/internal/constellation/helm/charts/cilium/files/cilium-envoy/configmap/bootstrap-config.yaml +++ /dev/null @@ -1,232 +0,0 @@ -node: - id: "host~127.0.0.1~no-id~localdomain" - cluster: "ingress-cluster" -staticResources: - listeners: - {{- if .Values.envoy.prometheus.enabled }} - - name: "envoy-prometheus-metrics-listener" - address: - socketAddress: - address: {{ .Values.ipv4.enabled | ternary "0.0.0.0" "::" | quote }} - portValue: {{ .Values.envoy.prometheus.port }} - {{- if and .Values.ipv4.enabled .Values.ipv6.enabled }} - additionalAddresses: - - address: - socketAddress: - address: "::" - portValue: {{ .Values.envoy.prometheus.port }} - {{- end }} - filterChains: - - filters: - - name: "envoy.filters.network.http_connection_manager" - typedConfig: - "@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager" - statPrefix: "envoy-prometheus-metrics-listener" - routeConfig: - virtualHosts: - - name: "prometheus_metrics_route" - domains: - - "*" - routes: - - name: "prometheus_metrics_route" - match: - prefix: "/metrics" - route: - cluster: "/envoy-admin" - prefixRewrite: "/stats/prometheus" - httpFilters: - - name: "envoy.filters.http.router" - typedConfig: - "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" - internalAddressConfig: - cidrRanges: - {{- if .Values.ipv4.enabled }} - - addressPrefix: "10.0.0.0" - prefixLen: 8 - - addressPrefix: "172.16.0.0" - prefixLen: 12 - - addressPrefix: "192.168.0.0" - prefixLen: 16 - - addressPrefix: "127.0.0.1" - prefixLen: 32 - {{- end }} - {{- if .Values.ipv6.enabled }} - - addressPrefix: "::1" - prefixLen: 128 - {{- end }} - streamIdleTimeout: "0s" - {{- end }} - - name: "envoy-health-listener" - address: - socketAddress: - address: {{ .Values.ipv4.enabled | ternary "127.0.0.1" "::1" | quote }} - portValue: {{ .Values.envoy.healthPort }} - {{- if and .Values.ipv4.enabled .Values.ipv6.enabled }} - additionalAddresses: - - address: - socketAddress: - address: "::1" - portValue: {{ .Values.envoy.healthPort }} - {{- end }} - filterChains: - - 
filters: - - name: "envoy.filters.network.http_connection_manager" - typedConfig: - "@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager" - statPrefix: "envoy-health-listener" - routeConfig: - virtual_hosts: - - name: "health" - domains: - - "*" - routes: - - name: "health" - match: - prefix: "/healthz" - route: - cluster: "/envoy-admin" - prefixRewrite: "/ready" - httpFilters: - - name: "envoy.filters.http.router" - typedConfig: - "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" - internalAddressConfig: - cidrRanges: - {{- if .Values.ipv4.enabled }} - - addressPrefix: "10.0.0.0" - prefixLen: 8 - - addressPrefix: "172.16.0.0" - prefixLen: 12 - - addressPrefix: "192.168.0.0" - prefixLen: 16 - - addressPrefix: "127.0.0.1" - prefixLen: 32 - {{- end }} - {{- if .Values.ipv6.enabled }} - - addressPrefix: "::1" - prefixLen: 128 - {{- end }} - streamIdleTimeout: "0s" - clusters: - - name: "ingress-cluster" - type: "ORIGINAL_DST" - connectTimeout: "{{ .Values.envoy.connectTimeoutSeconds }}s" - lbPolicy: "CLUSTER_PROVIDED" - typedExtensionProtocolOptions: - envoy.extensions.upstreams.http.v3.HttpProtocolOptions: - "@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions" - commonHttpProtocolOptions: - idleTimeout: "{{ .Values.envoy.idleTimeoutDurationSeconds }}s" - maxConnectionDuration: "{{ .Values.envoy.maxConnectionDurationSeconds }}s" - maxRequestsPerConnection: {{ .Values.envoy.maxRequestsPerConnection }} - useDownstreamProtocolConfig: {} - cleanupInterval: "{{ .Values.envoy.connectTimeoutSeconds }}.500s" - - name: "egress-cluster-tls" - type: "ORIGINAL_DST" - connectTimeout: "{{ .Values.envoy.connectTimeoutSeconds }}s" - lbPolicy: "CLUSTER_PROVIDED" - typedExtensionProtocolOptions: - envoy.extensions.upstreams.http.v3.HttpProtocolOptions: - "@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions" - commonHttpProtocolOptions: - idleTimeout: "{{ .Values.envoy.idleTimeoutDurationSeconds }}s" - maxConnectionDuration: "{{ .Values.envoy.maxConnectionDurationSeconds }}s" - maxRequestsPerConnection: {{ .Values.envoy.maxRequestsPerConnection }} - upstreamHttpProtocolOptions: {} - useDownstreamProtocolConfig: {} - cleanupInterval: "{{ .Values.envoy.connectTimeoutSeconds }}.500s" - transportSocket: - name: "cilium.tls_wrapper" - typedConfig: - "@type": "type.googleapis.com/cilium.UpstreamTlsWrapperContext" - - name: "egress-cluster" - type: "ORIGINAL_DST" - connectTimeout: "{{ .Values.envoy.connectTimeoutSeconds }}s" - lbPolicy: "CLUSTER_PROVIDED" - typedExtensionProtocolOptions: - envoy.extensions.upstreams.http.v3.HttpProtocolOptions: - "@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions" - commonHttpProtocolOptions: - idleTimeout: "{{ .Values.envoy.idleTimeoutDurationSeconds }}s" - maxConnectionDuration: "{{ .Values.envoy.maxConnectionDurationSeconds }}s" - maxRequestsPerConnection: {{ .Values.envoy.maxRequestsPerConnection }} - useDownstreamProtocolConfig: {} - cleanupInterval: "{{ .Values.envoy.connectTimeoutSeconds }}.500s" - - name: "ingress-cluster-tls" - type: "ORIGINAL_DST" - connectTimeout: "{{ .Values.envoy.connectTimeoutSeconds }}s" - lbPolicy: "CLUSTER_PROVIDED" - typedExtensionProtocolOptions: - envoy.extensions.upstreams.http.v3.HttpProtocolOptions: - "@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions" - commonHttpProtocolOptions: - idleTimeout: "{{ 
.Values.envoy.idleTimeoutDurationSeconds }}s" - maxConnectionDuration: "{{ .Values.envoy.maxConnectionDurationSeconds }}s" - maxRequestsPerConnection: {{ .Values.envoy.maxRequestsPerConnection }} - upstreamHttpProtocolOptions: {} - useDownstreamProtocolConfig: {} - cleanupInterval: "{{ .Values.envoy.connectTimeoutSeconds }}.500s" - transportSocket: - name: "cilium.tls_wrapper" - typedConfig: - "@type": "type.googleapis.com/cilium.UpstreamTlsWrapperContext" - - name: "xds-grpc-cilium" - type: "STATIC" - connectTimeout: "{{ .Values.envoy.connectTimeoutSeconds }}s" - loadAssignment: - clusterName: "xds-grpc-cilium" - endpoints: - - lbEndpoints: - - endpoint: - address: - pipe: - path: "/var/run/cilium/envoy/sockets/xds.sock" - typedExtensionProtocolOptions: - envoy.extensions.upstreams.http.v3.HttpProtocolOptions: - "@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions" - explicitHttpConfig: - http2ProtocolOptions: {} - - name: "/envoy-admin" - type: "STATIC" - connectTimeout: "{{ .Values.envoy.connectTimeoutSeconds }}s" - loadAssignment: - clusterName: "/envoy-admin" - endpoints: - - lbEndpoints: - - endpoint: - address: - pipe: - path: "/var/run/cilium/envoy/sockets/admin.sock" -dynamicResources: - ldsConfig: - apiConfigSource: - apiType: "GRPC" - transportApiVersion: "V3" - grpcServices: - - envoyGrpc: - clusterName: "xds-grpc-cilium" - setNodeOnFirstMessageOnly: true - resourceApiVersion: "V3" - cdsConfig: - apiConfigSource: - apiType: "GRPC" - transportApiVersion: "V3" - grpcServices: - - envoyGrpc: - clusterName: "xds-grpc-cilium" - setNodeOnFirstMessageOnly: true - resourceApiVersion: "V3" -bootstrapExtensions: -- name: "envoy.bootstrap.internal_listener" - typedConfig: - "@type": "type.googleapis.com/envoy.extensions.bootstrap.internal_listener.v3.InternalListener" -overloadManager: - resourceMonitors: - - name: "envoy.resource_monitors.global_downstream_max_connections" - typedConfig: - "@type": "type.googleapis.com/envoy.extensions.resource_monitors.downstream_connections.v3.DownstreamConnectionsConfig" - max_active_downstream_connections: "50000" -admin: - address: - pipe: - path: "/var/run/cilium/envoy/sockets/admin.sock" diff --git a/internal/constellation/helm/charts/cilium/files/hubble/dashboards/hubble-dashboard.json b/internal/constellation/helm/charts/cilium/files/hubble/dashboards/hubble-dashboard.json index 0ff1dcbec..8de5ec1d0 100644 --- a/internal/constellation/helm/charts/cilium/files/hubble/dashboards/hubble-dashboard.json +++ b/internal/constellation/helm/charts/cilium/files/hubble/dashboards/hubble-dashboard.json @@ -3194,23 +3194,7 @@ "style": "dark", "tags": [], "templating": { - "list": [ - { - "current": {}, - "hide": 0, - "includeAll": false, - "label": "Prometheus", - "multi": false, - "name": "DS_PROMETHEUS", - "options": [], - "query": "prometheus", - "queryValue": "", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "type": "datasource" - } - ] + "list": [] }, "time": { "from": "now-6h", diff --git a/internal/constellation/helm/charts/cilium/files/hubble/dashboards/hubble-dns-namespace.json b/internal/constellation/helm/charts/cilium/files/hubble/dashboards/hubble-dns-namespace.json index 57f804cf2..d286fdb3a 100644 --- a/internal/constellation/helm/charts/cilium/files/hubble/dashboards/hubble-dns-namespace.json +++ b/internal/constellation/helm/charts/cilium/files/hubble/dashboards/hubble-dns-namespace.json @@ -484,7 +484,7 @@ "includeAll": false, "label": "Data Source", "multi": false, - "name": "DS_PROMETHEUS", + 
"name": "prometheus_datasource", "options": [], "query": "prometheus", "queryValue": "", diff --git a/internal/constellation/helm/charts/cilium/files/hubble/dashboards/hubble-network-overview-namespace.json b/internal/constellation/helm/charts/cilium/files/hubble/dashboards/hubble-network-overview-namespace.json index cddb473d7..d0cf9d3b4 100644 --- a/internal/constellation/helm/charts/cilium/files/hubble/dashboards/hubble-network-overview-namespace.json +++ b/internal/constellation/helm/charts/cilium/files/hubble/dashboards/hubble-network-overview-namespace.json @@ -883,7 +883,7 @@ "includeAll": false, "label": "Data Source", "multi": false, - "name": "DS_PROMETHEUS", + "name": "prometheus_datasource", "options": [], "query": "prometheus", "queryValue": "", diff --git a/internal/constellation/helm/charts/cilium/files/nodeinit/startup.bash b/internal/constellation/helm/charts/cilium/files/nodeinit/startup.bash index aaaba0925..975b71fa7 100644 --- a/internal/constellation/helm/charts/cilium/files/nodeinit/startup.bash +++ b/internal/constellation/helm/charts/cilium/files/nodeinit/startup.bash @@ -100,7 +100,7 @@ then # Since that version containerd no longer allows missing configuration for the CNI, # not even for pods with hostNetwork set to true. Thus, we add a temporary one. # This will be replaced with the real config by the agent pod. - echo -e '{\n\t"cniVersion": "0.3.1",\n\t"name": "cilium",\n\t"type": "cilium-cni"\n}' > /etc/cni/net.d/05-cilium.conf + echo -e "{\n\t"cniVersion": "0.3.1",\n\t"name": "cilium",\n\t"type": "cilium-cni"\n}" > /etc/cni/net.d/05-cilium.conf fi # Start containerd. It won't create it's CNI configuration file anymore. diff --git a/internal/constellation/helm/charts/cilium/templates/_helpers.tpl b/internal/constellation/helm/charts/cilium/templates/_helpers.tpl index 39b3d6955..3e5429e2a 100644 --- a/internal/constellation/helm/charts/cilium/templates/_helpers.tpl +++ b/internal/constellation/helm/charts/cilium/templates/_helpers.tpl @@ -43,7 +43,62 @@ where: {{- if $priorityClass }} {{- $priorityClass }} {{- else if and $root.Values.enableCriticalPriorityClass $criticalPriorityClass -}} - {{- $criticalPriorityClass }} + {{- if and (eq $root.Release.Namespace "kube-system") (semverCompare ">=1.10-0" $root.Capabilities.KubeVersion.Version) -}} + {{- $criticalPriorityClass }} + {{- else if semverCompare ">=1.17-0" $root.Capabilities.KubeVersion.Version -}} + {{- $criticalPriorityClass }} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "ingress.apiVersion" -}} +{{- if semverCompare ">=1.16-0, <1.19-0" .Capabilities.KubeVersion.Version -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else if semverCompare "^1.19-0" .Capabilities.KubeVersion.Version -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate backend for Hubble UI ingress. +*/}} +{{- define "ingress.paths" -}} +{{ if semverCompare ">=1.4-0, <1.19-0" .Capabilities.KubeVersion.Version -}} +backend: + serviceName: hubble-ui + servicePort: http +{{- else if semverCompare "^1.19-0" .Capabilities.KubeVersion.Version -}} +pathType: Prefix +backend: + service: + name: hubble-ui + port: + name: http +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. 
+*/}} +{{- define "cronjob.apiVersion" -}} +{{- if semverCompare ">=1.21-0" .Capabilities.KubeVersion.Version -}} +{{- print "batch/v1" -}} +{{- else -}} +{{- print "batch/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for podDisruptionBudget. +*/}} +{{- define "podDisruptionBudget.apiVersion" -}} +{{- if semverCompare ">=1.21-0" .Capabilities.KubeVersion.Version -}} +{{- print "policy/v1" -}} +{{- else -}} +{{- print "policy/v1beta1" -}} {{- end -}} {{- end -}} diff --git a/internal/constellation/helm/charts/cilium/templates/cilium-agent/daemonset.yaml b/internal/constellation/helm/charts/cilium/templates/cilium-agent/daemonset.yaml index 3e288525f..773a5b26b 100644 --- a/internal/constellation/helm/charts/cilium/templates/cilium-agent/daemonset.yaml +++ b/internal/constellation/helm/charts/cilium/templates/cilium-agent/daemonset.yaml @@ -53,7 +53,6 @@ spec: cilium.io/cilium-configmap-checksum: {{ include (print $.Template.BasePath "/cilium-configmap.yaml") . | sha256sum | quote }} {{- end }} {{- if not .Values.securityContext.privileged }} - {{- if semverCompare "<1.30.0" (printf "%d.%d.0" (semver .Capabilities.KubeVersion.Version).Major (semver .Capabilities.KubeVersion.Version).Minor) }} # Set app AppArmor's profile to "unconfined". The value of this annotation # can be modified as long users know which profiles they have available # in AppArmor. @@ -64,7 +63,6 @@ spec: container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites: "unconfined" {{- end }} {{- end }} - {{- end }} {{- with .Values.podAnnotations }} {{- toYaml . | nindent 8 }} {{- end }} @@ -83,11 +81,6 @@ spec: imagePullSecrets: {{- toYaml . | nindent 8 }} {{- end }} - {{- /* K8s version lower than 1.30.0 don't support the "appArmorProfile" field, */}} - {{- /* thus we have to remove it. */}} - {{- if semverCompare "<1.30.0" (printf "%d.%d.0" (semver .Capabilities.KubeVersion.Version).Major (semver .Capabilities.KubeVersion.Version).Minor) }} - {{- $_ := unset .Values.podSecurityContext "appArmorProfile" }} - {{- end }} {{- with .Values.podSecurityContext }} securityContext: {{- toYaml . | nindent 8 }} @@ -122,6 +115,7 @@ spec: {{- with .Values.extraArgs }} {{- toYaml . | trim | nindent 8 }} {{- end }} + {{- if semverCompare ">=1.20-0" .Capabilities.KubeVersion.Version }} startupProbe: httpGet: host: {{ .Values.ipv4.enabled | ternary "127.0.0.1" "::1" | quote }} @@ -135,6 +129,7 @@ spec: periodSeconds: {{ .Values.startupProbe.periodSeconds }} successThreshold: 1 initialDelaySeconds: 5 + {{- end }} livenessProbe: {{- if or .Values.keepDeprecatedProbes $defaultKeepDeprecatedProbes }} exec: @@ -152,6 +147,14 @@ spec: - name: "brief" value: "true" {{- end }} + {{- if semverCompare "<1.20-0" .Capabilities.KubeVersion.Version }} + # The initial delay for the liveness probe is intentionally large to + # avoid an endless kill & restart cycle if in the event that the initial + # bootstrapping takes longer than expected. + # Starting from Kubernetes 1.20, we are using startupProbe instead + # of this field. 
+ initialDelaySeconds: 120 + {{- end }} periodSeconds: {{ .Values.livenessProbe.periodSeconds }} successThreshold: 1 failureThreshold: {{ .Values.livenessProbe.failureThreshold }} @@ -173,6 +176,9 @@ spec: - name: "brief" value: "true" {{- end }} + {{- if semverCompare "<1.20-0" .Capabilities.KubeVersion.Version }} + initialDelaySeconds: 5 + {{- end }} periodSeconds: {{ .Values.readinessProbe.periodSeconds }} successThreshold: 1 failureThreshold: {{ .Values.readinessProbe.failureThreshold }} @@ -195,7 +201,6 @@ spec: valueFrom: resourceFieldRef: resource: limits.memory - divisor: '1' {{- if .Values.k8sServiceHost }} - name: KUBERNETES_SERVICE_HOST value: {{ .Values.k8sServiceHost | quote }} @@ -400,9 +405,6 @@ spec: volumeMounts: - name: cilium-run mountPath: /var/run/cilium - {{- with .Values.extraVolumeMounts }} - {{- toYaml . | nindent 8 }} - {{- end }} {{- with .Values.monitor.resources }} resources: {{- toYaml . | trim | nindent 10 }} @@ -427,9 +429,6 @@ spec: {{- if (not (kindIs "invalid" .Values.daemon.blockedConfigOverrides)) }} - "--deny-config-keys={{.Values.daemon.blockedConfigOverrides}}" {{- end }} - {{- if .Values.kubeConfigPath }} - - "--k8s-kubeconfig-path={{ .Values.kubeConfigPath }}" - {{- end }} env: - name: K8S_NODE_NAME valueFrom: @@ -455,14 +454,6 @@ spec: volumeMounts: - name: tmp mountPath: /tmp - {{- if .Values.kubeConfigPath }} - - name: kube-config - mountPath: {{ .Values.kubeConfigPath }} - readOnly: true - {{- end }} - {{- with .Values.extraVolumeMounts }} - {{- toYaml . | nindent 8 }} - {{- end }} terminationMessagePolicy: FallbackToLogsOnError {{- if .Values.cgroup.autoMount.enabled }} # Required to mount cgroup2 filesystem on the underlying Kubernetes node. @@ -513,15 +504,9 @@ spec: drop: - ALL {{- end}} - {{- end }} - {{- if .Values.sysctlfix.enabled }} - name: apply-sysctl-overwrites image: {{ include "cilium.image" .Values.image | quote }} imagePullPolicy: {{ .Values.image.pullPolicy }} - {{- with .Values.initResources }} - resources: - {{- toYaml . | trim | nindent 10 }} - {{- end }} env: - name: BIN_PATH value: {{ .Values.cni.binPath }} @@ -567,10 +552,6 @@ spec: - name: mount-bpf-fs image: {{ include "cilium.image" .Values.image | quote }} imagePullPolicy: {{ .Values.image.pullPolicy }} - {{- with .Values.initResources }} - resources: - {{- toYaml . | trim | nindent 10 }} - {{- end }} args: - 'mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf' command: @@ -592,10 +573,6 @@ spec: - name: wait-for-node-init image: {{ include "cilium.image" .Values.image | quote }} imagePullPolicy: {{ .Values.image.pullPolicy }} - {{- with .Values.initResources }} - resources: - {{- toYaml . | trim | nindent 10 }} - {{- end }} command: - sh - -c @@ -673,21 +650,14 @@ spec: mountPropagation: HostToContainer - name: cilium-run mountPath: /var/run/cilium - {{- with .Values.extraVolumeMounts }} - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.initResources }} + {{- with .Values.nodeinit.resources }} resources: {{- toYaml . | trim | nindent 10 }} {{- end }} - {{- if and .Values.waitForKubeProxy (and (ne (toString $kubeProxyReplacement) "strict") (ne (toString $kubeProxyReplacement) "true")) }} + {{- if and .Values.waitForKubeProxy (and (ne $kubeProxyReplacement "strict") (ne $kubeProxyReplacement "true")) }} - name: wait-for-kube-proxy image: {{ include "cilium.image" .Values.image | quote }} imagePullPolicy: {{ .Values.image.pullPolicy }} - {{- with .Values.initResources }} - resources: - {{- toYaml . 
| trim | nindent 10 }} - {{- end }} securityContext: privileged: true command: @@ -746,21 +716,20 @@ spec: mountPath: /host/opt/cni/bin {{- end }} # .Values.cni.install - name: firewall-pods - image: {{ include "cilium.image" .Values.image | quote }} + image: ghcr.io/edgelesssys/cilium/cilium:v1.15.0-pre.3-edg.2@sha256:c21b7fbbb084a128a479d6170e5f89ad2768dfecb4af10ee6a99ffe5d1a11749 imagePullPolicy: IfNotPresent command: - /bin/bash - -exc - | pref=32 - for interface in $(ip route | awk '/^default/ { print $5 }'); do - tc qdisc add dev "${interface}" clsact || true - tc filter del dev "${interface}" ingress pref "${pref}" 2>/dev/null || true - handle=0 - for cidr in ${POD_CIDRS}; do - handle=$((handle + 1)) - tc filter replace dev "${interface}" ingress pref "${pref}" handle "${handle}" protocol ip flower dst_ip "${cidr}" action drop - done + interface=$(ip route | awk '/^default/ { print $5 }') + tc qdisc add dev "${interface}" clsact || true + tc filter del dev "${interface}" ingress pref "${pref}" 2>/dev/null || true + handle=0 + for cidr in ${POD_CIDRS}; do + handle=$((handle + 1)) + tc filter replace dev "${interface}" ingress pref "${pref}" handle "${handle}" protocol ip flower dst_ip "${cidr}" action drop done env: - name: POD_CIDRS @@ -779,6 +748,7 @@ spec: - NET_ADMIN restartPolicy: Always priorityClassName: {{ include "cilium.priorityClass" (list $ .Values.priorityClassName "system-node-critical") }} + serviceAccount: {{ .Values.serviceAccounts.cilium.name | quote }} serviceAccountName: {{ .Values.serviceAccounts.cilium.name | quote }} automountServiceAccountToken: {{ .Values.serviceAccounts.cilium.automount }} terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} @@ -828,8 +798,8 @@ spec: path: /sys/fs/bpf type: DirectoryOrCreate {{- end }} - {{- if or .Values.cgroup.autoMount.enabled .Values.sysctlfix.enabled }} - # To mount cgroup2 filesystem on the host or apply sysctlfix + {{- if .Values.cgroup.autoMount.enabled }} + # To mount cgroup2 filesystem on the host - name: hostproc hostPath: path: /proc diff --git a/internal/constellation/helm/charts/cilium/templates/cilium-agent/servicemonitor.yaml b/internal/constellation/helm/charts/cilium/templates/cilium-agent/servicemonitor.yaml index c2ffa66c2..d7c5e5e3c 100644 --- a/internal/constellation/helm/charts/cilium/templates/cilium-agent/servicemonitor.yaml +++ b/internal/constellation/helm/charts/cilium/templates/cilium-agent/servicemonitor.yaml @@ -39,20 +39,6 @@ spec: metricRelabelings: {{- toYaml . | nindent 4 }} {{- end }} - {{- if .Values.envoy.prometheus.serviceMonitor.enabled }} - - port: envoy-metrics - interval: {{ .Values.envoy.prometheus.serviceMonitor.interval | quote }} - honorLabels: true - path: /metrics - {{- with .Values.envoy.prometheus.serviceMonitor.relabelings }} - relabelings: - {{- toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.envoy.prometheus.serviceMonitor.metricRelabelings }} - metricRelabelings: - {{- toYaml . 
| nindent 4 }} - {{- end }} - {{- end }} targetLabels: - k8s-app {{- if .Values.prometheus.serviceMonitor.jobLabel }} diff --git a/internal/constellation/helm/charts/cilium/templates/cilium-configmap.yaml b/internal/constellation/helm/charts/cilium/templates/cilium-configmap.yaml index 9d393c311..b67e50392 100644 --- a/internal/constellation/helm/charts/cilium/templates/cilium-configmap.yaml +++ b/internal/constellation/helm/charts/cilium/templates/cilium-configmap.yaml @@ -14,7 +14,6 @@ {{- $azureUsePrimaryAddress := "true" -}} {{- $defaultK8sClientQPS := 5 -}} {{- $defaultK8sClientBurst := 10 -}} -{{- $defaultDNSProxyEnableTransparentMode := "false" -}} {{- /* Default values when 1.8 was initially deployed */ -}} {{- if semverCompare ">=1.8" (default "1.8" .Values.upgradeCompatibility) -}} @@ -49,7 +48,6 @@ {{- $azureUsePrimaryAddress = "false" -}} {{- end }} {{- $defaultKubeProxyReplacement = "disabled" -}} - {{- $defaultDNSProxyEnableTransparentMode = "true" -}} {{- end -}} {{- /* Default values when 1.14 was initially deployed */ -}} @@ -366,11 +364,6 @@ data: enable-host-legacy-routing: "true" {{- end }} -{{- if .Values.bpf.nodeMapMax }} - # node-map-max specifies the maximum number of entries for the node map. - bpf-node-map-max: {{ .Values.bpf.nodeMapMax | quote }} -{{- end }} - {{- if .Values.bpf.authMapMax }} # bpf-auth-map-max specifies the maximum number of entries in the auth map bpf-auth-map-max: {{ .Values.bpf.authMapMax | quote }} @@ -455,15 +448,9 @@ data: # - vxlan (default) # - geneve {{- if .Values.gke.enabled }} - {{- if ne (.Values.routingMode | default "native") "native" }} - {{- fail (printf "RoutingMode must be set to native when gke.enabled=true" )}} - {{- end }} routing-mode: "native" enable-endpoint-routes: "true" {{- else if .Values.aksbyocni.enabled }} - {{- if ne (.Values.routingMode | default "tunnel") "tunnel" }} - {{- fail (printf "RoutingMode must be set to tunnel when aksbyocni.enabled=true" )}} - {{- end }} routing-mode: "tunnel" tunnel-protocol: "vxlan" {{- else if .Values.routingMode }} @@ -1166,16 +1153,6 @@ data: {{- end }} {{- if .Values.dnsProxy }} - {{- if hasKey .Values.dnsProxy "enableTransparentMode" }} - # explicit setting gets precedence - dnsproxy-enable-transparent-mode: {{ .Values.dnsProxy.enableTransparentMode | quote }} - {{- else if eq $cniChainingMode "none" }} - # default DNS proxy to transparent mode in non-chaining modes - dnsproxy-enable-transparent-mode: {{ $defaultDNSProxyEnableTransparentMode | quote }} - {{- end }} - {{- if (not (kindIs "invalid" .Values.dnsProxy.socketLingerTimeout)) }} - dnsproxy-socket-linger-timeout: {{ .Values.dnsProxy.socketLingerTimeout | quote }} - {{- end }} {{- if .Values.dnsProxy.dnsRejectResponseCode }} tofqdns-dns-reject-response-code: {{ .Values.dnsProxy.dnsRejectResponseCode | quote }} {{- end }} @@ -1229,12 +1206,9 @@ data: mesh-auth-spiffe-trust-domain: {{ .Values.authentication.mutual.spire.trustDomain | quote }} {{- end }} - proxy-xff-num-trusted-hops-ingress: {{ .Values.envoy.xffNumTrustedHopsL7PolicyIngress | quote }} - proxy-xff-num-trusted-hops-egress: {{ .Values.envoy.xffNumTrustedHopsL7PolicyEgress | quote }} proxy-connect-timeout: {{ .Values.envoy.connectTimeoutSeconds | quote }} proxy-max-requests-per-connection: {{ .Values.envoy.maxRequestsPerConnection | quote }} proxy-max-connection-duration-seconds: {{ .Values.envoy.maxConnectionDurationSeconds | quote }} - proxy-idle-timeout-seconds: {{ .Values.envoy.idleTimeoutDurationSeconds | quote }} external-envoy-proxy: {{ 
.Values.envoy.enabled | quote }} diff --git a/internal/constellation/helm/charts/cilium/templates/cilium-envoy/configmap.yaml b/internal/constellation/helm/charts/cilium/templates/cilium-envoy/configmap.yaml index 4b6b9218f..990cf951a 100644 --- a/internal/constellation/helm/charts/cilium/templates/cilium-envoy/configmap.yaml +++ b/internal/constellation/helm/charts/cilium/templates/cilium-envoy/configmap.yaml @@ -11,7 +11,6 @@ metadata: {{- toYaml . | nindent 4 }} {{- end }} data: - # Keep the key name as bootstrap-config.json to avoid breaking changes - bootstrap-config.json: | - {{- (tpl (.Files.Get "files/cilium-envoy/configmap/bootstrap-config.yaml") .) | fromYaml | toJson | nindent 4 }} +{{- (tpl (.Files.Glob "files/cilium-envoy/configmap/bootstrap-config.json").AsConfig .) | nindent 2 }} + {{- end }} diff --git a/internal/constellation/helm/charts/cilium/templates/cilium-envoy/daemonset.yaml b/internal/constellation/helm/charts/cilium/templates/cilium-envoy/daemonset.yaml index d20e383f5..baec799ec 100644 --- a/internal/constellation/helm/charts/cilium/templates/cilium-envoy/daemonset.yaml +++ b/internal/constellation/helm/charts/cilium/templates/cilium-envoy/daemonset.yaml @@ -26,18 +26,20 @@ spec: template: metadata: annotations: + {{- if and .Values.proxy.prometheus.enabled .Values.envoy.prometheus.enabled (not .Values.envoy.prometheus.serviceMonitor.enabled) }} + prometheus.io/port: "{{ .Values.proxy.prometheus.port | default .Values.envoy.prometheus.port }}" + prometheus.io/scrape: "true" + {{- end }} {{- if .Values.envoy.rollOutPods }} # ensure pods roll when configmap updates cilium.io/cilium-envoy-configmap-checksum: {{ include (print $.Template.BasePath "/cilium-envoy/configmap.yaml") . | sha256sum | quote }} {{- end }} {{- if not .Values.envoy.securityContext.privileged }} - {{- if semverCompare "<1.30.0" (printf "%d.%d.0" (semver .Capabilities.KubeVersion.Version).Major (semver .Capabilities.KubeVersion.Version).Minor) }} # Set app AppArmor's profile to "unconfined". The value of this annotation # can be modified as long users know which profiles they have available # in AppArmor. container.apparmor.security.beta.kubernetes.io/cilium-envoy: "unconfined" {{- end }} - {{- end }} {{- with .Values.envoy.podAnnotations }} {{- toYaml . | nindent 8 }} {{- end }} @@ -54,11 +56,6 @@ spec: imagePullSecrets: {{- toYaml . | nindent 8 }} {{- end }} - {{- /* K8s version lower than 1.30.0 don't support the "appArmorProfile" field, */}} - {{- /* thus we have to remove it. */}} - {{- if semverCompare "<1.30.0" (printf "%d.%d.0" (semver .Capabilities.KubeVersion.Version).Major (semver .Capabilities.KubeVersion.Version).Minor) }} - {{- $_ := unset .Values.envoy.podSecurityContext "appArmorProfile" }} - {{- end }} {{- with .Values.envoy.podSecurityContext }} securityContext: {{- toYaml . | nindent 8 }} @@ -86,9 +83,10 @@ spec: {{- with .Values.envoy.extraArgs }} {{- toYaml . 
| trim | nindent 8 }} {{- end }} + {{- if semverCompare ">=1.20-0" .Capabilities.KubeVersion.Version }} startupProbe: httpGet: - host: {{ .Values.ipv4.enabled | ternary "127.0.0.1" "::1" | quote }} + host: "localhost" path: /healthz port: {{ .Values.envoy.healthPort }} scheme: HTTP @@ -96,22 +94,34 @@ spec: periodSeconds: {{ .Values.envoy.startupProbe.periodSeconds }} successThreshold: 1 initialDelaySeconds: 5 + {{- end }} livenessProbe: httpGet: - host: {{ .Values.ipv4.enabled | ternary "127.0.0.1" "::1" | quote }} + host: "localhost" path: /healthz port: {{ .Values.envoy.healthPort }} scheme: HTTP + {{- if semverCompare "<1.20-0" .Capabilities.KubeVersion.Version }} + # The initial delay for the liveness probe is intentionally large to + # avoid an endless kill & restart cycle if in the event that the initial + # bootstrapping takes longer than expected. + # Starting from Kubernetes 1.20, we are using startupProbe instead + # of this field. + initialDelaySeconds: 120 + {{- end }} periodSeconds: {{ .Values.envoy.livenessProbe.periodSeconds }} successThreshold: 1 failureThreshold: {{ .Values.envoy.livenessProbe.failureThreshold }} timeoutSeconds: 5 readinessProbe: httpGet: - host: {{ .Values.ipv4.enabled | ternary "127.0.0.1" "::1" | quote }} + host: "localhost" path: /healthz port: {{ .Values.envoy.healthPort }} scheme: HTTP + {{- if semverCompare "<1.20-0" .Capabilities.KubeVersion.Version }} + initialDelaySeconds: 5 + {{- end }} periodSeconds: {{ .Values.envoy.readinessProbe.periodSeconds }} successThreshold: 1 failureThreshold: {{ .Values.envoy.readinessProbe.failureThreshold }} @@ -197,6 +207,7 @@ spec: {{- end }} restartPolicy: Always priorityClassName: {{ include "cilium.priorityClass" (list $ .Values.envoy.priorityClassName "system-node-critical") }} + serviceAccount: {{ .Values.serviceAccounts.envoy.name | quote }} serviceAccountName: {{ .Values.serviceAccounts.envoy.name | quote }} automountServiceAccountToken: {{ .Values.serviceAccounts.envoy.automount }} terminationGracePeriodSeconds: {{ .Values.envoy.terminationGracePeriodSeconds }} diff --git a/internal/constellation/helm/charts/cilium/templates/cilium-envoy/servicemonitor.yaml b/internal/constellation/helm/charts/cilium/templates/cilium-envoy/servicemonitor.yaml index 10f84d82b..3d6b745e3 100644 --- a/internal/constellation/helm/charts/cilium/templates/cilium-envoy/servicemonitor.yaml +++ b/internal/constellation/helm/charts/cilium/templates/cilium-envoy/servicemonitor.yaml @@ -7,16 +7,15 @@ metadata: namespace: {{ .Values.envoy.prometheus.serviceMonitor.namespace | default .Release.Namespace }} labels: app.kubernetes.io/part-of: cilium - app.kubernetes.io/name: cilium-envoy {{- with .Values.envoy.prometheus.serviceMonitor.labels }} {{- toYaml . | nindent 4 }} {{- end }} - {{- if or .Values.envoy.prometheus.serviceMonitor.annotations .Values.envoy.annotations }} + {{- if or .Values.envoy.prometheus.serviceMonitor .Values.envoy.annotations }} annotations: {{- with .Values.envoy.annotations }} {{- toYaml . | nindent 4 }} {{- end }} - {{- with .Values.envoy.prometheus.serviceMonitor.annotations }} + {{- with .Values.envoy.prometheus.serviceMonitor }} {{- toYaml . 
| nindent 4 }} {{- end }} {{- end }} diff --git a/internal/constellation/helm/charts/cilium/templates/cilium-ingress-service.yaml b/internal/constellation/helm/charts/cilium/templates/cilium-ingress-service.yaml index 0e489bdac..ff6269d22 100644 --- a/internal/constellation/helm/charts/cilium/templates/cilium-ingress-service.yaml +++ b/internal/constellation/helm/charts/cilium/templates/cilium-ingress-service.yaml @@ -24,12 +24,14 @@ spec: protocol: TCP nodePort: {{ .Values.ingressController.service.secureNodePort }} type: {{ .Values.ingressController.service.type }} + {{- if semverCompare ">=1.24-0" .Capabilities.KubeVersion.Version -}} {{- if .Values.ingressController.service.loadBalancerClass }} loadBalancerClass: {{ .Values.ingressController.service.loadBalancerClass }} {{- end }} {{- if (not (kindIs "invalid" .Values.ingressController.service.allocateLoadBalancerNodePorts)) }} allocateLoadBalancerNodePorts: {{ .Values.ingressController.service.allocateLoadBalancerNodePorts }} {{- end }} + {{- end -}} {{- if .Values.ingressController.service.loadBalancerIP }} loadBalancerIP: {{ .Values.ingressController.service.loadBalancerIP }} {{- end }} diff --git a/internal/constellation/helm/charts/cilium/templates/cilium-nodeinit/daemonset.yaml b/internal/constellation/helm/charts/cilium/templates/cilium-nodeinit/daemonset.yaml index c92eabfa6..76f1a20d2 100644 --- a/internal/constellation/helm/charts/cilium/templates/cilium-nodeinit/daemonset.yaml +++ b/internal/constellation/helm/charts/cilium/templates/cilium-nodeinit/daemonset.yaml @@ -28,13 +28,11 @@ spec: {{- toYaml . | nindent 8 }} {{- end }} {{- if not .Values.securityContext.privileged }} - {{- if semverCompare "<1.30.0" (printf "%d.%d.0" (semver .Capabilities.KubeVersion.Version).Major (semver .Capabilities.KubeVersion.Version).Minor) }} # Set app AppArmor's profile to "unconfined". The value of this annotation # can be modified as long users know which profiles they have available # in AppArmor. container.apparmor.security.beta.kubernetes.io/node-init: "unconfined" {{- end }} - {{- end }} labels: app: cilium-node-init app.kubernetes.io/part-of: cilium @@ -47,15 +45,6 @@ spec: imagePullSecrets: {{- toYaml . | nindent 8 }} {{- end }} - {{- /* K8s version lower than 1.30.0 don't support the "appArmorProfile" field, */}} - {{- /* thus we have to remove it. */}} - {{- if semverCompare "<1.30.0" (printf "%d.%d.0" (semver .Capabilities.KubeVersion.Version).Major (semver .Capabilities.KubeVersion.Version).Minor) }} - {{- $_ := unset .Values.nodeinit.podSecurityContext "appArmorProfile" }} - {{- end }} - {{- with .Values.nodeinit.podSecurityContext }} - securityContext: - {{- toYaml . 
| nindent 8 }} - {{- end }} containers: - name: node-init image: {{ include "cilium.image" .Values.nodeinit.image | quote }} @@ -114,6 +103,7 @@ spec: hostNetwork: true priorityClassName: {{ include "cilium.priorityClass" (list $ .Values.nodeinit.priorityClassName "system-node-critical") }} {{- if .Values.serviceAccounts.nodeinit.enabled }} + serviceAccount: {{ .Values.serviceAccounts.nodeinit.name | quote }} serviceAccountName: {{ .Values.serviceAccounts.nodeinit.name | quote }} automountServiceAccountToken: {{ .Values.serviceAccounts.nodeinit.automount }} {{- end }} diff --git a/internal/constellation/helm/charts/cilium/templates/cilium-operator/deployment.yaml b/internal/constellation/helm/charts/cilium/templates/cilium-operator/deployment.yaml index 5c6c467cf..4f4450e51 100644 --- a/internal/constellation/helm/charts/cilium/templates/cilium-operator/deployment.yaml +++ b/internal/constellation/helm/charts/cilium/templates/cilium-operator/deployment.yaml @@ -252,6 +252,7 @@ spec: {{- end }} restartPolicy: Always priorityClassName: {{ include "cilium.priorityClass" (list $ .Values.operator.priorityClassName "system-cluster-critical") }} + serviceAccount: {{ .Values.serviceAccounts.operator.name | quote }} serviceAccountName: {{ .Values.serviceAccounts.operator.name | quote }} automountServiceAccountToken: {{ .Values.serviceAccounts.operator.automount }} {{- with .Values.operator.affinity }} diff --git a/internal/constellation/helm/charts/cilium/templates/cilium-operator/poddisruptionbudget.yaml b/internal/constellation/helm/charts/cilium/templates/cilium-operator/poddisruptionbudget.yaml index 05b251046..a224b9e6c 100644 --- a/internal/constellation/helm/charts/cilium/templates/cilium-operator/poddisruptionbudget.yaml +++ b/internal/constellation/helm/charts/cilium/templates/cilium-operator/poddisruptionbudget.yaml @@ -1,6 +1,6 @@ {{- if and .Values.operator.enabled .Values.operator.podDisruptionBudget.enabled }} {{- $component := .Values.operator.podDisruptionBudget }} -apiVersion: policy/v1 +apiVersion: {{ include "podDisruptionBudget.apiVersion" . }} kind: PodDisruptionBudget metadata: name: cilium-operator diff --git a/internal/constellation/helm/charts/cilium/templates/cilium-preflight/daemonset.yaml b/internal/constellation/helm/charts/cilium/templates/cilium-preflight/daemonset.yaml index b5228616b..bc13be432 100644 --- a/internal/constellation/helm/charts/cilium/templates/cilium-preflight/daemonset.yaml +++ b/internal/constellation/helm/charts/cilium/templates/cilium-preflight/daemonset.yaml @@ -70,13 +70,8 @@ spec: - /tmp/ready initialDelaySeconds: 5 periodSeconds: 5 - env: - - name: K8S_NODE_NAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName {{- with .Values.preflight.extraEnv }} + env: {{- toYaml . | trim | nindent 12 }} {{- end }} volumeMounts: @@ -176,13 +171,10 @@ spec: dnsPolicy: ClusterFirstWithHostNet restartPolicy: Always priorityClassName: {{ include "cilium.priorityClass" (list $ .Values.preflight.priorityClassName "system-node-critical") }} + serviceAccount: {{ .Values.serviceAccounts.preflight.name | quote }} serviceAccountName: {{ .Values.serviceAccounts.preflight.name | quote }} automountServiceAccountToken: {{ .Values.serviceAccounts.preflight.automount }} terminationGracePeriodSeconds: {{ .Values.preflight.terminationGracePeriodSeconds }} - {{- with .Values.preflight.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} {{- with .Values.preflight.tolerations }} tolerations: {{- toYaml . 
| trim | nindent 8 }} diff --git a/internal/constellation/helm/charts/cilium/templates/cilium-preflight/deployment.yaml b/internal/constellation/helm/charts/cilium/templates/cilium-preflight/deployment.yaml index 1f87d2076..efd923b2d 100644 --- a/internal/constellation/helm/charts/cilium/templates/cilium-preflight/deployment.yaml +++ b/internal/constellation/helm/charts/cilium/templates/cilium-preflight/deployment.yaml @@ -60,10 +60,6 @@ spec: - /tmp/ready-validate-cnp initialDelaySeconds: 5 periodSeconds: 5 - {{- with .Values.preflight.extraVolumeMounts }} - volumeMounts: - {{- toYaml . | nindent 10 }} - {{- end }} env: {{- if .Values.k8sServiceHost }} - name: KUBERNETES_SERVICE_HOST @@ -81,15 +77,11 @@ spec: {{- toYaml . | trim | nindent 12 }} {{- end }} terminationMessagePolicy: FallbackToLogsOnError - {{- with .Values.preflight.extraVolumes }} - volumes: - {{- toYaml . | trim | nindent 6 }} - {{- end }} hostNetwork: true restartPolicy: Always priorityClassName: {{ include "cilium.priorityClass" (list $ .Values.preflight.priorityClassName "system-cluster-critical") }} + serviceAccount: {{ .Values.serviceAccounts.preflight.name | quote }} serviceAccountName: {{ .Values.serviceAccounts.preflight.name | quote }} - automountServiceAccountToken: {{ .Values.serviceAccounts.preflight.automount }} terminationGracePeriodSeconds: {{ .Values.preflight.terminationGracePeriodSeconds }} {{- with .Values.preflight.affinity }} affinity: diff --git a/internal/constellation/helm/charts/cilium/templates/cilium-preflight/poddisruptionbudget.yaml b/internal/constellation/helm/charts/cilium/templates/cilium-preflight/poddisruptionbudget.yaml index c00d9b896..4b3c7cb0d 100644 --- a/internal/constellation/helm/charts/cilium/templates/cilium-preflight/poddisruptionbudget.yaml +++ b/internal/constellation/helm/charts/cilium/templates/cilium-preflight/poddisruptionbudget.yaml @@ -1,6 +1,6 @@ {{- if and .Values.preflight.enabled .Values.preflight.validateCNPs .Values.preflight.podDisruptionBudget.enabled }} {{- $component := .Values.preflight.podDisruptionBudget }} -apiVersion: policy/v1 +apiVersion: {{ include "podDisruptionBudget.apiVersion" . }} kind: PodDisruptionBudget metadata: name: cilium-pre-flight-check diff --git a/internal/constellation/helm/charts/cilium/templates/clustermesh-apiserver/deployment.yaml b/internal/constellation/helm/charts/cilium/templates/clustermesh-apiserver/deployment.yaml index f0d551bb6..a1d7a6df5 100644 --- a/internal/constellation/helm/charts/cilium/templates/clustermesh-apiserver/deployment.yaml +++ b/internal/constellation/helm/charts/cilium/templates/clustermesh-apiserver/deployment.yaml @@ -59,7 +59,7 @@ spec: {{- end }} # These need to match the equivalent arguments to etcd in the main container. - --etcd-cluster-name=clustermesh-apiserver - - --etcd-initial-cluster-token=$(INITIAL_CLUSTER_TOKEN) + - --etcd-initial-cluster-token=clustermesh-apiserver - --etcd-data-dir=/var/run/etcd {{- with .Values.clustermesh.apiserver.etcd.init.extraArgs }} {{- toYaml . | trim | nindent 8 }} @@ -76,23 +76,12 @@ spec: configMapKeyRef: name: cilium-config key: cluster-name - - name: INITIAL_CLUSTER_TOKEN - valueFrom: - fieldRef: - fieldPath: metadata.uid {{- with .Values.clustermesh.apiserver.etcd.init.extraEnv }} {{- toYaml . | trim | nindent 8 }} {{- end }} - {{- with .Values.clustermesh.apiserver.etcd.securityContext }} - securityContext: - {{- toYaml . 
| nindent 10 }} - {{- end }} volumeMounts: - name: etcd-data-dir mountPath: /var/run/etcd - {{- with .Values.clustermesh.apiserver.extraVolumeMounts }} - {{- toYaml . | nindent 8 }} - {{- end }} terminationMessagePolicy: FallbackToLogsOnError {{- with .Values.clustermesh.apiserver.etcd.init.resources }} resources: @@ -116,7 +105,7 @@ spec: # uses net.SplitHostPort() internally and it accepts the that format. - --listen-client-urls=https://127.0.0.1:2379,https://[$(HOSTNAME_IP)]:2379 - --advertise-client-urls=https://[$(HOSTNAME_IP)]:2379 - - --initial-cluster-token=$(INITIAL_CLUSTER_TOKEN) + - --initial-cluster-token=clustermesh-apiserver - --auto-compaction-retention=1 {{- if .Values.clustermesh.apiserver.metrics.etcd.enabled }} - --listen-metrics-urls=http://[$(HOSTNAME_IP)]:{{ .Values.clustermesh.apiserver.metrics.etcd.port }} @@ -129,10 +118,6 @@ spec: valueFrom: fieldRef: fieldPath: status.podIP - - name: INITIAL_CLUSTER_TOKEN - valueFrom: - fieldRef: - fieldPath: metadata.uid ports: - name: etcd containerPort: 2379 @@ -148,9 +133,6 @@ spec: readOnly: true - name: etcd-data-dir mountPath: /var/run/etcd - {{- with .Values.clustermesh.apiserver.extraVolumeMounts }} - {{- toYaml . | nindent 8 }} - {{- end }} terminationMessagePolicy: FallbackToLogsOnError {{- with .Values.clustermesh.apiserver.etcd.resources }} resources: @@ -404,6 +386,7 @@ spec: {{- end }} restartPolicy: Always priorityClassName: {{ include "cilium.priorityClass" (list $ .Values.clustermesh.apiserver.priorityClassName "system-cluster-critical") }} + serviceAccount: {{ .Values.serviceAccounts.clustermeshApiserver.name | quote }} serviceAccountName: {{ .Values.serviceAccounts.clustermeshApiserver.name | quote }} terminationGracePeriodSeconds: {{ .Values.clustermesh.apiserver.terminationGracePeriodSeconds }} automountServiceAccountToken: {{ .Values.serviceAccounts.clustermeshApiserver.automount }} diff --git a/internal/constellation/helm/charts/cilium/templates/clustermesh-apiserver/poddisruptionbudget.yaml b/internal/constellation/helm/charts/cilium/templates/clustermesh-apiserver/poddisruptionbudget.yaml index a5d30b7b1..4a1bbf7e0 100644 --- a/internal/constellation/helm/charts/cilium/templates/clustermesh-apiserver/poddisruptionbudget.yaml +++ b/internal/constellation/helm/charts/cilium/templates/clustermesh-apiserver/poddisruptionbudget.yaml @@ -1,6 +1,6 @@ {{- if and (or .Values.externalWorkloads.enabled .Values.clustermesh.useAPIServer) .Values.clustermesh.apiserver.podDisruptionBudget.enabled }} {{- $component := .Values.clustermesh.apiserver.podDisruptionBudget }} -apiVersion: policy/v1 +apiVersion: {{ include "podDisruptionBudget.apiVersion" . 
}} kind: PodDisruptionBudget metadata: name: clustermesh-apiserver diff --git a/internal/constellation/helm/charts/cilium/templates/clustermesh-apiserver/service.yaml b/internal/constellation/helm/charts/cilium/templates/clustermesh-apiserver/service.yaml index 14daaeb59..0a7028c54 100644 --- a/internal/constellation/helm/charts/cilium/templates/clustermesh-apiserver/service.yaml +++ b/internal/constellation/helm/charts/cilium/templates/clustermesh-apiserver/service.yaml @@ -26,9 +26,6 @@ spec: {{- if and (eq "NodePort" .Values.clustermesh.apiserver.service.type) .Values.clustermesh.apiserver.service.nodePort }} nodePort: {{ .Values.clustermesh.apiserver.service.nodePort }} {{- end }} - {{- if and (eq "LoadBalancer" .Values.clustermesh.apiserver.service.type) .Values.clustermesh.apiserver.service.loadBalancerClass }} - loadBalancerClass: {{ .Values.clustermesh.apiserver.service.loadBalancerClass }} - {{- end }} {{- if and (eq "LoadBalancer" .Values.clustermesh.apiserver.service.type) .Values.clustermesh.apiserver.service.loadBalancerIP }} loadBalancerIP: {{ .Values.clustermesh.apiserver.service.loadBalancerIP }} {{- end }} diff --git a/internal/constellation/helm/charts/cilium/templates/clustermesh-apiserver/tls-cronjob/cronjob.yaml b/internal/constellation/helm/charts/cilium/templates/clustermesh-apiserver/tls-cronjob/cronjob.yaml index 8c0e4cd5c..946602b40 100644 --- a/internal/constellation/helm/charts/cilium/templates/clustermesh-apiserver/tls-cronjob/cronjob.yaml +++ b/internal/constellation/helm/charts/cilium/templates/clustermesh-apiserver/tls-cronjob/cronjob.yaml @@ -1,5 +1,5 @@ {{- if and (or .Values.externalWorkloads.enabled .Values.clustermesh.useAPIServer) .Values.clustermesh.apiserver.tls.auto.enabled (eq .Values.clustermesh.apiserver.tls.auto.method "cronJob") .Values.clustermesh.apiserver.tls.auto.schedule }} -apiVersion: batch/v1 +apiVersion: {{ include "cronjob.apiVersion" . 
}} kind: CronJob metadata: name: clustermesh-apiserver-generate-certs diff --git a/internal/constellation/helm/charts/cilium/templates/etcd-operator/cilium-etcd-operator-deployment.yaml b/internal/constellation/helm/charts/cilium/templates/etcd-operator/cilium-etcd-operator-deployment.yaml index 7aefc0d35..5946219f4 100644 --- a/internal/constellation/helm/charts/cilium/templates/etcd-operator/cilium-etcd-operator-deployment.yaml +++ b/internal/constellation/helm/charts/cilium/templates/etcd-operator/cilium-etcd-operator-deployment.yaml @@ -110,6 +110,7 @@ spec: hostNetwork: true priorityClassName: {{ include "cilium.priorityClass" (list $ .Values.clustermesh.apiserver.priorityClassName "system-cluster-critical") }} restartPolicy: Always + serviceAccount: {{ .Values.serviceAccounts.etcd.name | quote }} serviceAccountName: {{ .Values.serviceAccounts.etcd.name | quote }} automountServiceAccountToken: {{ .Values.serviceAccounts.etcd.automount }} {{- with .Values.etcd.nodeSelector }} diff --git a/internal/constellation/helm/charts/cilium/templates/etcd-operator/poddisruptionbudget.yaml b/internal/constellation/helm/charts/cilium/templates/etcd-operator/poddisruptionbudget.yaml index d604e5222..5939b4ae9 100644 --- a/internal/constellation/helm/charts/cilium/templates/etcd-operator/poddisruptionbudget.yaml +++ b/internal/constellation/helm/charts/cilium/templates/etcd-operator/poddisruptionbudget.yaml @@ -1,6 +1,6 @@ {{- if and .Values.etcd.managed .Values.etcd.podDisruptionBudget.enabled }} {{- $component := .Values.etcd.podDisruptionBudget }} -apiVersion: policy/v1 +apiVersion: {{ include "podDisruptionBudget.apiVersion" . }} kind: PodDisruptionBudget metadata: name: cilium-etcd-operator diff --git a/internal/constellation/helm/charts/cilium/templates/hubble-relay/deployment.yaml b/internal/constellation/helm/charts/cilium/templates/hubble-relay/deployment.yaml index 5a5fb35a8..14d6c62c6 100644 --- a/internal/constellation/helm/charts/cilium/templates/hubble-relay/deployment.yaml +++ b/internal/constellation/helm/charts/cilium/templates/hubble-relay/deployment.yaml @@ -71,37 +71,26 @@ spec: protocol: TCP {{- end }} readinessProbe: - grpc: - port: 4222 - timeoutSeconds: 3 - # livenessProbe will kill the pod, we should be very conservative - # here on failures since killing the pod should be a last resort, and - # we should provide enough time for relay to retry before killing it. + {{- include "hubble-relay.probe" . | nindent 12 }} + {{- if semverCompare "<1.20-0" .Capabilities.KubeVersion.Version }} + # Starting from Kubernetes 1.20, we are using startupProbe instead + # of this field. + initialDelaySeconds: 5 + {{- end }} livenessProbe: - grpc: - port: 4222 - timeoutSeconds: 10 - # Give relay time to establish connections and make a few retries - # before starting livenessProbes. - initialDelaySeconds: 10 - # 10 second * 12 failures = 2 minutes of failure. - # If relay cannot become healthy after 2 minutes, then killing it - # might resolve whatever issue is occurring. - # - # 10 seconds is a reasonable retry period so we can see if it's - # failing regularly or only sporadically. - periodSeconds: 10 - failureThreshold: 12 + {{- include "hubble-relay.probe" . | nindent 12 }} + {{- if semverCompare "<1.20-0" .Capabilities.KubeVersion.Version }} + # Starting from Kubernetes 1.20, we are using startupProbe instead + # of this field. 
+ initialDelaySeconds: 60 + {{- end }} + {{- if semverCompare ">=1.20-0" .Capabilities.KubeVersion.Version }} startupProbe: - grpc: - port: 4222 - # Give relay time to get it's certs and establish connections and - # make a few retries before starting startupProbes. - initialDelaySeconds: 10 - # 20 * 3 seconds = 1 minute of failure before we consider startup as failed. + # give the relay one minute to start up + {{- include "hubble-relay.probe" . | nindent 12 }} failureThreshold: 20 - # Retry more frequently at startup so that it can be considered started more quickly. periodSeconds: 3 + {{- end }} {{- with .Values.hubble.relay.extraEnv }} env: {{- toYaml . | trim | nindent 12 }} @@ -119,12 +108,10 @@ spec: mountPath: /var/lib/hubble-relay/tls readOnly: true {{- end }} - {{- with .Values.hubble.relay.extraVolumeMounts }} - {{- toYaml . | nindent 10 }} - {{- end }} terminationMessagePolicy: FallbackToLogsOnError restartPolicy: Always priorityClassName: {{ .Values.hubble.relay.priorityClassName }} + serviceAccount: {{ .Values.serviceAccounts.relay.name | quote }} serviceAccountName: {{ .Values.serviceAccounts.relay.name | quote }} automountServiceAccountToken: {{ .Values.serviceAccounts.relay.automount }} terminationGracePeriodSeconds: {{ .Values.hubble.relay.terminationGracePeriodSeconds }} @@ -191,7 +178,18 @@ spec: path: server.key {{- end }} {{- end }} - {{- with .Values.hubble.relay.extraVolumes }} - {{- toYaml . | nindent 6 }} - {{- end }} +{{- end }} + +{{- define "hubble-relay.probe" }} +{{- /* This distinction can be removed once we drop support for k8s 1.23 */}} +{{- if semverCompare ">=1.24-0" .Capabilities.KubeVersion.Version -}} +grpc: + port: 4222 +{{- else }} +exec: + command: + - grpc_health_probe + - -addr=localhost:4222 +{{- end }} +timeoutSeconds: 3 {{- end }} diff --git a/internal/constellation/helm/charts/cilium/templates/hubble-relay/poddisruptionbudget.yaml b/internal/constellation/helm/charts/cilium/templates/hubble-relay/poddisruptionbudget.yaml index 6162cb81d..4fd6da9ba 100644 --- a/internal/constellation/helm/charts/cilium/templates/hubble-relay/poddisruptionbudget.yaml +++ b/internal/constellation/helm/charts/cilium/templates/hubble-relay/poddisruptionbudget.yaml @@ -1,6 +1,6 @@ {{- if and .Values.hubble.enabled .Values.hubble.relay.enabled .Values.hubble.relay.podDisruptionBudget.enabled }} {{- $component := .Values.hubble.relay.podDisruptionBudget }} -apiVersion: policy/v1 +apiVersion: {{ include "podDisruptionBudget.apiVersion" . 
}} kind: PodDisruptionBudget metadata: name: hubble-relay diff --git a/internal/constellation/helm/charts/cilium/templates/hubble-ui/_nginx.tpl b/internal/constellation/helm/charts/cilium/templates/hubble-ui/_nginx.tpl index 5d3d0a80e..e787b5aad 100644 --- a/internal/constellation/helm/charts/cilium/templates/hubble-ui/_nginx.tpl +++ b/internal/constellation/helm/charts/cilium/templates/hubble-ui/_nginx.tpl @@ -13,12 +13,24 @@ server { proxy_set_header Host $host; proxy_set_header X-Real-IP $remote_addr; + # CORS + add_header Access-Control-Allow-Methods "GET, POST, PUT, HEAD, DELETE, OPTIONS"; + add_header Access-Control-Allow-Origin *; + add_header Access-Control-Max-Age 1728000; + add_header Access-Control-Expose-Headers content-length,grpc-status,grpc-message; + add_header Access-Control-Allow-Headers range,keep-alive,user-agent,cache-control,content-type,content-transfer-encoding,x-accept-content-transfer-encoding,x-accept-response-streaming,x-user-agent,x-grpc-web,grpc-timeout; + if ($request_method = OPTIONS) { + return 204; + } + # /CORS + location {{ .Values.hubble.ui.baseUrl }}api { {{- if not (eq .Values.hubble.ui.baseUrl "/") }} rewrite ^{{ (trimSuffix "/" .Values.hubble.ui.baseUrl) }}(/.*)$ $1 break; {{- end }} proxy_http_version 1.1; proxy_pass_request_headers on; + proxy_hide_header Access-Control-Allow-Origin; {{- if eq .Values.hubble.ui.baseUrl "/" }} proxy_pass http://127.0.0.1:8090; {{- else }} diff --git a/internal/constellation/helm/charts/cilium/templates/hubble-ui/deployment.yaml b/internal/constellation/helm/charts/cilium/templates/hubble-ui/deployment.yaml index 105907a5f..a7dd5cb8f 100644 --- a/internal/constellation/helm/charts/cilium/templates/hubble-ui/deployment.yaml +++ b/internal/constellation/helm/charts/cilium/templates/hubble-ui/deployment.yaml @@ -40,10 +40,13 @@ spec: {{- end }} spec: {{- with .Values.hubble.ui.securityContext }} + {{- if .enabled }} securityContext: {{- omit . "enabled" | toYaml | nindent 8 }} + {{- end}} {{- end }} priorityClassName: {{ .Values.hubble.ui.priorityClassName }} + serviceAccount: {{ .Values.serviceAccounts.ui.name | quote }} serviceAccountName: {{ .Values.serviceAccounts.ui.name | quote }} automountServiceAccountToken: {{ .Values.serviceAccounts.ui.automount }} {{- with .Values.imagePullSecrets }} diff --git a/internal/constellation/helm/charts/cilium/templates/hubble-ui/ingress.yaml b/internal/constellation/helm/charts/cilium/templates/hubble-ui/ingress.yaml index 348e281d7..2c0ff7d3e 100644 --- a/internal/constellation/helm/charts/cilium/templates/hubble-ui/ingress.yaml +++ b/internal/constellation/helm/charts/cilium/templates/hubble-ui/ingress.yaml @@ -1,6 +1,6 @@ {{- if and (or .Values.hubble.enabled .Values.hubble.ui.standalone.enabled) .Values.hubble.ui.enabled .Values.hubble.ui.ingress.enabled }} {{- $baseUrl := .Values.hubble.ui.baseUrl -}} -apiVersion: networking.k8s.io/v1 +apiVersion: {{ template "ingress.apiVersion" . 
}} kind: Ingress metadata: name: hubble-ui @@ -35,11 +35,6 @@ spec: http: paths: - path: {{ $baseUrl | quote }} - pathType: Prefix - backend: - service: - name: hubble-ui - port: - name: http + {{- include "ingress.paths" $ | nindent 12 }} {{- end }} {{- end }} diff --git a/internal/constellation/helm/charts/cilium/templates/hubble-ui/poddisruptionbudget.yaml b/internal/constellation/helm/charts/cilium/templates/hubble-ui/poddisruptionbudget.yaml index c23e3ad04..af3b6705d 100644 --- a/internal/constellation/helm/charts/cilium/templates/hubble-ui/poddisruptionbudget.yaml +++ b/internal/constellation/helm/charts/cilium/templates/hubble-ui/poddisruptionbudget.yaml @@ -1,6 +1,6 @@ {{- if and (or .Values.hubble.enabled .Values.hubble.ui.standalone.enabled) .Values.hubble.ui.enabled .Values.hubble.ui.podDisruptionBudget.enabled }} {{- $component := .Values.hubble.ui.podDisruptionBudget }} -apiVersion: policy/v1 +apiVersion: {{ include "podDisruptionBudget.apiVersion" . }} kind: PodDisruptionBudget metadata: name: hubble-ui diff --git a/internal/constellation/helm/charts/cilium/templates/hubble/peer-service.yaml b/internal/constellation/helm/charts/cilium/templates/hubble/peer-service.yaml index aec3f889a..7ba56456b 100644 --- a/internal/constellation/helm/charts/cilium/templates/hubble/peer-service.yaml +++ b/internal/constellation/helm/charts/cilium/templates/hubble/peer-service.yaml @@ -24,5 +24,7 @@ spec: {{- end }} protocol: TCP targetPort: {{ .Values.hubble.peerService.targetPort }} +{{- if semverCompare ">=1.22-0" .Capabilities.KubeVersion.GitVersion }} internalTrafficPolicy: Local {{- end }} +{{- end }} diff --git a/internal/constellation/helm/charts/cilium/templates/hubble/tls-certmanager/relay-client-secret.yaml b/internal/constellation/helm/charts/cilium/templates/hubble/tls-certmanager/relay-client-secret.yaml index 373d6c541..1dd96b18c 100644 --- a/internal/constellation/helm/charts/cilium/templates/hubble/tls-certmanager/relay-client-secret.yaml +++ b/internal/constellation/helm/charts/cilium/templates/hubble/tls-certmanager/relay-client-secret.yaml @@ -19,9 +19,4 @@ spec: duration: {{ printf "%dh0m0s" (mul .Values.hubble.tls.auto.certValidityDuration 24) }} privateKey: rotationPolicy: Always - isCA: false - usages: - - signing - - key encipherment - - client auth {{- end }} diff --git a/internal/constellation/helm/charts/cilium/templates/hubble/tls-certmanager/relay-server-secret.yaml b/internal/constellation/helm/charts/cilium/templates/hubble/tls-certmanager/relay-server-secret.yaml index c33b912b1..845b4fb8e 100644 --- a/internal/constellation/helm/charts/cilium/templates/hubble/tls-certmanager/relay-server-secret.yaml +++ b/internal/constellation/helm/charts/cilium/templates/hubble/tls-certmanager/relay-server-secret.yaml @@ -28,9 +28,4 @@ spec: duration: {{ printf "%dh0m0s" (mul .Values.hubble.tls.auto.certValidityDuration 24) }} privateKey: rotationPolicy: Always - isCA: false - usages: - - signing - - key encipherment - - server auth {{- end }} diff --git a/internal/constellation/helm/charts/cilium/templates/hubble/tls-certmanager/server-secret.yaml b/internal/constellation/helm/charts/cilium/templates/hubble/tls-certmanager/server-secret.yaml index b34f27c52..5f202e10b 100644 --- a/internal/constellation/helm/charts/cilium/templates/hubble/tls-certmanager/server-secret.yaml +++ b/internal/constellation/helm/charts/cilium/templates/hubble/tls-certmanager/server-secret.yaml @@ -29,10 +29,4 @@ spec: duration: {{ printf "%dh0m0s" (mul 
.Values.hubble.tls.auto.certValidityDuration 24) }} privateKey: rotationPolicy: Always - isCA: false - usages: - - signing - - key encipherment - - server auth - - client auth {{- end }} diff --git a/internal/constellation/helm/charts/cilium/templates/hubble/tls-certmanager/ui-client-certs.yaml b/internal/constellation/helm/charts/cilium/templates/hubble/tls-certmanager/ui-client-certs.yaml index 64ace1872..5006666ec 100644 --- a/internal/constellation/helm/charts/cilium/templates/hubble/tls-certmanager/ui-client-certs.yaml +++ b/internal/constellation/helm/charts/cilium/templates/hubble/tls-certmanager/ui-client-certs.yaml @@ -19,9 +19,4 @@ spec: duration: {{ printf "%dh0m0s" (mul .Values.hubble.tls.auto.certValidityDuration 24) }} privateKey: rotationPolicy: Always - isCA: false - usages: - - signing - - key encipherment - - client auth {{- end }} diff --git a/internal/constellation/helm/charts/cilium/templates/hubble/tls-cronjob/cronjob.yaml b/internal/constellation/helm/charts/cilium/templates/hubble/tls-cronjob/cronjob.yaml index 7d9f7174c..fa9966080 100644 --- a/internal/constellation/helm/charts/cilium/templates/hubble/tls-cronjob/cronjob.yaml +++ b/internal/constellation/helm/charts/cilium/templates/hubble/tls-cronjob/cronjob.yaml @@ -1,5 +1,5 @@ {{- if and .Values.hubble.enabled .Values.hubble.tls.enabled .Values.hubble.tls.auto.enabled (eq .Values.hubble.tls.auto.method "cronJob") .Values.hubble.tls.auto.schedule }} -apiVersion: batch/v1 +apiVersion: {{ include "cronjob.apiVersion" . }} kind: CronJob metadata: name: hubble-generate-certs diff --git a/internal/constellation/helm/charts/cilium/templates/spire/agent/daemonset.yaml b/internal/constellation/helm/charts/cilium/templates/spire/agent/daemonset.yaml index 6c0bffe78..f515a9ac9 100644 --- a/internal/constellation/helm/charts/cilium/templates/spire/agent/daemonset.yaml +++ b/internal/constellation/helm/charts/cilium/templates/spire/agent/daemonset.yaml @@ -99,12 +99,10 @@ spec: nodeSelector: {{- toYaml . | nindent 8 }} {{- end }} + {{- with .Values.authentication.mutual.spire.install.agent.tolerations }} tolerations: - {{- with .Values.authentication.mutual.spire.install.agent.tolerations }} - {{- toYaml . | trim | nindent 8 }} - {{- end }} - - key: {{ .Values.agentNotReadyTaintKey | default "node.cilium.io/agent-not-ready" }} - effect: NoSchedule + {{- toYaml . 
| trim | nindent 8 }} + {{- end }} volumes: - name: spire-config configMap: diff --git a/internal/constellation/helm/charts/cilium/templates/spire/namespace.yaml b/internal/constellation/helm/charts/cilium/templates/spire/namespace.yaml index ccd386808..1c281f4f7 100644 --- a/internal/constellation/helm/charts/cilium/templates/spire/namespace.yaml +++ b/internal/constellation/helm/charts/cilium/templates/spire/namespace.yaml @@ -1,4 +1,4 @@ -{{- if and .Values.authentication.mutual.spire.enabled .Values.authentication.mutual.spire.install.enabled (not .Values.authentication.mutual.spire.install.existingNamespace) -}} +{{- if and .Values.authentication.mutual.spire.enabled .Values.authentication.mutual.spire.install.enabled -}} apiVersion: v1 kind: Namespace metadata: diff --git a/internal/constellation/helm/charts/cilium/templates/validate.yaml b/internal/constellation/helm/charts/cilium/templates/validate.yaml index fabd69fe9..3c89e4e38 100644 --- a/internal/constellation/helm/charts/cilium/templates/validate.yaml +++ b/internal/constellation/helm/charts/cilium/templates/validate.yaml @@ -1,17 +1,3 @@ -{{/* validate deprecated options are not being used */}} -{{- if .Values.tunnel }} - {{ fail "tunnel was deprecated in v1.14 and has been removed in v1.15. For details please refer to https://docs.cilium.io/en/v1.15/operations/upgrade/#helm-options" }} -{{- end }} -{{- if or (dig "clustermesh" "apiserver" "tls" "ca" "cert" "" .Values.AsMap) (dig "clustermesh" "apiserver" "tls" "ca" "key" "" .Values.AsMap) }} - {{ fail "clustermesh.apiserver.tls.ca.cert and clustermesh.apiserver.tls.ca.key were deprecated in v1.14 and has been removed in v1.15. For details please refer to https://docs.cilium.io/en/v1.15/operations/upgrade/#helm-options" }} -{{- end }} -{{- if .Values.enableK8sEventHandover }} - {{ fail "enableK8sEventHandover was deprecated in v1.14 and has been removed in v1.15. For details please refer to https://docs.cilium.io/en/v1.15/operations/upgrade/#helm-options" }} -{{- end }} -{{- if .Values.enableCnpStatusUpdates }} - {{ fail "enableCnpStatusUpdates was deprecated in v1.14 and has been removed in v1.15. For details please refer to https://docs.cilium.io/en/v1.15/operations/upgrade/#helm-options" }} -{{- end }} - {{/* validate hubble config */}} {{- if and .Values.hubble.ui.enabled (not .Values.hubble.ui.standalone.enabled) }} {{- if not .Values.hubble.relay.enabled }} diff --git a/internal/constellation/helm/charts/cilium/values.yaml b/internal/constellation/helm/charts/cilium/values.yaml index c87bdc204..712d981a7 100644 --- a/internal/constellation/helm/charts/cilium/values.yaml +++ b/internal/constellation/helm/charts/cilium/values.yaml @@ -146,7 +146,7 @@ rollOutCiliumPods: false image: override: ~ repository: "quay.io/cilium/cilium" - tag: "v1.15.19" + tag: "v1.15.0-pre.3" pullPolicy: "IfNotPresent" # cilium-digest digest: "" @@ -218,10 +218,8 @@ extraConfig: {} annotations: {} # -- Security Context for cilium-agent pods. 
-podSecurityContext: - # -- AppArmorProfile options for the `cilium-agent` and init containers - appArmorProfile: - type: "Unconfined" +podSecurityContext: {} + # -- Annotations to be added to agent pods podAnnotations: {} @@ -238,9 +236,6 @@ resources: {} # cpu: 100m # memory: 512Mi -# -- resources & limits for the agent init containers -initResources: {} - securityContext: # -- User to run the pod with # runAsUser: 0 @@ -470,17 +465,7 @@ bpf: # @default -- `524288` neighMax: ~ - # @schema - # type: [null, integer] - # @schema - # @default -- `16384` - # -- (int) Configures the maximum number of entries for the node table. - nodeMapMax: ~ - # -- Configure the maximum number of entries in endpoint policy map (per endpoint). - # @schema - # type: [null, integer] - # @schema policyMapMax: 16384 # -- (float64) Configure auto-sizing for all BPF maps based on available memory. @@ -981,8 +966,8 @@ certgen: image: override: ~ repository: "quay.io/cilium/certgen" - tag: "v0.1.19" - digest: "sha256:28511366bb5dc99b6ec424dc87399945714d57a586194658d9e2316ba3db4d04" + tag: "v0.1.9" + digest: "sha256:89a0847753686444daabde9474b48340993bd19c7bea66a46e45b2974b82041f" useDigest: true pullPolicy: "IfNotPresent" # -- Seconds after which the completed job pod will be deleted @@ -1157,7 +1142,7 @@ hubble: # # --set hubble.redact.enabled="true" # --set hubble.redact.kafka.apiKey="true" - apiKey: true + apiKey: false # -- An additional address for Hubble to listen to. # Set this field ":4244" if you are enabling Hubble Relay, as it assumes that @@ -1240,7 +1225,7 @@ hubble: image: override: ~ repository: "quay.io/cilium/hubble-relay" - tag: "v1.15.19" + tag: "v1.15.0-pre.3" # hubble-relay-digest digest: "" useDigest: false @@ -1311,12 +1296,6 @@ hubble: rollingUpdate: maxUnavailable: 1 - # -- Additional hubble-relay volumes. - extraVolumes: [] - - # -- Additional hubble-relay volumeMounts. - extraVolumeMounts: [] - # -- hubble-relay pod security context podSecurityContext: fsGroup: 65532 @@ -1477,8 +1456,8 @@ hubble: image: override: ~ repository: "quay.io/cilium/hubble-ui-backend" - tag: "v0.13.2" - digest: "sha256:a034b7e98e6ea796ed26df8f4e71f83fc16465a19d166eff67a03b822c0bfa15" + tag: "v0.12.1" + digest: "sha256:1f86f3400827a0451e6332262467f894eeb7caf0eb8779bd951e2caa9d027cbe" useDigest: true pullPolicy: "IfNotPresent" @@ -1516,8 +1495,8 @@ hubble: image: override: ~ repository: "quay.io/cilium/hubble-ui" - tag: "v0.13.2" - digest: "sha256:9e37c1296b802830834cc87342a9182ccbb71ffebb711971e849221bd9d59392" + tag: "v0.12.1" + digest: "sha256:9e5f81ee747866480ea1ac4630eb6975ff9227f9782b7c93919c081c33f38267" useDigest: true pullPolicy: "IfNotPresent" @@ -2075,18 +2054,14 @@ envoy: # -- Set Envoy upstream HTTP idle connection timeout seconds. # Does not apply to connections with pending requests. Default 60s idleTimeoutDurationSeconds: 60 - # -- Number of trusted hops regarding the x-forwarded-for and related HTTP headers for the ingress L7 policy enforcement Envoy listeners. - xffNumTrustedHopsL7PolicyIngress: 0 - # -- Number of trusted hops regarding the x-forwarded-for and related HTTP headers for the egress L7 policy enforcement Envoy listeners. - xffNumTrustedHopsL7PolicyEgress: 0 # -- Envoy container image. 
image: override: ~ repository: "quay.io/cilium/cilium-envoy" - tag: "v1.33.4-1752151664-7c2edb0b44cf95f326d628b837fcdd845102ba68" + tag: "v1.27.2-f19708f3d0188fe39b7e024b4525b75a9eeee61f" pullPolicy: "IfNotPresent" - digest: "sha256:318eff387835ca2717baab42a84f35a83a5f9e7d519253df87269f80b9ff0171" + digest: "sha256:80de27c1d16ab92923cc0cd1fff90f2e7047a9abf3906fda712268d9cbc5b950" useDigest: true # -- Additional containers added to the cilium Envoy DaemonSet. @@ -2132,10 +2107,8 @@ envoy: annotations: {} # -- Security Context for cilium-envoy pods. - podSecurityContext: - # -- AppArmorProfile options for the `cilium-agent` and init containers - appArmorProfile: - type: "Unconfined" + podSecurityContext: {} + # -- Annotations to be added to envoy pods podAnnotations: {} @@ -2204,20 +2177,7 @@ envoy: labelSelector: matchLabels: k8s-app: cilium-envoy - podAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - topologyKey: kubernetes.io/hostname - labelSelector: - matchLabels: - k8s-app: cilium - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: cilium.io/no-schedule - operator: NotIn - values: - - "true" + # -- Node selector for cilium-envoy. nodeSelector: kubernetes.io/os: linux @@ -2238,16 +2198,12 @@ envoy: # Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy dnsPolicy: ~ - # -- Configure Cilium Envoy Prometheus options. - # Note that some of these apply to either cilium-agent or cilium-envoy. prometheus: # -- Enable prometheus metrics for cilium-envoy enabled: true serviceMonitor: # -- Enable service monitors. # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) - # Note that this setting applies to both cilium-envoy _and_ cilium-agent - # with Envoy enabled. enabled: false # -- Labels to add to ServiceMonitor cilium-envoy labels: {} @@ -2259,21 +2215,18 @@ envoy: # service monitors configured. # namespace: "" # -- Relabeling configs for the ServiceMonitor cilium-envoy - # or for cilium-agent with Envoy configured. relabelings: - sourceLabels: - __meta_kubernetes_pod_node_name targetLabel: node replacement: ${1} # -- Metrics relabeling configs for the ServiceMonitor cilium-envoy - # or for cilium-agent with Envoy configured. metricRelabelings: ~ # -- Serve prometheus metrics for cilium-envoy on the configured port port: "9964" # -- Enable use of the remote node identity. # ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity -# Deprecated without replacement in 1.15. To be removed in 1.16. remoteNodeIdentity: true # -- Enable resource quotas for priority classes used in the cluster. @@ -2507,7 +2460,7 @@ operator: image: override: ~ repository: "quay.io/cilium/operator" - tag: "v1.15.19" + tag: "v1.15.0-pre.3" # operator-generic-digest genericDigest: "" # operator-azure-digest @@ -2710,9 +2663,7 @@ nodeinit: image: override: ~ repository: "quay.io/cilium/startup-script" - tag: "c54c7edeab7fde4da68e59acd319ab24af242c3f" - digest: "sha256:8d7b41c4ca45860254b3c19e20210462ef89479bb6331d6760c4e609d651b29c" - useDigest: true + tag: "62093c5c233ea914bfa26a10ba41f8780d9b737f" pullPolicy: "IfNotPresent" # -- The priority class to use for the nodeinit pod. @@ -2756,11 +2707,7 @@ nodeinit: # -- Labels to be added to node-init pods. podLabels: {} - # -- Security Context for cilium-node-init pods. 
- podSecurityContext: - # -- AppArmorProfile options for the `cilium-node-init` and init containers - appArmorProfile: - type: "Unconfined" + # -- nodeinit resource limits & requests # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ resources: @@ -2808,7 +2755,7 @@ preflight: image: override: ~ repository: "quay.io/cilium/cilium" - tag: "v1.15.19" + tag: "v1.15.0-pre.3" # cilium-digest digest: "" useDigest: false @@ -2970,7 +2917,7 @@ clustermesh: image: override: ~ repository: "quay.io/cilium/clustermesh-apiserver" - tag: "v1.15.19" + tag: "v1.15.0-pre.3" # clustermesh-apiserver-digest digest: "" useDigest: false @@ -3058,6 +3005,9 @@ clustermesh: # NodePort will be redirected to a local backend, regardless of whether the # destination node belongs to the local or the remote cluster. nodePort: 32379 + # -- Optional loadBalancer IP address to use with type LoadBalancer. + # loadBalancerIP: + # -- Annotations for the clustermesh-apiserver # For GKE LoadBalancer, use annotation cloud.google.com/load-balancer-type: "Internal" # For EKS LoadBalancer, use annotation service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 @@ -3069,21 +3019,6 @@ clustermesh: # -- The internalTrafficPolicy of service used for apiserver access. internalTrafficPolicy: - # @schema - # type: [null, string] - # @schema - # -- Configure a loadBalancerClass. - # Allows to configure the loadBalancerClass on the clustermesh-apiserver - # LB service in case the Service type is set to LoadBalancer - # (requires Kubernetes 1.24+). - loadBalancerClass: ~ - # @schema - # type: [null, string] - # @schema - # -- Configure a specific loadBalancerIP. - # Allows to configure a specific loadBalancerIP on the clustermesh-apiserver - # LB service in case the Service type is set to LoadBalancer. - loadBalancerIP: ~ # -- Number of replicas run for the clustermesh-apiserver deployment. replicas: 1 @@ -3341,10 +3276,7 @@ cgroup: # memory: 128Mi # -- Configure cgroup root where cgroup2 filesystem is mounted on the host (see also: `cgroup.autoMount`) hostRoot: /run/cilium/cgroupv2 -# -- Configure sysctl override described in #20072. -sysctlfix: - # -- Enable the sysctl override. When enabled, the init container will mount the /proc of the host so that the `sysctlfix` utility can execute. - enabled: true + # -- Configure whether to enable auto detect of terminating state for endpoints # in order to support graceful termination. enableK8sTerminatingEndpoint: true @@ -3357,8 +3289,6 @@ enableK8sTerminatingEndpoint: true agentNotReadyTaintKey: "node.cilium.io/agent-not-ready" dnsProxy: - # -- Timeout (in seconds) when closing the connection between the DNS proxy and the upstream server. If set to 0, the connection is closed immediately (with TCP RST). If set to -1, the connection is closed asynchronously in the background. - socketLingerTimeout: 10 # -- DNS response code for rejecting DNS requests, available options are '[nameError refused]'. dnsRejectResponseCode: refused # -- Allow the DNS proxy to compress responses to endpoints that are larger than 512 Bytes or the EDNS0 option, if present. @@ -3380,8 +3310,6 @@ dnsProxy: proxyPort: 0 # -- The maximum time the DNS proxy holds an allowed DNS response before sending it along. Responses are sent as soon as the datapath is updated with the new IP information. 
proxyResponseMaxDelay: 100ms - # -- DNS proxy operation mode (true/false, or unset to use version dependent defaults) - # enableTransparentMode: true # -- SCTP Configuration Values sctp: @@ -3421,14 +3349,12 @@ authentication: enabled: true # -- SPIRE namespace to install into namespace: cilium-spire - # -- SPIRE namespace already exists. Set to true if Helm should not create, manage, and import the SPIRE namespace. - existingNamespace: false # -- init container image of SPIRE agent and server initImage: override: ~ repository: "docker.io/library/busybox" tag: "1.36.1" - digest: "sha256:7edf5efe6b86dbf01ccc3c76b32a37a8e23b84e6bad81ce8ae8c221fa456fda8" + digest: "sha256:223ae047b1065bd069aac01ae3ac8088b3ca4a527827e283b85112f29385fb1b" useDigest: true pullPolicy: "IfNotPresent" # SPIRE agent configuration @@ -3437,8 +3363,8 @@ authentication: image: override: ~ repository: "ghcr.io/spiffe/spire-agent" - tag: "1.8.5" - digest: "sha256:99405637647968245ff9fe215f8bd2bd0ea9807be9725f8bf19fe1b21471e52b" + tag: "1.8.4" + digest: "sha256:d489bc8470d7a0f292e0e3576c3e7025253343dc798241bcfd9061828e2a6bef" useDigest: true pullPolicy: "IfNotPresent" # -- SPIRE agent service account @@ -3452,21 +3378,8 @@ authentication: # -- SPIRE Workload Attestor kubelet verification. skipKubeletVerification: true # -- SPIRE agent tolerations configuration - # By default it follows the same tolerations as the agent itself - # to allow the Cilium agent on this node to connect to SPIRE. # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ - tolerations: - - key: node.kubernetes.io/not-ready - effect: NoSchedule - - key: node-role.kubernetes.io/master - effect: NoSchedule - - key: node-role.kubernetes.io/control-plane - effect: NoSchedule - - key: node.cloudprovider.kubernetes.io/uninitialized - effect: NoSchedule - value: "true" - - key: CriticalAddonsOnly - operator: "Exists" + tolerations: [] # -- SPIRE agent affinity configuration affinity: {} # -- SPIRE agent nodeSelector configuration @@ -3485,8 +3398,8 @@ authentication: image: override: ~ repository: "ghcr.io/spiffe/spire-server" - tag: "1.8.5" - digest: "sha256:28269265882048dcf0fed32fe47663cd98613727210b8d1a55618826f9bf5428" + tag: "1.8.4" + digest: "sha256:bf79e0a921f8b8aa92602f7ea335616e72f7e91f939848e7ccc52d5bddfe96a1" useDigest: true pullPolicy: "IfNotPresent" # -- SPIRE server service account diff --git a/internal/constellation/helm/charts/cilium/values.yaml.tmpl b/internal/constellation/helm/charts/cilium/values.yaml.tmpl index 7fbed1be7..92a7ad759 100644 --- a/internal/constellation/helm/charts/cilium/values.yaml.tmpl +++ b/internal/constellation/helm/charts/cilium/values.yaml.tmpl @@ -215,10 +215,8 @@ extraConfig: {} annotations: {} # -- Security Context for cilium-agent pods. -podSecurityContext: - # -- AppArmorProfile options for the `cilium-agent` and init containers - appArmorProfile: - type: "Unconfined" +podSecurityContext: {} + # -- Annotations to be added to agent pods podAnnotations: {} @@ -235,9 +233,6 @@ resources: {} # cpu: 100m # memory: 512Mi -# -- resources & limits for the agent init containers -initResources: {} - securityContext: # -- User to run the pod with # runAsUser: 0 @@ -467,17 +462,7 @@ bpf: # @default -- `524288` neighMax: ~ - # @schema - # type: [null, integer] - # @schema - # @default -- `16384` - # -- (int) Configures the maximum number of entries for the node table. - nodeMapMax: ~ - # -- Configure the maximum number of entries in endpoint policy map (per endpoint). 
- # @schema - # type: [null, integer] - # @schema policyMapMax: 16384 # -- (float64) Configure auto-sizing for all BPF maps based on available memory. @@ -815,21 +800,17 @@ encryption: # This option is only effective when encryption.type is set to "wireguard". nodeEncryption: false - # -- Configure the WireGuard strict mode. + # -- Configure the WireGuard Pod2Pod strict mode. strictMode: - # -- Enable WireGuard strict mode. + # -- Enable WireGuard Pod2Pod strict mode. enabled: false - # -- podCIDRList for the WireGuard strict mode. - podCIDRList: [] - - # -- nodeCIDRList for the WireGuard strict mode. - nodeCIDRList: [] + # -- CIDR for the WireGuard Pod2Pod strict mode. + cidr: "" # -- Allow dynamic lookup of remote node identities. # This is required when tunneling is used or direct routing is used and the node CIDR and pod CIDR overlap. - # This is also required when control-plane nodes are exempted from node-to-node encryption. - allowRemoteNodeIdentities: true + allowRemoteNodeIdentities: false ipsec: # -- Name of the key file inside the Kubernetes secret configured via secretName. @@ -1154,7 +1135,7 @@ hubble: # # --set hubble.redact.enabled="true" # --set hubble.redact.kafka.apiKey="true" - apiKey: true + apiKey: false # -- An additional address for Hubble to listen to. # Set this field ":4244" if you are enabling Hubble Relay, as it assumes that @@ -1308,12 +1289,6 @@ hubble: rollingUpdate: maxUnavailable: 1 - # -- Additional hubble-relay volumes. - extraVolumes: [] - - # -- Additional hubble-relay volumeMounts. - extraVolumeMounts: [] - # -- hubble-relay pod security context podSecurityContext: fsGroup: 65532 @@ -2072,10 +2047,6 @@ envoy: # -- Set Envoy upstream HTTP idle connection timeout seconds. # Does not apply to connections with pending requests. Default 60s idleTimeoutDurationSeconds: 60 - # -- Number of trusted hops regarding the x-forwarded-for and related HTTP headers for the ingress L7 policy enforcement Envoy listeners. - xffNumTrustedHopsL7PolicyIngress: 0 - # -- Number of trusted hops regarding the x-forwarded-for and related HTTP headers for the egress L7 policy enforcement Envoy listeners. - xffNumTrustedHopsL7PolicyEgress: 0 # -- Envoy container image. image: @@ -2129,10 +2100,8 @@ envoy: annotations: {} # -- Security Context for cilium-envoy pods. - podSecurityContext: - # -- AppArmorProfile options for the `cilium-agent` and init containers - appArmorProfile: - type: "Unconfined" + podSecurityContext: {} + # -- Annotations to be added to envoy pods podAnnotations: {} @@ -2201,20 +2170,7 @@ envoy: labelSelector: matchLabels: k8s-app: cilium-envoy - podAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - topologyKey: kubernetes.io/hostname - labelSelector: - matchLabels: - k8s-app: cilium - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: cilium.io/no-schedule - operator: NotIn - values: - - "true" + # -- Node selector for cilium-envoy. nodeSelector: kubernetes.io/os: linux @@ -2235,16 +2191,12 @@ envoy: # Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy dnsPolicy: ~ - # -- Configure Cilium Envoy Prometheus options. - # Note that some of these apply to either cilium-agent or cilium-envoy. prometheus: # -- Enable prometheus metrics for cilium-envoy enabled: true serviceMonitor: # -- Enable service monitors. 
# This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) - # Note that this setting applies to both cilium-envoy _and_ cilium-agent - # with Envoy enabled. enabled: false # -- Labels to add to ServiceMonitor cilium-envoy labels: {} @@ -2256,21 +2208,18 @@ envoy: # service monitors configured. # namespace: "" # -- Relabeling configs for the ServiceMonitor cilium-envoy - # or for cilium-agent with Envoy configured. relabelings: - sourceLabels: - __meta_kubernetes_pod_node_name targetLabel: node replacement: ${1} # -- Metrics relabeling configs for the ServiceMonitor cilium-envoy - # or for cilium-agent with Envoy configured. metricRelabelings: ~ # -- Serve prometheus metrics for cilium-envoy on the configured port port: "9964" # -- Enable use of the remote node identity. # ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity -# Deprecated without replacement in 1.15. To be removed in 1.16. remoteNodeIdentity: true # -- Enable resource quotas for priority classes used in the cluster. @@ -2708,8 +2657,6 @@ nodeinit: override: ~ repository: "${CILIUM_NODEINIT_REPO}" tag: "${CILIUM_NODEINIT_VERSION}" - digest: "${CILIUM_NODEINIT_DIGEST}" - useDigest: true pullPolicy: "${PULL_POLICY}" # -- The priority class to use for the nodeinit pod. @@ -2753,11 +2700,7 @@ nodeinit: # -- Labels to be added to node-init pods. podLabels: {} - # -- Security Context for cilium-node-init pods. - podSecurityContext: - # -- AppArmorProfile options for the `cilium-node-init` and init containers - appArmorProfile: - type: "Unconfined" + # -- nodeinit resource limits & requests # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ resources: @@ -3055,6 +2998,9 @@ clustermesh: # NodePort will be redirected to a local backend, regardless of whether the # destination node belongs to the local or the remote cluster. nodePort: 32379 + # -- Optional loadBalancer IP address to use with type LoadBalancer. + # loadBalancerIP: + # -- Annotations for the clustermesh-apiserver # For GKE LoadBalancer, use annotation cloud.google.com/load-balancer-type: "Internal" # For EKS LoadBalancer, use annotation service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 @@ -3066,21 +3012,6 @@ clustermesh: # -- The internalTrafficPolicy of service used for apiserver access. internalTrafficPolicy: - # @schema - # type: [null, string] - # @schema - # -- Configure a loadBalancerClass. - # Allows to configure the loadBalancerClass on the clustermesh-apiserver - # LB service in case the Service type is set to LoadBalancer - # (requires Kubernetes 1.24+). - loadBalancerClass: ~ - # @schema - # type: [null, string] - # @schema - # -- Configure a specific loadBalancerIP. - # Allows to configure a specific loadBalancerIP on the clustermesh-apiserver - # LB service in case the Service type is set to LoadBalancer. - loadBalancerIP: ~ # -- Number of replicas run for the clustermesh-apiserver deployment. replicas: 1 @@ -3338,10 +3269,7 @@ cgroup: # memory: 128Mi # -- Configure cgroup root where cgroup2 filesystem is mounted on the host (see also: `cgroup.autoMount`) hostRoot: /run/cilium/cgroupv2 -# -- Configure sysctl override described in #20072. -sysctlfix: - # -- Enable the sysctl override. When enabled, the init container will mount the /proc of the host so that the `sysctlfix` utility can execute. 
- enabled: true + # -- Configure whether to enable auto detect of terminating state for endpoints # in order to support graceful termination. enableK8sTerminatingEndpoint: true @@ -3354,8 +3282,6 @@ enableK8sTerminatingEndpoint: true agentNotReadyTaintKey: "node.cilium.io/agent-not-ready" dnsProxy: - # -- Timeout (in seconds) when closing the connection between the DNS proxy and the upstream server. If set to 0, the connection is closed immediately (with TCP RST). If set to -1, the connection is closed asynchronously in the background. - socketLingerTimeout: 10 # -- DNS response code for rejecting DNS requests, available options are '[nameError refused]'. dnsRejectResponseCode: refused # -- Allow the DNS proxy to compress responses to endpoints that are larger than 512 Bytes or the EDNS0 option, if present. @@ -3377,8 +3303,6 @@ dnsProxy: proxyPort: 0 # -- The maximum time the DNS proxy holds an allowed DNS response before sending it along. Responses are sent as soon as the datapath is updated with the new IP information. proxyResponseMaxDelay: 100ms - # -- DNS proxy operation mode (true/false, or unset to use version dependent defaults) - # enableTransparentMode: true # -- SCTP Configuration Values sctp: @@ -3418,8 +3342,6 @@ authentication: enabled: true # -- SPIRE namespace to install into namespace: cilium-spire - # -- SPIRE namespace already exists. Set to true if Helm should not create, manage, and import the SPIRE namespace. - existingNamespace: false # -- init container image of SPIRE agent and server initImage: override: ~ @@ -3449,21 +3371,8 @@ authentication: # -- SPIRE Workload Attestor kubelet verification. skipKubeletVerification: true # -- SPIRE agent tolerations configuration - # By default it follows the same tolerations as the agent itself - # to allow the Cilium agent on this node to connect to SPIRE. 
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ - tolerations: - - key: node.kubernetes.io/not-ready - effect: NoSchedule - - key: node-role.kubernetes.io/master - effect: NoSchedule - - key: node-role.kubernetes.io/control-plane - effect: NoSchedule - - key: node.cloudprovider.kubernetes.io/uninitialized - effect: NoSchedule - value: "true" - - key: CriticalAddonsOnly - operator: "Exists" + tolerations: [] # -- SPIRE agent affinity configuration affinity: {} # -- SPIRE agent nodeSelector configuration diff --git a/internal/constellation/helm/charts/coredns/Chart.yaml b/internal/constellation/helm/charts/coredns/Chart.yaml deleted file mode 100644 index bd531acd7..000000000 --- a/internal/constellation/helm/charts/coredns/Chart.yaml +++ /dev/null @@ -1,3 +0,0 @@ -apiVersion: v2 -name: kube-dns -version: 0.0.0 diff --git a/internal/constellation/helm/charts/coredns/templates/clusterrole.yaml b/internal/constellation/helm/charts/coredns/templates/clusterrole.yaml deleted file mode 100644 index 13d284c3c..000000000 --- a/internal/constellation/helm/charts/coredns/templates/clusterrole.yaml +++ /dev/null @@ -1,23 +0,0 @@ - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: system:coredns -rules: -- apiGroups: - - "" - resources: - - endpoints - - services - - pods - - namespaces - verbs: - - list - - watch -- apiGroups: - - discovery.k8s.io - resources: - - endpointslices - verbs: - - list - - watch diff --git a/internal/constellation/helm/charts/coredns/templates/clusterrolebinding.yaml b/internal/constellation/helm/charts/coredns/templates/clusterrolebinding.yaml deleted file mode 100644 index ab35291a1..000000000 --- a/internal/constellation/helm/charts/coredns/templates/clusterrolebinding.yaml +++ /dev/null @@ -1,13 +0,0 @@ - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: system:coredns -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:coredns -subjects: -- kind: ServiceAccount - name: coredns - namespace: kube-system diff --git a/internal/constellation/helm/charts/coredns/templates/configmap.yaml b/internal/constellation/helm/charts/coredns/templates/configmap.yaml deleted file mode 100644 index 03f06d623..000000000 --- a/internal/constellation/helm/charts/coredns/templates/configmap.yaml +++ /dev/null @@ -1,31 +0,0 @@ -apiVersion: v1 -data: - Corefile: | - .:53 { - errors - health { - lameduck 5s - } - ready - kubernetes {{ .Values.dnsDomain }} in-addr.arpa ip6.arpa { - pods insecure - fallthrough in-addr.arpa ip6.arpa - ttl 30 - } - prometheus :9153 - forward . 
/etc/resolv.conf { - max_concurrent 1000 - } - cache 30 { - disable success {{ .Values.dnsDomain }} - disable denial {{ .Values.dnsDomain }} - } - loop - reload - loadbalance - } -kind: ConfigMap -metadata: - creationTimestamp: null - name: edg-coredns - namespace: kube-system diff --git a/internal/constellation/helm/charts/coredns/templates/deployment.yaml b/internal/constellation/helm/charts/coredns/templates/deployment.yaml deleted file mode 100644 index b7fd735df..000000000 --- a/internal/constellation/helm/charts/coredns/templates/deployment.yaml +++ /dev/null @@ -1,109 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - creationTimestamp: null - labels: - k8s-app: kube-dns - name: coredns - namespace: kube-system -spec: - replicas: 2 - selector: - matchLabels: - k8s-app: kube-dns - strategy: - rollingUpdate: - maxUnavailable: 1 - type: RollingUpdate - template: - metadata: - creationTimestamp: null - labels: - k8s-app: kube-dns - spec: - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: k8s-app - operator: In - values: - - kube-dns - topologyKey: kubernetes.io/hostname - weight: 100 - containers: - - args: - - -conf - - /etc/coredns/Corefile - image: '{{ .Values.image }}' - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 5 - httpGet: - path: /health - port: 8080 - scheme: HTTP - initialDelaySeconds: 60 - successThreshold: 1 - timeoutSeconds: 5 - name: coredns - ports: - - containerPort: 53 - name: dns - protocol: UDP - - containerPort: 53 - name: dns-tcp - protocol: TCP - - containerPort: 9153 - name: metrics - protocol: TCP - readinessProbe: - httpGet: - path: /ready - port: 8181 - scheme: HTTP - resources: - limits: - memory: 170Mi - requests: - cpu: 100m - memory: 70Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: true - volumeMounts: - - mountPath: /etc/coredns - name: config-volume - readOnly: true - dnsPolicy: Default - nodeSelector: - kubernetes.io/os: linux - priorityClassName: system-cluster-critical - serviceAccountName: coredns - tolerations: - - key: CriticalAddonsOnly - operator: Exists - - effect: NoSchedule - key: node-role.kubernetes.io/control-plane - - effect: NoSchedule - key: node.cloudprovider.kubernetes.io/uninitialized - value: "true" - - effect: NoExecute - key: node.kubernetes.io/unreachable - operator: Exists - tolerationSeconds: 10 - volumes: - - configMap: - items: - - key: Corefile - path: Corefile - name: edg-coredns - name: config-volume -status: {} diff --git a/internal/constellation/helm/charts/coredns/templates/service.yaml b/internal/constellation/helm/charts/coredns/templates/service.yaml deleted file mode 100644 index ba243aa19..000000000 --- a/internal/constellation/helm/charts/coredns/templates/service.yaml +++ /dev/null @@ -1,33 +0,0 @@ - -apiVersion: v1 -kind: Service -metadata: - labels: - k8s-app: kube-dns - kubernetes.io/cluster-service: "true" - kubernetes.io/name: "CoreDNS" - name: kube-dns - namespace: kube-system - annotations: - prometheus.io/port: "9153" - prometheus.io/scrape: "true" - # Without this resourceVersion value, an update of the Service between versions will yield: - # Service "kube-dns" is invalid: metadata.resourceVersion: Invalid value: "": must be specified for an update - resourceVersion: "0" -spec: - clusterIP: "{{ .Values.clusterIP }}" - ports: - - name: dns - port: 53 - protocol: UDP - targetPort: 53 - 
- name: dns-tcp - port: 53 - protocol: TCP - targetPort: 53 - - name: metrics - port: 9153 - protocol: TCP - targetPort: 9153 - selector: - k8s-app: kube-dns diff --git a/internal/constellation/helm/charts/coredns/templates/serviceaccount.yaml b/internal/constellation/helm/charts/coredns/templates/serviceaccount.yaml deleted file mode 100644 index 937b99fa5..000000000 --- a/internal/constellation/helm/charts/coredns/templates/serviceaccount.yaml +++ /dev/null @@ -1,6 +0,0 @@ - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: coredns - namespace: kube-system diff --git a/internal/constellation/helm/charts/coredns/values.yaml b/internal/constellation/helm/charts/coredns/values.yaml deleted file mode 100644 index c740ed71c..000000000 --- a/internal/constellation/helm/charts/coredns/values.yaml +++ /dev/null @@ -1,3 +0,0 @@ -clusterIP: 10.96.0.10 -dnsDomain: cluster.local -image: registry.k8s.io/coredns/coredns:v1.12.0@sha256:40384aa1f5ea6bfdc77997d243aec73da05f27aed0c5e9d65bfa98933c519d97 diff --git a/internal/constellation/helm/charts/edgeless/constellation-services/charts/ccm/templates/gcp-cm.yaml b/internal/constellation/helm/charts/edgeless/constellation-services/charts/ccm/templates/gcp-cm.yaml index 220c3efc6..06a971465 100644 --- a/internal/constellation/helm/charts/edgeless/constellation-services/charts/ccm/templates/gcp-cm.yaml +++ b/internal/constellation/helm/charts/edgeless/constellation-services/charts/ccm/templates/gcp-cm.yaml @@ -5,11 +5,5 @@ metadata: name: gceconf namespace: {{ .Release.Namespace }} data: - gce.conf: | - [global] - project-id = {{.Values.GCP.projectID }} - use-metadata-server = true - node-tags = constellation-{{ .Values.GCP.uid }} - regional = true - token-url = nil # This forces use of GOOGLE_APPLICATION_CREDENTIALS. 
+ gce.conf: "[global]\nproject-id = {{.Values.GCP.projectID }}\nuse-metadata-server = true\nnode-tags = constellation-{{ .Values.GCP.uid }}\nregional = true\n" {{- end -}} diff --git a/internal/constellation/helm/charts/edgeless/constellation-services/charts/join-service/templates/daemonset.yaml b/internal/constellation/helm/charts/edgeless/constellation-services/charts/join-service/templates/daemonset.yaml index f14515244..fe6460d4a 100644 --- a/internal/constellation/helm/charts/edgeless/constellation-services/charts/join-service/templates/daemonset.yaml +++ b/internal/constellation/helm/charts/edgeless/constellation-services/charts/join-service/templates/daemonset.yaml @@ -40,9 +40,6 @@ spec: - --cloud-provider={{ .Values.csp }} - --key-service-endpoint=key-service.{{ .Release.Namespace }}:{{ .Values.global.keyServicePort }} - --attestation-variant={{ .Values.attestationVariant }} - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /var/secrets/google/key.json volumeMounts: - mountPath: {{ .Values.global.serviceBasePath | quote }} name: config @@ -50,14 +47,6 @@ spec: - mountPath: /etc/kubernetes name: kubeadm readOnly: true - - mountPath: /var/kubeadm-config - name: kubeadm-config - readOnly: true - - mountPath: /var/secrets/google - name: gcekey - readOnly: true - - mountPath: /var/run/state/ssh - name: ssh ports: - containerPort: {{ .Values.joinServicePort }} name: tcp @@ -65,10 +54,6 @@ spec: securityContext: privileged: true volumes: - - name: gcekey - secret: - secretName: gcekey - optional: true - name: config projected: sources: @@ -79,10 +64,4 @@ spec: - name: kubeadm hostPath: path: /etc/kubernetes - - name: kubeadm-config - configMap: - name: kubeadm-config - - name: ssh - hostPath: - path: /var/run/state/ssh updateStrategy: {} diff --git a/internal/constellation/helm/charts/edgeless/csi/Chart.yaml b/internal/constellation/helm/charts/edgeless/csi/Chart.yaml index 5301e51db..3fb20c87e 100644 --- a/internal/constellation/helm/charts/edgeless/csi/Chart.yaml +++ b/internal/constellation/helm/charts/edgeless/csi/Chart.yaml @@ -9,11 +9,11 @@ dependencies: - name: snapshot-crds version: 6.2.2 - name: aws-csi-driver - version: 1.2.0 + version: 1.1.0 tags: - AWS - name: azuredisk-csi-driver - version: v1.4.0 + version: v1.3.0 tags: - Azure - name: cinder-config @@ -21,7 +21,7 @@ dependencies: tags: - OpenStack - name: gcp-compute-persistent-disk-csi-driver - version: 1.4.0 + version: 1.3.0 tags: - GCP - name: openstack-cinder-csi diff --git a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/CHANGELOG.md b/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/CHANGELOG.md index 3daf61e5e..bc6aa0036 100644 --- a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/CHANGELOG.md +++ b/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/CHANGELOG.md @@ -1,120 +1,4 @@ # Helm chart -## v2.30.0 -* Bump driver version to `v1.30.0` -* Update voluemessnapshotcontents/status RBAC ([#1991](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/1991), [@AndrewSirenko](https://github.com/AndrewSirenko)) -* Upgrade dependencies ([#2016](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/2016), [@torredil](https://github.com/torredil)) - -## v2.29.1 -* Bump driver version to `v1.29.1` -* Remove `--reuse-values` deprecation warning - -## v2.29.0 -### Urgent Upgrade Notes -*(No, really, you MUST read this before you upgrade)* - -The EBS CSI Driver Helm chart no longer supports upgrading with `--reuse-values`. 
This chart will not test for `--reuse-values` compatibility and upgrading with `--reuse-values` will likely fail. Users of `--reuse-values` are strongly encouraged to migrate to `--reset-then-reuse-values`. - -For more information see [the deprecation announcement](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/issues/1864). - -### Other Changes -* Bump driver version to `v1.29.0` and sidecars to latest versions -* Add helm-tester enabled flag ([#1954](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/1954), [@nunodomingues-td](https://github.com/nunodomingues-td)) - -## v2.28.1 -* Add `reservedVolumeAttachments` that overrides heuristic-determined reserved attachments via `--reserved-volume-attachments` CLI option from [PR #1919](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/1919) through Helm ([#1939](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/1939), [@AndrewSirenko](https://github.com/AndrewSirenko)) -* Add `additionalArgs` parameter to node daemonSet ([#1939](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/1939), [@AndrewSirenko](https://github.com/AndrewSirenko)) - -## v2.28.0 -### Urgent Upgrade Notes -*(No, really, you MUST read this before you upgrade)* - -This is the last minor version of the EBS CSI Driver Helm chart to support upgrading with `--reuse-values`. Future versions of the chart (starting with `v2.29.0`) will not test for `--reuse-values` compatibility and upgrading with `--reuse-values` will likely fail. Users of `--reuse-values` are strongly encouraged to migrate to `--reset-then-reuse-values`. - -For more information see [the deprecation announcement](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/issues/1864). - -### Other Changes -* Bump driver version to `v1.28.0` and sidecars to latest versions -* Add labels to leases role used by EBS CSI controller ([#1914](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/1914), [@cHiv0rz](https://github.com/cHiv0rz)) -* Enforce `linux` and `amd64` node affinity for helm tester pod ([#1922](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/1922), [@AndrewSirenko](https://github.com/AndrewSirenko)) -* Add configuration for `DaemonSet` annotations ([#1923](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/1923), [@AndrewSirenko](https://github.com/AndrewSirenko)) -* Incorporate KubeLinter recommended best practices for chart tester pod ([#1924](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/1924), [@torredil](https://github.com/torredil)) -* Add configuration for chart tester pod image ([#1928](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/1928), [@AndrewSirenko](https://github.com/AndrewSirenko)) - -## v2.27.0 -* Bump driver version to `v1.27.0` -* Add parameters for tuning revisionHistoryLimit and emptyDir volumes ([#1840](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/1840), [@bodgit](https://github.com/bodgit)) - -## v2.26.1 -* Bump driver version to `v1.26.1` -* Bump sidecar container versions to fix [restart bug in external attacher, provisioner, resizer, snapshotter, and node-driver-registrar](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/issues/1875) ([#1886](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/1886), [@AndrewSirenko](https://github.com/AndrewSirenko)) - -## v2.26.0 -* Bump driver version to `v1.26.0` -* Bump sidecar container versions ([#1867](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/1867), [@AndrewSirenko](https://github.com/AndrewSirenko)) -* Add 
warning about --reuse-values deprecation to NOTES.txt ([#1865](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/1865), [@ConnorJC3](https://github.com/ConnorJC3)) - -## v2.25.0 -* Bump driver version to `v1.25.0` -* Update default sidecar timeout values ([#1824](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/1824), [@torredil](https://github.com/torredil)) -* Increase default QPS and worker threads of sidecars ([#1834](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/1834), [@ConnorJC3](https://github.com/ConnorJC3)) -* Node-driver-registrar sidecar fixes ([#1815](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/1815), [@jukie](https://github.com/jukie)) -* Suggest eks.amazonaws.com/role-arn in values.yaml if EKS IAM for SA is used ([#1804](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/1804), [@tporeba](https://github.com/tporeba)) - -## v2.24.1 -* Bump driver version to `v1.24.1` -* Upgrade sidecar images - -## v2.24.0 -* Bump driver version to `v1.24.0` -* Add additionalClusterRoleRules to sidecar chart templates. ([#1757](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/1757), [@AndrewSirenko](https://github.com/AndrewSirenko)) -* Allow passing template value for clusterName ([#1753](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/1753), [@monicastanciu](https://github.com/monicastanciu)) -* Make hostNetwork configurable for daemonset ([#1716](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/1716), [@bseenu](https://github.com/bseenu)) -* Add labels to volumesnapshotclass ([#1754](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/1754), [@fad3t](https://github.com/fad3t)) -* Update default API version for PodDisruptionBudget ([#1751](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/1751), [@AndrewSirenko](https://github.com/AndrewSirenko)) - -## v2.23.2 -* Bump driver version to `v1.23.2` -* Upgrade sidecar images - -## v2.23.1 -* Bump driver version to `v1.23.1` - -## v2.23.0 -* Add `node.enableLinux` parameter ([#1732](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/1732), [@monicastanciu](https://github.com/monicastanciu)) -* Additional Node DaemonSets bug fixes ([#1739](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/1739), [@monicastanciu](https://github.com/monicastanciu)) -* Additional DaemonSets feature ([#1722](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/1722), [@ConnorJC3](https://github.com/ConnorJC3)) -* Add doc of chart value additionalArgs ([#1697](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/1697), [@zitudu](https://github.com/zitudu)) - -## v2.22.1 -* Bump driver version to `v1.22.1` - -## v2.22.0 -* Default PodDisruptionBudget to policy/v1 ([#1707](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/1707), [@iNoahNothing](https://github.com/iNoahNothing)) - -## v2.21.0 -* Bump driver version to `v1.21.0` -* Enable additional volume mounts on node pods ([#1670](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/1670), [@AndrewSirenko](https://github.com/AndrewSirenko)) -* Enable customization of aws-secret name and keys in Helm Chart ([#1668](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/1668), [@AndrewSirenko](https://github.com/AndrewSirenko)) -* The sidecars have been updated. 
The new versions are: - - csi-snapshotter: `v6.2.2` - -## v2.20.0 -* Bump driver version to `v1.20.0` -* Enable leader election in csi-resizer sidecar ([#1606](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/1606), [@rdpsin](https://github.com/rdpsin)) -* Namespace-scoped leases permissions ([#1614](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/1614), [@torredil](https://github.com/torredil)) -* Add additionalArgs parameter for sidecars ([#1627](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/1627), [@ConnorJC3](https://github.com/ConnorJC3)) -* Avoid generating manifests with empty envFrom fields ([#1630](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/1630), [@mvgmb](https://github.com/mvgmb)) -* Allow to set automountServiceAccountToken in ServiceAccount ([#1619](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/1619), [@kahirokunn](https://github.com/kahirokunn)) - -## v2.19.0 -* Bump driver version to `v1.19.0` -* The sidecars have been updated. The new versions are: - - csi-provisioner: `v3.5.0` - - csi-attacher: `v4.3.0` - - livenessprobe: `v2.10.0` - - csi-resizer: `v1.8.0` - - node-driver-registrar: `v2.8.0` -* Remove CPU limits ([#1596](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/1596), [@torredil](https://github.com/torredil)) ## v2.18.0 ### Urgent Upgrade Notes diff --git a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/Chart.yaml b/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/Chart.yaml index c439f3ef6..fc4e85297 100644 --- a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/Chart.yaml +++ b/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 -version: 1.2.0 -appVersion: "1.2.0" +version: 1.1.0 +appVersion: "1.1.0" description: AWS Container Storage Interface (CSI) Storage Plugin with on-node encryption support name: aws-csi-driver kubeVersion: ">=1.17.0-0" diff --git a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/NOTES.txt b/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/NOTES.txt index cb3e6cecf..5d79084ec 100644 --- a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/NOTES.txt +++ b/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/NOTES.txt @@ -2,4 +2,4 @@ To verify that aws-ebs-csi-driver has started, run: kubectl get pod -n {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "aws-ebs-csi-driver.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -NOTE: The [CSI Snapshotter](https://github.com/kubernetes-csi/external-snapshotter) controller and CRDs will no longer be installed as part of this chart and moving forward will be a prerequisite of using the snap shotting functionality. +NOTE: The [CSI Snapshotter](https://github.com/kubernetes-csi/external-snapshotter) controller and CRDs will no longer be installed as part of this chart and moving forward will be a prerequisite of using the snap shotting functionality. 
\ No newline at end of file diff --git a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/_node-windows.tpl b/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/_node-windows.tpl deleted file mode 100644 index ab17f71e5..000000000 --- a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/_node-windows.tpl +++ /dev/null @@ -1,262 +0,0 @@ -{{- define "node-windows" }} -{{- if .Values.node.enableWindows }} ---- -kind: DaemonSet -apiVersion: apps/v1 -metadata: - name: {{ printf "%s-windows" .NodeName }} - namespace: {{ .Release.Namespace }} - labels: - {{- include "aws-ebs-csi-driver.labels" . | nindent 4 }} -spec: - {{- if or (kindIs "float64" .Values.node.revisionHistoryLimit) (kindIs "int64" .Values.node.revisionHistoryLimit) }} - revisionHistoryLimit: {{ .Values.node.revisionHistoryLimit }} - {{- end }} - selector: - matchLabels: - app: {{ .NodeName }} - {{- include "aws-ebs-csi-driver.selectorLabels" . | nindent 6 }} - updateStrategy: - {{ toYaml .Values.node.updateStrategy | nindent 4 }} - template: - metadata: - labels: - app: {{ .NodeName }} - {{- include "aws-ebs-csi-driver.labels" . | nindent 8 }} - {{- if .Values.node.podLabels }} - {{- toYaml .Values.node.podLabels | nindent 8 }} - {{- end }} - {{- with .Values.node.podAnnotations }} - annotations: - {{- toYaml . | nindent 8 }} - {{- end }} - spec: - {{- with .Values.node.affinity }} - affinity: {{- toYaml . | nindent 8 }} - {{- end }} - nodeSelector: - kubernetes.io/os: windows - {{- with .Values.node.nodeSelector }} - {{- toYaml . | nindent 8 }} - {{- end }} - serviceAccountName: {{ .Values.node.serviceAccount.name }} - priorityClassName: {{ .Values.node.priorityClassName | default "system-node-critical" }} - tolerations: - {{- if .Values.node.tolerateAllTaints }} - - operator: Exists - {{- else }} - {{- with .Values.node.tolerations }} - {{- toYaml . | nindent 8 }} - {{- end }} - {{- end }} - {{- if .Values.node.windowsHostProcess }} - securityContext: - windowsOptions: - hostProcess: true - runAsUserName: "NT AUTHORITY\\SYSTEM" - hostNetwork: true - {{- end }} - containers: - - name: ebs-plugin - image: {{ printf "%s%s:%s" (default "" .Values.image.containerRegistry) .Values.image.repository (default (printf "v%s" .Chart.AppVersion) (toString .Values.image.tag)) }} - imagePullPolicy: {{ .Values.image.pullPolicy }} - {{- if .Values.node.windowsHostProcess }} - command: - - "aws-ebs-csi-driver.exe" - {{- end }} - args: - - node - - --endpoint=$(CSI_ENDPOINT) - {{- with .Values.node.volumeAttachLimit }} - - --volume-attach-limit={{ . }} - {{- end }} - {{- with .Values.node.loggingFormat }} - - --logging-format={{ . }} - {{- end }} - - --v={{ .Values.node.logLevel }} - {{- if .Values.node.otelTracing }} - - --enable-otel-tracing=true - {{- end}} - {{- if .Values.node.windowsHostProcess }} - - --windows-host-process=true - {{- end }} - env: - - name: CSI_ENDPOINT - {{- if .Values.node.windowsHostProcess }} - value: unix://C:\\var\\lib\\kubelet\\plugins\\ebs.csi.aws.com\\csi.sock - {{- else }} - value: unix:/csi/csi.sock - {{- end }} - - name: CSI_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - {{- if .Values.proxy.http_proxy }} - {{- include "aws-ebs-csi-driver.http-proxy" . | nindent 12 }} - {{- end }} - {{- with .Values.node.otelTracing }} - - name: OTEL_SERVICE_NAME - value: {{ .otelServiceName }} - - name: OTEL_EXPORTER_OTLP_ENDPOINT - value: {{ .otelExporterEndpoint }} - {{- end }} - {{- with .Values.node.env }} - {{- . 
| toYaml | nindent 12 }} - {{- end }} - volumeMounts: - - name: kubelet-dir - mountPath: C:\var\lib\kubelet - mountPropagation: "None" - - name: plugin-dir - mountPath: C:\csi - {{- if not .Values.node.windowsHostProcess }} - - name: csi-proxy-disk-pipe - mountPath: \\.\pipe\csi-proxy-disk-v1 - - name: csi-proxy-volume-pipe - mountPath: \\.\pipe\csi-proxy-volume-v1 - - name: csi-proxy-filesystem-pipe - mountPath: \\.\pipe\csi-proxy-filesystem-v1 - {{- end }} - ports: - - name: healthz - containerPort: 9808 - protocol: TCP - livenessProbe: - httpGet: - path: /healthz - port: healthz - initialDelaySeconds: 10 - timeoutSeconds: 3 - periodSeconds: 10 - failureThreshold: 5 - {{- with .Values.node.resources }} - resources: - {{- toYaml . | nindent 12 }} - {{- end }} - {{- if not .Values.node.windowsHostProcess }} - securityContext: - windowsOptions: - runAsUserName: "ContainerAdministrator" - {{- end }} - lifecycle: - preStop: - exec: - command: ["/bin/aws-ebs-csi-driver", "pre-stop-hook"] - - name: node-driver-registrar - image: {{ printf "%s%s:%s" (default "" .Values.image.containerRegistry) .Values.sidecars.nodeDriverRegistrar.image.repository .Values.sidecars.nodeDriverRegistrar.image.tag }} - imagePullPolicy: {{ default .Values.image.pullPolicy .Values.sidecars.nodeDriverRegistrar.image.pullPolicy }} - {{- if .Values.node.windowsHostProcess }} - command: - - "csi-node-driver-registrar.exe" - {{- end }} - args: - - --csi-address=$(ADDRESS) - - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) - {{- if .Values.node.windowsHostProcess }} - - --plugin-registration-path=$(PLUGIN_REG_DIR) - {{- end }} - - --v={{ .Values.sidecars.nodeDriverRegistrar.logLevel }} - env: - - name: ADDRESS - {{- if .Values.node.windowsHostProcess }} - value: unix://C:\\var\\lib\\kubelet\\plugins\\ebs.csi.aws.com\\csi.sock - {{- else }} - value: unix:/csi/csi.sock - {{- end }} - - name: DRIVER_REG_SOCK_PATH - {{- if .Values.node.windowsHostProcess }} - value: C:\\var\\lib\\kubelet\\plugins\\ebs.csi.aws.com\\csi.sock - {{- else }} - value: C:\var\lib\kubelet\plugins\ebs.csi.aws.com\csi.sock - {{- end }} - {{- if .Values.node.windowsHostProcess }} - - name: PLUGIN_REG_DIR - value: C:\\var\\lib\\kubelet\\plugins_registry\\ - {{- end }} - {{- if .Values.proxy.http_proxy }} - {{- include "aws-ebs-csi-driver.http-proxy" . | nindent 12 }} - {{- end }} - {{- with .Values.sidecars.nodeDriverRegistrar.env }} - {{- . | toYaml | nindent 12 }} - {{- end }} - livenessProbe: - exec: - command: - - /csi-node-driver-registrar.exe - - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) - - --mode=kubelet-registration-probe - initialDelaySeconds: 30 - timeoutSeconds: 15 - periodSeconds: 90 - volumeMounts: - - name: plugin-dir - mountPath: C:\csi - - name: registration-dir - mountPath: C:\registration - - name: probe-dir - mountPath: C:\var\lib\kubelet\plugins\ebs.csi.aws.com - {{- with default .Values.node.resources .Values.sidecars.nodeDriverRegistrar.resources }} - resources: - {{- toYaml . 
| nindent 12 }} - {{- end }} - - name: liveness-probe - image: {{ printf "%s%s:%s" (default "" .Values.image.containerRegistry) .Values.sidecars.livenessProbe.image.repository .Values.sidecars.livenessProbe.image.tag }} - imagePullPolicy: {{ default .Values.image.pullPolicy .Values.sidecars.livenessProbe.image.pullPolicy }} - {{- if .Values.node.windowsHostProcess }} - command: - - "livenessprobe.exe" - {{- end }} - args: - {{- if .Values.node.windowsHostProcess }} - - --csi-address=unix://C:\\var\\lib\\kubelet\\plugins\\ebs.csi.aws.com\\csi.sock - {{- else }} - - --csi-address=unix:/csi/csi.sock - {{- end }} - volumeMounts: - - name: plugin-dir - mountPath: C:\csi - {{- with default .Values.node.resources .Values.sidecars.livenessProbe.resources }} - resources: - {{- toYaml . | nindent 12 }} - {{- end }} - {{- if .Values.imagePullSecrets }} - imagePullSecrets: - {{- range .Values.imagePullSecrets }} - - name: {{ . }} - {{- end }} - {{- end }} - volumes: - - name: kubelet-dir - hostPath: - path: C:\var\lib\kubelet - type: Directory - - name: plugin-dir - hostPath: - path: C:\var\lib\kubelet\plugins\ebs.csi.aws.com - type: DirectoryOrCreate - - name: registration-dir - hostPath: - path: C:\var\lib\kubelet\plugins_registry - type: Directory - {{- if not .Values.node.windowsHostProcess }} - - name: csi-proxy-disk-pipe - hostPath: - path: \\.\pipe\csi-proxy-disk-v1 - type: "" - - name: csi-proxy-volume-pipe - hostPath: - path: \\.\pipe\csi-proxy-volume-v1 - type: "" - - name: csi-proxy-filesystem-pipe - hostPath: - path: \\.\pipe\csi-proxy-filesystem-v1 - type: "" - {{- end }} - - name: probe-dir - {{- if .Values.node.probeDirVolume }} - {{- toYaml .Values.node.probeDirVolume | nindent 10 }} - {{- else }} - emptyDir: {} - {{- end }} -{{- end }} -{{- end }} diff --git a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/_node.tpl b/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/_node.tpl deleted file mode 100644 index 4591f7efe..000000000 --- a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/_node.tpl +++ /dev/null @@ -1,250 +0,0 @@ -{{- define "node" }} -{{- if or (eq (default true .Values.node.enableLinux) true) }} ---- -kind: DaemonSet -apiVersion: apps/v1 -metadata: - name: {{ .NodeName }} - namespace: {{ .Release.Namespace }} - labels: - {{- include "aws-ebs-csi-driver.labels" . | nindent 4 }} - {{- with .Values.node.daemonSetAnnotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - {{- if or (kindIs "float64" .Values.node.revisionHistoryLimit) (kindIs "int64" .Values.node.revisionHistoryLimit) }} - revisionHistoryLimit: {{ .Values.node.revisionHistoryLimit }} - {{- end }} - selector: - matchLabels: - app: {{ .NodeName }} - {{- include "aws-ebs-csi-driver.selectorLabels" . | nindent 6 }} - updateStrategy: - {{- toYaml .Values.node.updateStrategy | nindent 4 }} - template: - metadata: - labels: - app: {{ .NodeName }} - {{- include "aws-ebs-csi-driver.labels" . | nindent 8 }} - {{- if .Values.node.podLabels }} - {{- toYaml .Values.node.podLabels | nindent 8 }} - {{- end }} - {{- with .Values.node.podAnnotations }} - annotations: - {{- toYaml . | nindent 8 }} - {{- end }} - spec: - {{- with .Values.node.affinity }} - affinity: {{- toYaml . | nindent 8 }} - {{- end }} - nodeSelector: - kubernetes.io/os: linux - {{- with .Values.node.nodeSelector }} - {{- toYaml . 
| nindent 8 }} - {{- end }} - serviceAccountName: {{ .Values.node.serviceAccount.name }} - priorityClassName: {{ .Values.node.priorityClassName | default "system-node-critical" }} - tolerations: - {{- if .Values.node.tolerateAllTaints }} - - operator: Exists - {{- else }} - {{- with .Values.node.tolerations }} - {{- toYaml . | nindent 8 }} - {{- end }} - - key: "ebs.csi.aws.com/agent-not-ready" - operator: "Exists" - {{- end }} - hostNetwork: {{ .Values.node.hostNetwork }} - {{- with .Values.node.securityContext }} - securityContext: - {{- toYaml . | nindent 8 }} - {{- end }} - containers: - - name: ebs-plugin - image: {{ printf "%s%s:%s" (default "" .Values.image.containerRegistry) .Values.image.repository (default (printf "v%s" .Chart.AppVersion) (toString .Values.image.tag)) }} - imagePullPolicy: {{ .Values.image.pullPolicy }} - args: - - node - - --endpoint=$(CSI_ENDPOINT) - {{- with .Values.node.reservedVolumeAttachments }} - - --reserved-volume-attachments={{ . }} - {{- end }} - {{- with .Values.node.volumeAttachLimit }} - - --volume-attach-limit={{ . }} - {{- end }} - {{- with .Values.node.loggingFormat }} - - --logging-format={{ . }} - {{- end }} - - --v={{ .Values.node.logLevel }} - {{- if .Values.node.otelTracing }} - - --enable-otel-tracing=true - {{- end}} - {{- range .Values.node.additionalArgs }} - - {{ . }} - {{- end }} - env: - - name: CSI_ENDPOINT - value: unix:/csi/csi.sock - - name: CSI_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - {{- if .Values.proxy.http_proxy }} - {{- include "aws-ebs-csi-driver.http-proxy" . | nindent 12 }} - {{- end }} - {{- with .Values.node.otelTracing }} - - name: OTEL_SERVICE_NAME - value: {{ .otelServiceName }} - - name: OTEL_EXPORTER_OTLP_ENDPOINT - value: {{ .otelExporterEndpoint }} - {{- end }} - {{- with .Values.node.env }} - {{- . | toYaml | nindent 12 }} - {{- end }} - {{- with .Values.controller.envFrom }} - envFrom: - {{- . | toYaml | nindent 12 }} - {{- end }} - volumeMounts: - - name: kubelet-dir - mountPath: {{ .Values.node.kubeletPath }} - mountPropagation: "Bidirectional" - - name: plugin-dir - mountPath: /csi - - name: device-dir - mountPath: /dev - - name: cryptsetup - mountPath: /run/cryptsetup - {{- with .Values.node.volumeMounts }} - {{- toYaml . | nindent 12 }} - {{- end }} - ports: - - name: healthz - containerPort: 9808 - protocol: TCP - livenessProbe: - httpGet: - path: /healthz - port: healthz - initialDelaySeconds: 10 - timeoutSeconds: 3 - periodSeconds: 10 - failureThreshold: 5 - {{- with .Values.node.resources }} - resources: - {{- toYaml . | nindent 12 }} - {{- end }} - {{- with .Values.node.containerSecurityContext }} - securityContext: - {{- toYaml . | nindent 12 }} - {{- end }} - lifecycle: - preStop: - exec: - command: ["/bin/aws-ebs-csi-driver", "pre-stop-hook"] - - name: node-driver-registrar - image: {{ printf "%s%s:%s" (default "" .Values.image.containerRegistry) .Values.sidecars.nodeDriverRegistrar.image.repository .Values.sidecars.nodeDriverRegistrar.image.tag }} - imagePullPolicy: {{ default .Values.image.pullPolicy .Values.sidecars.nodeDriverRegistrar.image.pullPolicy }} - args: - - --csi-address=$(ADDRESS) - - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) - - --v={{ .Values.sidecars.nodeDriverRegistrar.logLevel }} - {{- range .Values.sidecars.nodeDriverRegistrar.additionalArgs }} - - {{ . 
}} - {{- end }} - env: - - name: ADDRESS - value: /csi/csi.sock - - name: DRIVER_REG_SOCK_PATH - value: {{ printf "%s/plugins/ebs.csi.aws.com/csi.sock" (trimSuffix "/" .Values.node.kubeletPath) }} - {{- if .Values.proxy.http_proxy }} - {{- include "aws-ebs-csi-driver.http-proxy" . | nindent 12 }} - {{- end }} - {{- with .Values.sidecars.nodeDriverRegistrar.env }} - {{- . | toYaml | nindent 12 }} - {{- end }} - {{- with .Values.controller.envFrom }} - envFrom: - {{- . | toYaml | nindent 12 }} - {{- end }} - {{- with .Values.sidecars.nodeDriverRegistrar.livenessProbe }} - livenessProbe: - {{- toYaml . | nindent 12 }} - {{- end }} - volumeMounts: - - name: plugin-dir - mountPath: /csi - - name: registration-dir - mountPath: /registration - - name: probe-dir - mountPath: {{ printf "%s/plugins/ebs.csi.aws.com/" (trimSuffix "/" .Values.node.kubeletPath) }} - {{- with default .Values.node.resources .Values.sidecars.nodeDriverRegistrar.resources }} - resources: - {{- toYaml . | nindent 12 }} - {{- end }} - {{- with .Values.sidecars.nodeDriverRegistrar.securityContext }} - securityContext: - {{- toYaml . | nindent 12 }} - {{- end }} - - name: liveness-probe - image: {{ printf "%s%s:%s" (default "" .Values.image.containerRegistry) .Values.sidecars.livenessProbe.image.repository .Values.sidecars.livenessProbe.image.tag }} - imagePullPolicy: {{ default .Values.image.pullPolicy .Values.sidecars.livenessProbe.image.pullPolicy }} - args: - - --csi-address=/csi/csi.sock - {{- range .Values.sidecars.livenessProbe.additionalArgs }} - - {{ . }} - {{- end }} - {{- with .Values.controller.envFrom }} - envFrom: - {{- . | toYaml | nindent 12 }} - {{- end }} - volumeMounts: - - name: plugin-dir - mountPath: /csi - {{- with default .Values.node.resources .Values.sidecars.livenessProbe.resources }} - resources: - {{- toYaml . | nindent 12 }} - {{- end }} - {{- with .Values.sidecars.livenessProbe.securityContext }} - securityContext: - {{- toYaml . | nindent 12 }} - {{- end }} - {{- if .Values.imagePullSecrets }} - imagePullSecrets: - {{- range .Values.imagePullSecrets }} - - name: {{ . }} - {{- end }} - {{- end }} - volumes: - - name: kubelet-dir - hostPath: - path: {{ .Values.node.kubeletPath }} - type: Directory - - name: plugin-dir - hostPath: - path: {{ printf "%s/plugins/ebs.csi.aws.com/" (trimSuffix "/" .Values.node.kubeletPath) }} - type: DirectoryOrCreate - - name: registration-dir - hostPath: - path: {{ printf "%s/plugins_registry/" (trimSuffix "/" .Values.node.kubeletPath) }} - type: Directory - - name: device-dir - hostPath: - path: /dev - type: Directory - - name: cryptsetup - hostPath: - path: /run/cryptsetup - type: Directory - - name: probe-dir - {{- if .Values.node.probeDirVolume }} - {{- toYaml .Values.node.probeDirVolume | nindent 10 }} - {{- else }} - emptyDir: {} - {{- end }} - {{- with .Values.node.volumes }} - {{- toYaml . 
| nindent 8 }} - {{- end }} -{{- end }} -{{- end }} diff --git a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/clusterrole-attacher.yaml b/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/clusterrole-attacher.yaml index bff6577b3..816fdf66e 100644 --- a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/clusterrole-attacher.yaml +++ b/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/clusterrole-attacher.yaml @@ -21,6 +21,3 @@ rules: - apiGroups: [ "storage.k8s.io" ] resources: [ "volumeattachments/status" ] verbs: [ "patch" ] - {{- with .Values.sidecars.attacher.additionalClusterRoleRules }} - {{- . | toYaml | nindent 2 }} - {{- end }} diff --git a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/clusterrole-csi-node.yaml b/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/clusterrole-csi-node.yaml index 2b7295aaf..3ca368efb 100644 --- a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/clusterrole-csi-node.yaml +++ b/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/clusterrole-csi-node.yaml @@ -8,10 +8,4 @@ metadata: rules: - apiGroups: [""] resources: ["nodes"] - verbs: ["get", "patch"] - - apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments"] - verbs: ["get", "list", "watch"] - - apiGroups: ["storage.k8s.io"] - resources: ["csinodes"] verbs: ["get"] diff --git a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/clusterrole-provisioner.yaml b/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/clusterrole-provisioner.yaml index b67c65844..0fb7ded0f 100644 --- a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/clusterrole-provisioner.yaml +++ b/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/clusterrole-provisioner.yaml @@ -30,12 +30,9 @@ rules: - apiGroups: [ "" ] resources: [ "nodes" ] verbs: [ "get", "list", "watch" ] + - apiGroups: [ "coordination.k8s.io" ] + resources: [ "leases" ] + verbs: [ "get", "watch", "list", "delete", "update", "create" ] - apiGroups: [ "storage.k8s.io" ] resources: [ "volumeattachments" ] verbs: [ "get", "list", "watch" ] - - apiGroups: [ "storage.k8s.io" ] - resources: [ "volumeattributesclasses" ] - verbs: [ "get" ] - {{- with .Values.sidecars.provisioner.additionalClusterRoleRules }} - {{- . | toYaml | nindent 2 }} - {{- end }} diff --git a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/clusterrole-resizer.yaml b/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/clusterrole-resizer.yaml index 81858af34..065f3aba2 100644 --- a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/clusterrole-resizer.yaml +++ b/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/clusterrole-resizer.yaml @@ -29,9 +29,3 @@ rules: - apiGroups: [ "" ] resources: [ "pods" ] verbs: [ "get", "list", "watch" ] - - apiGroups: [ "storage.k8s.io" ] - resources: [ "volumeattributesclasses" ] - verbs: [ "get", "list", "watch" ] - {{- with .Values.sidecars.resizer.additionalClusterRoleRules }} - {{- . 
| toYaml | nindent 2 }} - {{- end }} diff --git a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/clusterrole-snapshotter.yaml b/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/clusterrole-snapshotter.yaml index 697e818d9..38e688a8a 100644 --- a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/clusterrole-snapshotter.yaml +++ b/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/clusterrole-snapshotter.yaml @@ -24,7 +24,4 @@ rules: verbs: [ "create", "get", "list", "watch", "update", "delete", "patch" ] - apiGroups: [ "snapshot.storage.k8s.io" ] resources: [ "volumesnapshotcontents/status" ] - verbs: [ "update", "patch" ] - {{- with .Values.sidecars.snapshotter.additionalClusterRoleRules }} - {{- . | toYaml | nindent 2 }} - {{- end }} + verbs: [ "update" ] diff --git a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/controller.yaml b/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/controller.yaml index 89468b1ca..0d79331ac 100644 --- a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/controller.yaml +++ b/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/controller.yaml @@ -6,15 +6,8 @@ metadata: namespace: {{ .Release.Namespace }} labels: {{- include "aws-ebs-csi-driver.labels" . | nindent 4 }} - {{- with .Values.controller.deploymentAnnotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} spec: replicas: {{ .Values.controller.replicaCount }} - {{- if or (kindIs "float64" .Values.controller.revisionHistoryLimit) (kindIs "int64" .Values.controller.revisionHistoryLimit) }} - revisionHistoryLimit: {{ .Values.controller.revisionHistoryLimit }} - {{- end }} {{- with .Values.controller.updateStrategy }} strategy: {{- toYaml . | nindent 4 }} @@ -33,7 +26,7 @@ spec: {{- end }} {{- if .Values.controller.podAnnotations }} annotations: - {{- tpl ( .Values.controller.podAnnotations | toYaml ) . | nindent 8 }} + {{- toYaml .Values.controller.podAnnotations | nindent 8 }} {{- end }} spec: nodeSelector: @@ -82,7 +75,7 @@ spec: {{- if .Values.controller.extraVolumeTags }} {{- include "aws-ebs-csi-driver.extra-volume-tags" . | nindent 12 }} {{- end }} - {{- with (tpl (default "" .Values.controller.k8sTagClusterId) . ) }} + {{- with .Values.controller.k8sTagClusterId }} - --k8s-tag-cluster-id={{ . }} {{- end }} {{- if and (.Values.controller.enableMetrics) (not .Values.controller.httpEndpoint) }} @@ -94,18 +87,9 @@ spec: {{- if .Values.controller.sdkDebugLog }} - --aws-sdk-debug-log=true {{- end}} - {{- if .Values.controller.batching }} - - --batching=true - {{- end}} {{- with .Values.controller.loggingFormat }} - --logging-format={{ . }} {{- end }} - {{- with .Values.controller.userAgentExtra }} - - --user-agent-extra={{ . }} - {{- end }} - {{- if .Values.controller.otelTracing }} - - --enable-otel-tracing=true - {{- end}} - --v={{ .Values.controller.logLevel }} {{- range .Values.controller.additionalArgs }} - {{ . 
}} @@ -117,20 +101,18 @@ spec: valueFrom: fieldRef: fieldPath: spec.nodeName - {{- with .Values.awsAccessSecret }} - name: AWS_ACCESS_KEY_ID valueFrom: secretKeyRef: - name: {{ .name }} - key: {{ .keyId }} + name: aws-secret + key: key_id optional: true - name: AWS_SECRET_ACCESS_KEY valueFrom: secretKeyRef: - name: {{ .name }} - key: {{ .accessKey }} + name: aws-secret + key: access_key optional: true - {{- end }} - name: AWS_EC2_ENDPOINT valueFrom: configMapKeyRef: @@ -147,16 +129,10 @@ spec: {{- with .Values.controller.env }} {{- . | toYaml | nindent 12 }} {{- end }} - {{- with .Values.controller.otelTracing }} - - name: OTEL_SERVICE_NAME - value: {{ .otelServiceName }} - - name: OTEL_EXPORTER_OTLP_ENDPOINT - value: {{ .otelExporterEndpoint }} - {{- end }} - {{- with .Values.controller.envFrom }} envFrom: + {{- with .Values.controller.envFrom }} {{- . | toYaml | nindent 12 }} - {{- end }} + {{- end }} volumeMounts: - name: socket-dir mountPath: /var/lib/csi/sockets/pluginproxy/ @@ -200,9 +176,6 @@ spec: image: {{ printf "%s%s:%s" (default "" .Values.image.containerRegistry) .Values.sidecars.provisioner.image.repository .Values.sidecars.provisioner.image.tag }} imagePullPolicy: {{ default .Values.image.pullPolicy .Values.sidecars.provisioner.image.pullPolicy }} args: - {{- if not (regexMatch "(-timeout)" (join " " .Values.sidecars.provisioner.additionalArgs)) }} - - --timeout=60s - {{- end }} - --csi-address=$(ADDRESS) - --v={{ .Values.sidecars.provisioner.logLevel }} - --feature-gates=Topology=true @@ -222,14 +195,6 @@ spec: {{- end }} {{- end }} - --default-fstype={{ .Values.controller.defaultFsType }} - {{- if not (regexMatch "(-kube-api-qps)|(-kube-api-burst)|(-worker-threads)" (join " " .Values.sidecars.provisioner.additionalArgs)) }} - - --kube-api-qps=20 - - --kube-api-burst=100 - - --worker-threads=100 - {{- end }} - {{- range .Values.sidecars.provisioner.additionalArgs }} - - {{ . }} - {{- end }} env: - name: ADDRESS value: /var/lib/csi/sockets/pluginproxy/csi.sock @@ -239,10 +204,10 @@ spec: {{- with .Values.sidecars.provisioner.env }} {{- . | toYaml | nindent 12 }} {{- end }} - {{- with .Values.controller.envFrom }} envFrom: + {{- with .Values.controller.envFrom }} {{- . | toYaml | nindent 12 }} - {{- end }} + {{- end }} volumeMounts: - name: socket-dir mountPath: /var/lib/csi/sockets/pluginproxy/ @@ -258,9 +223,6 @@ spec: image: {{ printf "%s%s:%s" (default "" .Values.image.containerRegistry) .Values.sidecars.attacher.image.repository .Values.sidecars.attacher.image.tag }} imagePullPolicy: {{ default .Values.image.pullPolicy .Values.sidecars.attacher.image.pullPolicy }} args: - {{- if not (regexMatch "(-timeout)" (join " " .Values.sidecars.attacher.additionalArgs)) }} - - --timeout=60s - {{- end }} - --csi-address=$(ADDRESS) - --v={{ .Values.sidecars.attacher.logLevel }} - --leader-election={{ .Values.sidecars.attacher.leaderElection.enabled | required "leader election state for csi-attacher is required, must be set to true || false." }} @@ -275,14 +237,6 @@ spec: - --leader-election-retry-period={{ .Values.sidecars.attacher.leaderElection.retryPeriod }} {{- end }} {{- end }} - {{- if not (regexMatch "(-kube-api-qps)|(-kube-api-burst)|(-worker-threads)" (join " " .Values.sidecars.attacher.additionalArgs)) }} - - --kube-api-qps=20 - - --kube-api-burst=100 - - --worker-threads=100 - {{- end }} - {{- range .Values.sidecars.attacher.additionalArgs }} - - {{ . 
}} - {{- end }} env: - name: ADDRESS value: /var/lib/csi/sockets/pluginproxy/csi.sock @@ -292,10 +246,10 @@ spec: {{- with .Values.sidecars.attacher.env }} {{- . | toYaml | nindent 12 }} {{- end }} - {{- with .Values.controller.envFrom }} envFrom: + {{- with .Values.controller.envFrom }} {{- . | toYaml | nindent 12 }} - {{- end }} + {{- end }} volumeMounts: - name: socket-dir mountPath: /var/lib/csi/sockets/pluginproxy/ @@ -317,14 +271,6 @@ spec: {{- if .Values.controller.extraCreateMetadata }} - --extra-create-metadata {{- end}} - {{- if not (regexMatch "(-kube-api-qps)|(-kube-api-burst)|(-worker-threads)" (join " " .Values.sidecars.snapshotter.additionalArgs)) }} - - --kube-api-qps=20 - - --kube-api-burst=100 - - --worker-threads=100 - {{- end }} - {{- range .Values.sidecars.snapshotter.additionalArgs }} - - {{ . }} - {{- end }} env: - name: ADDRESS value: /var/lib/csi/sockets/pluginproxy/csi.sock @@ -334,10 +280,10 @@ spec: {{- with .Values.sidecars.snapshotter.env }} {{- . | toYaml | nindent 12 }} {{- end }} - {{- with .Values.controller.envFrom }} envFrom: + {{- with .Values.controller.envFrom }} {{- . | toYaml | nindent 12 }} - {{- end }} + {{- end }} volumeMounts: - name: socket-dir mountPath: /var/lib/csi/sockets/pluginproxy/ @@ -350,94 +296,13 @@ spec: {{- toYaml . | nindent 12 }} {{- end }} {{- end }} - {{- if (.Values.controller.volumeModificationFeature).enabled }} - - name: volumemodifier - image: {{ printf "%s%s:%s" (default "" .Values.image.containerRegistry) .Values.sidecars.volumemodifier.image.repository .Values.sidecars.volumemodifier.image.tag }} - imagePullPolicy: {{ default .Values.image.pullPolicy .Values.sidecars.volumemodifier.image.pullPolicy }} - args: - {{- if not (regexMatch "(-timeout)" (join " " .Values.sidecars.volumemodifier.additionalArgs)) }} - - --timeout=60s - {{- end }} - - --csi-address=$(ADDRESS) - - --v={{ .Values.sidecars.volumemodifier.logLevel }} - - --leader-election={{ .Values.sidecars.volumemodifier.leaderElection.enabled | required "leader election state for csi-volumemodifier is required, must be set to true || false." }} - {{- if .Values.sidecars.volumemodifier.leaderElection.enabled }} - {{- if .Values.sidecars.volumemodifier.leaderElection.leaseDuration }} - - --leader-election-lease-duration={{ .Values.sidecars.volumemodifier.leaderElection.leaseDuration }} - {{- end }} - {{- if .Values.sidecars.volumemodifier.leaderElection.renewDeadline}} - - --leader-election-renew-deadline={{ .Values.sidecars.volumemodifier.leaderElection.renewDeadline }} - {{- end }} - {{- if .Values.sidecars.volumemodifier.leaderElection.retryPeriod }} - - --leader-election-retry-period={{ .Values.sidecars.volumemodifier.leaderElection.retryPeriod }} - {{- end }} - {{- end }} - {{- range .Values.sidecars.volumemodifier.additionalArgs }} - - {{ . }} - {{- end }} - env: - - name: ADDRESS - value: /var/lib/csi/sockets/pluginproxy/csi.sock - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - {{- if .Values.proxy.http_proxy }} - {{- include "aws-ebs-csi-driver.http-proxy" . | nindent 12 }} - {{- end }} - {{- with .Values.sidecars.volumemodifier.env }} - {{- . | toYaml | nindent 12 }} - {{- end }} - {{- with .Values.controller.envFrom }} - envFrom: - {{- . 
| toYaml | nindent 12 }} - {{- end }} - volumeMounts: - - name: socket-dir - mountPath: /var/lib/csi/sockets/pluginproxy/ - {{- with default .Values.controller.resources .Values.sidecars.volumemodifier.resources }} - resources: - {{- toYaml . | nindent 12 }} - {{- end }} - {{- with .Values.sidecars.volumemodifier.securityContext }} - securityContext: - {{- toYaml . | nindent 12 }} - {{- end }} - {{- end }} - name: csi-resizer image: {{ printf "%s%s:%s" (default "" .Values.image.containerRegistry) .Values.sidecars.resizer.image.repository .Values.sidecars.resizer.image.tag }} imagePullPolicy: {{ default .Values.image.pullPolicy .Values.sidecars.resizer.image.pullPolicy }} args: - {{- if not (regexMatch "(-timeout)" (join " " .Values.sidecars.resizer.additionalArgs)) }} - - --timeout=60s - {{- end }} - --csi-address=$(ADDRESS) - --v={{ .Values.sidecars.resizer.logLevel }} - --handle-volume-inuse-error=false - {{- with .Values.sidecars.resizer.leaderElection }} - - --leader-election={{ .enabled | default true }} - {{- if .leaseDuration }} - - --leader-election-lease-duration={{ .leaseDuration }} - {{- end }} - {{- if .renewDeadline }} - - --leader-election-renew-deadline={{ .renewDeadline }} - {{- end }} - {{- if .retryPeriod }} - - --leader-election-retry-period={{ .retryPeriod }} - {{- end }} - {{- end }} - {{- if not (regexMatch "(-kube-api-qps)|(-kube-api-burst)|(-workers)" (join " " .Values.sidecars.resizer.additionalArgs)) }} - - --kube-api-qps=20 - - --kube-api-burst=100 - - --workers=100 - {{- end }} - {{- range .Values.sidecars.resizer.additionalArgs }} - - {{ . }} - {{- end }} env: - name: ADDRESS value: /var/lib/csi/sockets/pluginproxy/csi.sock @@ -447,10 +312,10 @@ spec: {{- with .Values.sidecars.resizer.env }} {{- . | toYaml | nindent 12 }} {{- end }} - {{- with .Values.controller.envFrom }} envFrom: + {{- with .Values.controller.envFrom }} {{- . | toYaml | nindent 12 }} - {{- end }} + {{- end }} volumeMounts: - name: socket-dir mountPath: /var/lib/csi/sockets/pluginproxy/ @@ -467,13 +332,10 @@ spec: imagePullPolicy: {{ default .Values.image.pullPolicy .Values.sidecars.livenessProbe.image.pullPolicy }} args: - --csi-address=/csi/csi.sock - {{- range .Values.sidecars.livenessProbe.additionalArgs }} - - {{ . }} - {{- end }} - {{- with .Values.controller.envFrom }} envFrom: + {{- with .Values.controller.envFrom }} {{- . | toYaml | nindent 12 }} - {{- end }} + {{- end }} volumeMounts: - name: socket-dir mountPath: /csi @@ -493,15 +355,7 @@ spec: {{- end }} volumes: - name: socket-dir - {{- if .Values.controller.socketDirVolume }} - {{- toYaml .Values.controller.socketDirVolume | nindent 10 }} - {{- else }} emptyDir: {} - {{- end }} {{- with .Values.controller.volumes }} {{- toYaml . 
| nindent 8 }} {{- end }} - {{- if .Values.controller.dnsConfig }} - dnsConfig: - {{- toYaml .Values.controller.dnsConfig | nindent 4 }} - {{- end }} diff --git a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/ebs-csi-default-sc.yaml b/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/ebs-csi-default-sc.yaml deleted file mode 100644 index a58595726..000000000 --- a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/ebs-csi-default-sc.yaml +++ /dev/null @@ -1,11 +0,0 @@ -{{- if .Values.defaultStorageClass.enabled }} -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: ebs-csi-default-sc - annotations: - storageclass.kubernetes.io/is-default-class: "true" -provisioner: ebs.csi.aws.com -volumeBindingMode: WaitForFirstConsumer -allowVolumeExpansion: true -{{- end }} diff --git a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/metrics.yaml b/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/metrics.yaml index d68bd7ab9..1dcdf4ddc 100644 --- a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/metrics.yaml +++ b/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/metrics.yaml @@ -37,6 +37,6 @@ spec: endpoints: - targetPort: 3301 path: /metrics - interval: {{ .Values.controller.serviceMonitor.interval | default "15s"}} + interval: 15s {{- end }} {{- end }} diff --git a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/node-windows.yaml b/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/node-windows.yaml index 9a2c2c81a..921b51cfb 100644 --- a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/node-windows.yaml +++ b/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/node-windows.yaml @@ -1,13 +1,184 @@ -{{$defaultArgs := dict - "NodeName" "ebs-csi-node" -}} -{{- include "node-windows" (deepCopy $ | mustMerge $defaultArgs) -}} -{{- range $name, $values := .Values.additionalDaemonSets }} -{{$args := dict - "NodeName" (printf "ebs-csi-node-%s" $name) - "Values" (dict - "node" (deepCopy $.Values.node | mustMerge $values) - ) -}} -{{- include "node-windows" (deepCopy $ | mustMerge $args) -}} +{{- if .Values.node.enableWindows }} +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: ebs-csi-node-windows + namespace: {{ .Release.Namespace }} + labels: + {{- include "aws-ebs-csi-driver.labels" . | nindent 4 }} +spec: + selector: + matchLabels: + app: ebs-csi-node + {{- include "aws-ebs-csi-driver.selectorLabels" . | nindent 6 }} + updateStrategy: + {{ toYaml .Values.node.updateStrategy | nindent 4 }} + template: + metadata: + labels: + app: ebs-csi-node + {{- include "aws-ebs-csi-driver.labels" . | nindent 8 }} + {{- if .Values.node.podLabels }} + {{- toYaml .Values.node.podLabels | nindent 8 }} + {{- end }} + {{- with .Values.node.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- with .Values.node.affinity }} + affinity: {{- toYaml . | nindent 8 }} + {{- end }} + nodeSelector: + kubernetes.io/os: windows + {{- with .Values.node.nodeSelector }} + {{- toYaml . 
| nindent 8 }} + {{- end }} + serviceAccountName: {{ .Values.node.serviceAccount.name }} + priorityClassName: {{ .Values.node.priorityClassName | default "system-node-critical" }} + tolerations: + {{- if .Values.node.tolerateAllTaints }} + - operator: Exists + {{- else }} + {{- with .Values.node.tolerations }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- end }} + containers: + - name: ebs-plugin + image: {{ printf "%s%s:%s" (default "" .Values.image.containerRegistry) .Values.image.repository (default (printf "v%s" .Chart.AppVersion) (toString .Values.image.tag)) }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - node + - --endpoint=$(CSI_ENDPOINT) + {{- with .Values.node.volumeAttachLimit }} + - --volume-attach-limit={{ . }} + {{- end }} + {{- with .Values.node.loggingFormat }} + - --logging-format={{ . }} + {{- end }} + - --v={{ .Values.node.logLevel }} + env: + - name: CSI_ENDPOINT + value: unix:/csi/csi.sock + - name: CSI_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + {{- if .Values.proxy.http_proxy }} + {{- include "aws-ebs-csi-driver.http-proxy" . | nindent 12 }} + {{- end }} + {{- with .Values.node.env }} + {{- . | toYaml | nindent 12 }} + {{- end }} + volumeMounts: + - name: kubelet-dir + mountPath: C:\var\lib\kubelet + mountPropagation: "None" + - name: plugin-dir + mountPath: C:\csi + - name: csi-proxy-disk-pipe + mountPath: \\.\pipe\csi-proxy-disk-v1 + - name: csi-proxy-volume-pipe + mountPath: \\.\pipe\csi-proxy-volume-v1 + - name: csi-proxy-filesystem-pipe + mountPath: \\.\pipe\csi-proxy-filesystem-v1 + ports: + - name: healthz + containerPort: 9808 + protocol: TCP + livenessProbe: + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + timeoutSeconds: 3 + periodSeconds: 10 + failureThreshold: 5 + {{- with .Values.node.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + - name: node-driver-registrar + image: {{ printf "%s%s:%s" (default "" .Values.image.containerRegistry) .Values.sidecars.nodeDriverRegistrar.image.repository .Values.sidecars.nodeDriverRegistrar.image.tag }} + imagePullPolicy: {{ default .Values.image.pullPolicy .Values.sidecars.nodeDriverRegistrar.image.pullPolicy }} + args: + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --v={{ .Values.sidecars.nodeDriverRegistrar.logLevel }} + env: + - name: ADDRESS + value: unix:/csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: C:\var\lib\kubelet\plugins\aws.csi.confidential.cloud\csi.sock + {{- if .Values.proxy.http_proxy }} + {{- include "aws-ebs-csi-driver.http-proxy" . | nindent 12 }} + {{- end }} + {{- with .Values.sidecars.nodeDriverRegistrar.env }} + {{- . | toYaml | nindent 12 }} + {{- end }} + livenessProbe: + exec: + command: + - /csi-node-driver-registrar.exe + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 30 + timeoutSeconds: 15 + volumeMounts: + - name: plugin-dir + mountPath: C:\csi + - name: registration-dir + mountPath: C:\registration + - name: probe-dir + mountPath: C:\var\lib\kubelet\plugins\aws.csi.confidential.cloud + {{- with default .Values.node.resources .Values.sidecars.nodeDriverRegistrar.resources }} + resources: + {{- toYaml . 
| nindent 12 }} + {{- end }} + - name: liveness-probe + image: {{ printf "%s%s:%s" (default "" .Values.image.containerRegistry) .Values.sidecars.livenessProbe.image.repository .Values.sidecars.livenessProbe.image.tag }} + imagePullPolicy: {{ default .Values.image.pullPolicy .Values.sidecars.livenessProbe.image.pullPolicy }} + args: + - --csi-address=unix:/csi/csi.sock + volumeMounts: + - name: plugin-dir + mountPath: C:\csi + {{- with default .Values.node.resources .Values.sidecars.livenessProbe.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{- range .Values.imagePullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + volumes: + - name: kubelet-dir + hostPath: + path: C:\var\lib\kubelet + type: Directory + - name: plugin-dir + hostPath: + path: C:\var\lib\kubelet\plugins\aws.csi.confidential.cloud + type: DirectoryOrCreate + - name: registration-dir + hostPath: + path: C:\var\lib\kubelet\plugins_registry + type: Directory + - name: csi-proxy-disk-pipe + hostPath: + path: \\.\pipe\csi-proxy-disk-v1 + type: "" + - name: csi-proxy-volume-pipe + hostPath: + path: \\.\pipe\csi-proxy-volume-v1 + type: "" + - name: csi-proxy-filesystem-pipe + hostPath: + path: \\.\pipe\csi-proxy-filesystem-v1 + type: "" + - name: probe-dir + emptyDir: {} {{- end }} diff --git a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/node.yaml b/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/node.yaml index a891513b6..ecc6412d8 100644 --- a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/node.yaml +++ b/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/node.yaml @@ -1,46 +1,206 @@ -{{$defaultArgs := dict - "NodeName" "ebs-csi-node" -}} -{{- include "node" (deepCopy $ | mustMerge $defaultArgs) -}} -{{- range $name, $values := .Values.additionalDaemonSets }} -{{$args := dict - "NodeName" (printf "ebs-csi-node-%s" $name) - "Values" (dict - "node" (deepCopy $.Values.node | mustMerge $values) - ) -}} -{{- include "node" (deepCopy $ | mustMerge $args) -}} -{{- end }} -{{- if .Values.a1CompatibilityDaemonSet }} -{{$args := dict - "NodeName" "ebs-csi-node-a1compat" - "Values" (dict - "image" (dict - "tag" (printf "%s-a1compat" (default (printf "v%s" .Chart.AppVersion) (.Values.image.tag | toString))) - ) - "node" (dict - "affinity" (dict - "nodeAffinity" (dict - "requiredDuringSchedulingIgnoredDuringExecution" (dict - "nodeSelectorTerms" (list - (dict "matchExpressions" (list - (dict - "key" "eks.amazonaws.com/compute-type" - "operator" "NotIn" - "values" (list "fargate") - ) - (dict - "key" "node.kubernetes.io/instance-type" - "operator" "In" - "values" (list "a1.medium" "a1.large" "a1.xlarge" "a1.2xlarge" "a1.4xlarge") - ) - )) - ) - ) - ) - ) - ) - ) -}} -{{- include "node" (deepCopy $ | mustMerge $args) -}} -{{- end }} +# Node Service +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: ebs-csi-node + namespace: {{ .Release.Namespace }} + labels: + {{- include "aws-ebs-csi-driver.labels" . | nindent 4 }} +spec: + selector: + matchLabels: + app: ebs-csi-node + {{- include "aws-ebs-csi-driver.selectorLabels" . | nindent 6 }} + updateStrategy: + {{- toYaml .Values.node.updateStrategy | nindent 4 }} + template: + metadata: + labels: + app: ebs-csi-node + {{- include "aws-ebs-csi-driver.labels" . 
| nindent 8 }} + {{- if .Values.node.podLabels }} + {{- toYaml .Values.node.podLabels | nindent 8 }} + {{- end }} + {{- with .Values.node.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- with .Values.node.affinity }} + affinity: {{- toYaml . | nindent 8 }} + {{- end }} + nodeSelector: + kubernetes.io/os: linux + {{- with .Values.node.nodeSelector }} + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ .Values.node.serviceAccount.name }} + priorityClassName: {{ .Values.node.priorityClassName | default "system-node-critical" }} + tolerations: + {{- if .Values.node.tolerateAllTaints }} + - operator: Exists + {{- else }} + {{- with .Values.node.tolerations }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- end }} + {{- with .Values.node.securityContext }} + securityContext: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - name: ebs-plugin + image: {{ printf "%s%s:%s" (default "" .Values.image.containerRegistry) .Values.image.repository (default (printf "v%s" .Chart.AppVersion) (toString .Values.image.tag)) }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - node + - --endpoint=$(CSI_ENDPOINT) + {{- with .Values.node.volumeAttachLimit }} + - --volume-attach-limit={{ . }} + {{- end }} + {{- with .Values.node.loggingFormat }} + - --logging-format={{ . }} + {{- end }} + - "--kms-addr={{ .Values.kms.keyServiceName }}.{{ .Values.kms.keyServiceNamespace | default .Release.Namespace }}:{{ .Values.kms.keyServicePort }}" + - --v={{ .Values.node.logLevel }} + env: + - name: CSI_ENDPOINT + value: unix:/csi/csi.sock + - name: CSI_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + {{- if .Values.proxy.http_proxy }} + {{- include "aws-ebs-csi-driver.http-proxy" . | nindent 12 }} + {{- end }} + {{- with .Values.node.env }} + {{- . | toYaml | nindent 12 }} + {{- end }} + envFrom: + {{- with .Values.controller.envFrom }} + {{- . | toYaml | nindent 12 }} + {{- end }} + volumeMounts: + - name: kubelet-dir + mountPath: {{ .Values.node.kubeletPath }} + mountPropagation: "Bidirectional" + - name: plugin-dir + mountPath: /csi + - name: device-dir + mountPath: /dev + - name: cryptsetup + mountPath: /run/cryptsetup + ports: + - name: healthz + containerPort: 9808 + protocol: TCP + livenessProbe: + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + timeoutSeconds: 3 + periodSeconds: 10 + failureThreshold: 5 + {{- with .Values.node.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.node.containerSecurityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + - name: node-driver-registrar + image: {{ printf "%s%s:%s" (default "" .Values.image.containerRegistry) .Values.sidecars.nodeDriverRegistrar.image.repository .Values.sidecars.nodeDriverRegistrar.image.tag }} + imagePullPolicy: {{ default .Values.image.pullPolicy .Values.sidecars.nodeDriverRegistrar.image.pullPolicy }} + args: + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --v={{ .Values.sidecars.nodeDriverRegistrar.logLevel }} + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: {{ printf "%s/plugins/aws.csi.confidential.cloud/csi.sock" (trimSuffix "/" .Values.node.kubeletPath) }} + {{- if .Values.proxy.http_proxy }} + {{- include "aws-ebs-csi-driver.http-proxy" . | nindent 12 }} + {{- end }} + {{- with .Values.sidecars.nodeDriverRegistrar.env }} + {{- . 
| toYaml | nindent 12 }} + {{- end }} + envFrom: + {{- with .Values.controller.envFrom }} + {{- . | toYaml | nindent 12 }} + {{- end }} + livenessProbe: + exec: + command: + - /csi-node-driver-registrar + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 30 + timeoutSeconds: 15 + volumeMounts: + - name: plugin-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + - name: probe-dir + mountPath: {{ printf "%s/plugins/aws.csi.confidential.cloud/" (trimSuffix "/" .Values.node.kubeletPath) }} + {{- with default .Values.node.resources .Values.sidecars.nodeDriverRegistrar.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.sidecars.nodeDriverRegistrar.securityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + - name: liveness-probe + image: {{ printf "%s%s:%s" (default "" .Values.image.containerRegistry) .Values.sidecars.livenessProbe.image.repository .Values.sidecars.livenessProbe.image.tag }} + imagePullPolicy: {{ default .Values.image.pullPolicy .Values.sidecars.livenessProbe.image.pullPolicy }} + args: + - --csi-address=/csi/csi.sock + envFrom: + {{- with .Values.controller.envFrom }} + {{- . | toYaml | nindent 12 }} + {{- end }} + volumeMounts: + - name: plugin-dir + mountPath: /csi + {{- with default .Values.node.resources .Values.sidecars.livenessProbe.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.sidecars.livenessProbe.securityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{- range .Values.imagePullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + volumes: + - name: kubelet-dir + hostPath: + path: {{ .Values.node.kubeletPath }} + type: Directory + - name: plugin-dir + hostPath: + path: {{ printf "%s/plugins/aws.csi.confidential.cloud/" (trimSuffix "/" .Values.node.kubeletPath) }} + type: DirectoryOrCreate + - name: registration-dir + hostPath: + path: {{ printf "%s/plugins_registry/" (trimSuffix "/" .Values.node.kubeletPath) }} + type: Directory + - name: device-dir + hostPath: + path: /dev + type: Directory + - name: probe-dir + emptyDir: {} + - name: cryptsetup + hostPath: + path: /run/cryptsetup + type: Directory diff --git a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/poddisruptionbudget-controller.yaml b/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/poddisruptionbudget-controller.yaml index 0a1e97cc0..6f73fa222 100644 --- a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/poddisruptionbudget-controller.yaml +++ b/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/poddisruptionbudget-controller.yaml @@ -1,4 +1,8 @@ +{{- if .Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" }} apiVersion: policy/v1 +{{- else }} +apiVersion: policy/v1beta1 +{{- end }} kind: PodDisruptionBudget metadata: name: ebs-csi-controller diff --git a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/role-leases.yaml b/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/role-leases.yaml deleted file mode 100644 index 1ec62bb49..000000000 --- a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/role-leases.yaml +++ /dev/null @@ -1,11 +0,0 @@ -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - 
namespace: {{ .Release.Namespace }} - name: ebs-csi-leases-role - labels: - {{- include "aws-ebs-csi-driver.labels" . | nindent 4 }} -rules: -- apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "watch", "list", "delete", "update", "create"] diff --git a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/rolebinding-leases.yaml b/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/rolebinding-leases.yaml deleted file mode 100644 index 88fded8a3..000000000 --- a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/rolebinding-leases.yaml +++ /dev/null @@ -1,15 +0,0 @@ -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: ebs-csi-leases-rolebinding - namespace: {{ .Release.Namespace }} - labels: - {{- include "aws-ebs-csi-driver.labels" . | nindent 4 }} -subjects: -- kind: ServiceAccount - name: {{ .Values.controller.serviceAccount.name }} - namespace: {{ .Release.Namespace }} -roleRef: - kind: Role - name: ebs-csi-leases-role - apiGroup: rbac.authorization.k8s.io diff --git a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/serviceaccount-csi-controller.yaml b/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/serviceaccount-csi-controller.yaml index d819f5493..a5b1102b4 100644 --- a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/serviceaccount-csi-controller.yaml +++ b/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/serviceaccount-csi-controller.yaml @@ -15,7 +15,4 @@ metadata: #annotations: # eks.amazonaws.com/role-arn: arn::iam:::role/ebs-csi-role {{- end }} -{{- if .Values.controller.serviceAccount.automountServiceAccountToken }} -automountServiceAccountToken: {{ .Values.controller.serviceAccount.automountServiceAccountToken }} -{{- end }} {{- end -}} diff --git a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/serviceaccount-csi-node.yaml b/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/serviceaccount-csi-node.yaml index 9f3c7c7e1..fb85abedf 100644 --- a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/serviceaccount-csi-node.yaml +++ b/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/serviceaccount-csi-node.yaml @@ -10,7 +10,4 @@ metadata: annotations: {{- toYaml . | nindent 4 }} {{- end }} -{{- if .Values.node.serviceAccount.automountServiceAccountToken }} -automountServiceAccountToken: {{ .Values.node.serviceAccount.automountServiceAccountToken }} -{{- end }} {{- end -}} diff --git a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/volumesnapshotclass.yaml b/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/volumesnapshotclass.yaml index 59551898e..0db3046aa 100644 --- a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/volumesnapshotclass.yaml +++ b/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/templates/volumesnapshotclass.yaml @@ -8,9 +8,6 @@ metadata: {{- with .annotations }} annotations: {{- . | toYaml | trim | nindent 4 }} {{- end }} - {{- with .labels }} - labels: {{- . 
| toYaml | trim | nindent 4 }} - {{- end }} driver: aws.csi.confidential.cloud deletionPolicy: {{ .deletionPolicy }} {{- with .parameters }} diff --git a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/values.yaml b/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/values.yaml index 0c4a68857..defdd4d83 100644 --- a/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/values.yaml +++ b/internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/values.yaml @@ -5,7 +5,7 @@ image: repository: ghcr.io/edgelesssys/constellation/aws-csi-driver # Overrides the image tag whose default is v{{ .Chart.AppVersion }} - tag: "v1.2.0@sha256:9477e8ed37989c46963d57d24de5ddbc7ab965b1685d709d06c1ae536b23b5b1" + tag: "v1.1.0@sha256:3e2c394f2397455516948efcc9a4b87cbaeabf14df31702d5905ce08a53995f2" pullPolicy: Always # -- Custom labels to add into metadata @@ -24,12 +24,8 @@ sidecars: image: pullPolicy: IfNotPresent repository: public.ecr.aws/eks-distro/kubernetes-csi/external-provisioner - tag: "v4.0.1-eks-1-30-4@sha256:0cf0d02211632c6b947f110e9f3f13f782eea1cfb7b990191d78ad032b2c2d77" + tag: "v3.4.1-eks-1-26-7@sha256:adfcb04433d1824f62dde0365877d0f7b7a2eaebc45670cbab7e0c1f07ba0607" logLevel: 2 - # Additional parameters provided by external-provisioner. - additionalArgs: [] - # Grant additional permissions to external-provisioner - additionalClusterRoleRules: resources: {} # Tune leader lease election for csi-provisioner. # Leader election is on by default. @@ -49,7 +45,7 @@ sidecars: image: pullPolicy: IfNotPresent repository: public.ecr.aws/eks-distro/kubernetes-csi/external-attacher - tag: "v4.5.1-eks-1-30-4@sha256:d68034351f65101d2a8506a5c583c5c923238aa93ba9719e779c0eb6a4b33993" + tag: "v4.2.0-eks-1-26-7@sha256:4b0d6e8758a0213ec942381b9577d2b3e971b545dc9e3fb59973f7992763d85f" # Tune leader lease election for csi-attacher. # Leader election is on by default. leaderElection: @@ -61,10 +57,6 @@ sidecars: # renewDeadline: "10s" # retryPeriod: "5s" logLevel: 2 - # Additional parameters provided by external-attacher. - additionalArgs: [] - # Grant additional permissions to external-attacher - additionalClusterRoleRules: [] resources: {} securityContext: readOnlyRootFilesystem: true @@ -76,12 +68,8 @@ sidecars: image: pullPolicy: IfNotPresent repository: public.ecr.aws/eks-distro/kubernetes-csi/external-snapshotter/csi-snapshotter - tag: "v7.0.2-eks-1-30-4@sha256:9a33488c2cd691d4df454fbc0118e532cbd8aacf99856bdf395507fdae2421dc" + tag: "v6.2.1-eks-1-26-7@sha256:b8071f45885f1838387edb04a1d164680dcec8d656de682624ddc59d30ba660b" logLevel: 2 - # Additional parameters provided by csi-snapshotter. - additionalArgs: [] - # Grant additional permissions to csi-snapshotter - additionalClusterRoleRules: [] resources: {} securityContext: readOnlyRootFilesystem: true @@ -90,9 +78,7 @@ sidecars: image: pullPolicy: IfNotPresent repository: public.ecr.aws/eks-distro/kubernetes-csi/livenessprobe - tag: "v2.12.0-eks-1-30-4@sha256:665d64a8e1124ecd95e08626ddd140154be30a95c6574d423d66cf262d28cc9c" - # Additional parameters provided by livenessprobe. 
- additionalArgs: [] + tag: "v2.9.0-eks-1-26-7@sha256:d9e11b42ae5f4f2f7ea9034e68040997cdbb04ae9e188aa897f76ae92698d78a" resources: {} securityContext: readOnlyRootFilesystem: true @@ -102,22 +88,8 @@ sidecars: image: pullPolicy: IfNotPresent repository: public.ecr.aws/eks-distro/kubernetes-csi/external-resizer - tag: "v1.10.1-eks-1-30-4@sha256:2aef6bf851fc3fa8e03c7a3efc9d3adb2ae1cb1746f88fb8a7559f8ca44bf188" - # Tune leader lease election for csi-resizer. - # Leader election is on by default. - leaderElection: - enabled: true - # Optional values to tune lease behavior. - # The arguments provided must be in an acceptable time.ParseDuration format. - # Ref: https://pkg.go.dev/flag#Duration - # leaseDuration: "15s" - # renewDeadline: "10s" - # retryPeriod: "5s" + tag: "v1.7.0-eks-1-26-7@sha256:81672f19d1da5cdff8d2068d8d69776067a1e5c31537ab3282d95dff34d581b6" logLevel: 2 - # Additional parameters provided by external-resizer. - additionalArgs: [] - # Grant additional permissions to external-resizer - additionalClusterRoleRules: [] resources: {} securityContext: readOnlyRootFilesystem: true @@ -127,40 +99,8 @@ sidecars: image: pullPolicy: IfNotPresent repository: public.ecr.aws/eks-distro/kubernetes-csi/node-driver-registrar - tag: "v2.10.1-eks-1-30-4@sha256:518ed9cba6258735a25d2b896dc65d34a41e22f6785550a7e24e2f2dbd6a48b5" + tag: "v2.7.0-eks-1-26-7@sha256:6ad0cae2ae91453f283a44e9b430e475b8a9fa3d606aec9a8b09596fffbcd2c9" logLevel: 2 - # Additional parameters provided by node-driver-registrar. - additionalArgs: [] - resources: {} - securityContext: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - livenessProbe: - exec: - command: - - /csi-node-driver-registrar - - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) - - --mode=kubelet-registration-probe - initialDelaySeconds: 30 - periodSeconds: 90 - timeoutSeconds: 15 - volumemodifier: - env: [] - image: - pullPolicy: IfNotPresent - repository: public.ecr.aws/ebs-csi-driver/volume-modifier-for-k8s - tag: "v0.3.0@sha256:c9e4e64e721b8fea4ba34502ac9f8fb83bd1571117276f553ac4595e0c5a3bf8" - leaderElection: - enabled: true - # Optional values to tune lease behavior. - # The arguments provided must be in an acceptable time.ParseDuration format. - # Ref: https://pkg.go.dev/flag#Duration - # leaseDuration: "15s" - # renewDeadline: "10s" - # retryPeriod: "5s" - logLevel: 2 - # Additional parameters provided by volume-modifier-for-k8s. - additionalArgs: [] resources: {} securityContext: readOnlyRootFilesystem: true @@ -174,16 +114,7 @@ imagePullSecrets: [] nameOverride: fullnameOverride: -awsAccessSecret: - name: aws-secret - keyId: key_id - accessKey: access_key - controller: - batching: true - volumeModificationFeature: - enabled: false - # Additional parameters provided by aws-ebs-csi-driver controller. additionalArgs: [] sdkDebugLog: false loggingFormat: text @@ -234,15 +165,12 @@ controller: # Additional labels for ServiceMonitor object labels: release: prometheus - interval: "15s" # If set to true, AWS API call metrics will be exported to the following # TCP endpoint: "0.0.0.0:3301" # --- # ID of the Kubernetes cluster used for tagging provisioned EBS volumes (optional). 
k8sTagClusterId: logLevel: 2 - userAgentExtra: "helm" - deploymentAnnotations: {} nodeSelector: node-role.kubernetes.io/control-plane: "" podAnnotations: {} @@ -254,9 +182,6 @@ controller: # region: us-east-1 region: replicaCount: 2 - revisionHistoryLimit: 10 - socketDirVolume: - emptyDir: {} updateStrategy: type: RollingUpdate rollingUpdate: @@ -270,15 +195,13 @@ controller: cpu: 10m memory: 40Mi limits: + cpu: 100m memory: 256Mi serviceAccount: # A service account will be created for you if set to true. Set to false if you want to use your own. create: true name: ebs-csi-controller-sa annotations: {} - ## Enable if EKS IAM for SA is used - # eks.amazonaws.com/role-arn: arn::iam:::role/ebs-csi-role - automountServiceAccountToken: true tolerations: - key: CriticalAddonsOnly operator: Exists @@ -312,18 +235,8 @@ controller: runAsUser: 1000 runAsGroup: 1000 fsGroup: 1000 - # Add additional volume mounts on the controller with controller.volumes and controller.volumeMounts volumes: [] - # Add additional volumes to be mounted onto the controller: - # - name: custom-dir - # hostPath: - # path: /path/to/dir - # type: Directory volumeMounts: [] - # And add mount paths for those additional volumes: - # - name: custom-dir - # mountPath: /mount/path - # --- # securityContext on the controller container (see sidecars for securityContext on sidecar containers) containerSecurityContext: readOnlyRootFilesystem: true @@ -336,13 +249,6 @@ controller: # - name: wait # image: busybox # command: [ 'sh', '-c', "sleep 20" ] - # Enable opentelemetry tracing for the plugin running on the daemonset - otelTracing: {} - # otelServiceName: ebs-csi-controller - # otelExporterEndpoint: "http://localhost:4317" - - # Enable dnsConfig for the controller and node pods - dnsConfig: {} node: env: [] @@ -351,26 +257,16 @@ node: loggingFormat: text logLevel: 2 priorityClassName: - additionalArgs: [] affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - - matchExpressions: - - key: eks.amazonaws.com/compute-type - operator: NotIn - values: - - fargate - - key: node.kubernetes.io/instance-type - operator: NotIn - values: - - a1.medium - - a1.large - - a1.xlarge - - a1.2xlarge - - a1.4xlarge + - matchExpressions: + - key: eks.amazonaws.com/compute-type + operator: NotIn + values: + - fargate nodeSelector: {} - daemonSetAnnotations: {} podAnnotations: {} podLabels: {} tolerateAllTaints: true @@ -383,32 +279,19 @@ node: cpu: 10m memory: 40Mi limits: + cpu: 100m memory: 256Mi - revisionHistoryLimit: 10 - probeDirVolume: - emptyDir: {} serviceAccount: create: true name: ebs-csi-node-sa annotations: {} - ## Enable if EKS IAM for SA is used - # eks.amazonaws.com/role-arn: arn::iam:::role/ebs-csi-role - automountServiceAccountToken: true - # Enable the linux daemonset creation - enableLinux: true enableWindows: false - # The number of attachment slots to reserve for system use (and not to be used for CSI volumes) - # When this parameter is not specified (or set to -1), the EBS CSI Driver will attempt to determine the number of reserved slots via heuristic - # Cannot be specified at the same time as `node.volumeAttachLimit` - reservedVolumeAttachments: # The "maximum number of attachable volumes" per node - # Cannot be specified at the same time as `node.reservedVolumeAttachments` volumeAttachLimit: updateStrategy: type: RollingUpdate rollingUpdate: maxUnavailable: "10%" - hostNetwork: false # securityContext on the node pod securityContext: # The node pod must be run as root to bind to the 
registration/driver sockets @@ -416,38 +299,10 @@ node: runAsUser: 0 runAsGroup: 0 fsGroup: 0 - # Add additional volume mounts on the node pods with node.volumes and node.volumeMounts - volumes: [] - # Add additional volumes to be mounted onto the node pods: - # - name: custom-dir - # hostPath: - # path: /path/to/dir - # type: Directory - volumeMounts: [] - # And add mount paths for those additional volumes: - # - name: custom-dir - # mountPath: /mount/path - # --- # securityContext on the node container (see sidecars for securityContext on sidecar containers) containerSecurityContext: readOnlyRootFilesystem: true privileged: true - # Enable opentelemetry tracing for the plugin running on the daemonset - otelTracing: {} - # otelServiceName: ebs-csi-node - # otelExporterEndpoint: "http://localhost:4317" - -additionalDaemonSets: - # Additional node DaemonSets, using the node config structure - # See docs/additional-daemonsets.md for more information - # - # example: - # nodeSelector: - # node.kubernetes.io/instance-type: c5.large - # volumeAttachLimit: 15 - -# Enable compatibility for the A1 instance family via use of an AL2-based image in a separate DaemonSet -# a1CompatibilityDaemonSet: true # Create Constellation default StorageClasses createStorageClass: true @@ -468,18 +323,12 @@ storageClasses: [] # parameters: # encrypted: "true" -defaultStorageClass: - enabled: false - volumeSnapshotClasses: [] # Add VolumeSnapshotClass resources like: # - name: ebs-vsc # # annotation metadata # annotations: # snapshot.storage.kubernetes.io/is-default-class: "true" -# # label metadata -# labels: -# my-label-is: supercool # # deletionPolicy must be specified # deletionPolicy: Delete # parameters: @@ -488,8 +337,3 @@ volumeSnapshotClasses: [] # Intended for use with older clusters that cannot easily replace the CSIDriver object # This parameter should always be false for new installations useOldCSIDriver: false - -helmTester: - enabled: true - # Supply a custom image to the ebs-csi-driver-test pod in helm-tester.yaml - image: "gcr.io/k8s-staging-test-infra/kubekins-e2e:v20240311-b09cdeb92c-master" diff --git a/internal/constellation/helm/charts/edgeless/csi/charts/azuredisk-csi-driver/Chart.yaml b/internal/constellation/helm/charts/edgeless/csi/charts/azuredisk-csi-driver/Chart.yaml index 3ce5248a0..fbab5e66d 100644 --- a/internal/constellation/helm/charts/edgeless/csi/charts/azuredisk-csi-driver/Chart.yaml +++ b/internal/constellation/helm/charts/edgeless/csi/charts/azuredisk-csi-driver/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: "v1.4.0" +appVersion: "v1.3.0" description: Azure disk Container Storage Interface (CSI) Storage Plugin with on-node encryption support name: azuredisk-csi-driver -version: v1.4.0 +version: v1.3.0 diff --git a/internal/constellation/helm/charts/edgeless/csi/charts/azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml b/internal/constellation/helm/charts/edgeless/csi/charts/azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml index 645a4da75..f7ac6f2db 100644 --- a/internal/constellation/helm/charts/edgeless/csi/charts/azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml +++ b/internal/constellation/helm/charts/edgeless/csi/charts/azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml @@ -120,7 +120,6 @@ spec: - "-leader-election" - "--leader-election-namespace={{ .Release.Namespace }}" - "-v=2" - - "--timeout=1200s" env: - name: ADDRESS value: /csi/csi.sock @@ -158,11 +157,7 @@ spec: args: - --csi-address=/csi/csi.sock - 
--probe-timeout=3s -{{- if eq .Values.controller.hostNetwork true }} - - --http-endpoint=localhost:{{ .Values.controller.livenessProbe.healthPort }} -{{- else }} - --health-port={{ .Values.controller.livenessProbe.healthPort }} -{{- end }} - --v=2 volumeMounts: - name: socket-dir @@ -202,29 +197,18 @@ spec: - "--enable-traffic-manager={{ .Values.controller.enableTrafficManager }}" - "--traffic-manager-port={{ .Values.controller.trafficManagerPort }}" - "--enable-otel-tracing={{ .Values.controller.otelTracing.enabled }}" - - "--check-disk-lun-collision=true" - {{- range $value := .Values.controller.extraArgs }} - - {{ $value | quote }} - {{- end }} ports: - - containerPort: {{ .Values.controller.metricsPort }} - name: metrics - protocol: TCP -{{- if ne .Values.controller.hostNetwork true }} - containerPort: {{ .Values.controller.livenessProbe.healthPort }} name: healthz protocol: TCP -{{- end }} + - containerPort: {{ .Values.controller.metricsPort }} + name: metrics + protocol: TCP livenessProbe: failureThreshold: 5 httpGet: path: /healthz -{{- if eq .Values.controller.hostNetwork true }} - host: localhost - port: {{ .Values.controller.livenessProbe.healthPort }} -{{- else }} port: healthz -{{- end }} initialDelaySeconds: 30 timeoutSeconds: 10 periodSeconds: 30 diff --git a/internal/constellation/helm/charts/edgeless/csi/charts/azuredisk-csi-driver/templates/csi-azuredisk-node.yaml b/internal/constellation/helm/charts/edgeless/csi/charts/azuredisk-csi-driver/templates/csi-azuredisk-node.yaml index 9d9c368d9..50d3b795c 100644 --- a/internal/constellation/helm/charts/edgeless/csi/charts/azuredisk-csi-driver/templates/csi-azuredisk-node.yaml +++ b/internal/constellation/helm/charts/edgeless/csi/charts/azuredisk-csi-driver/templates/csi-azuredisk-node.yaml @@ -74,11 +74,7 @@ spec: args: - --csi-address=/csi/csi.sock - --probe-timeout=3s -{{- if eq .Values.linux.hostNetwork true }} - - --http-endpoint=localhost:{{ .Values.node.livenessProbe.healthPort }} -{{- else }} - --health-port={{ .Values.node.livenessProbe.healthPort }} -{{- end }} - --v=2 resources: {{- toYaml .Values.linux.resources.livenessProbe | nindent 12 }} - name: node-driver-registrar @@ -135,22 +131,15 @@ spec: - "--get-nodeid-from-imds={{ .Values.node.getNodeIDFromIMDS }}" - "--enable-otel-tracing={{ .Values.linux.otelTracing.enabled }}" - "--kms-addr={{ .Values.global.keyServiceName }}.{{ .Values.global.keyServiceNamespace | default .Release.Namespace }}:{{ .Values.global.keyServicePort }}" -{{- if ne .Values.linux.hostNetwork true }} ports: - containerPort: {{ .Values.node.livenessProbe.healthPort }} name: healthz protocol: TCP -{{- end }} livenessProbe: failureThreshold: 5 httpGet: path: /healthz -{{- if eq .Values.linux.hostNetwork true }} - host: localhost - port: {{ .Values.node.livenessProbe.healthPort }} -{{- else }} port: healthz -{{- end }} initialDelaySeconds: 30 timeoutSeconds: 10 periodSeconds: 30 diff --git a/internal/constellation/helm/charts/edgeless/csi/charts/azuredisk-csi-driver/values.yaml b/internal/constellation/helm/charts/edgeless/csi/charts/azuredisk-csi-driver/values.yaml index 18faf65c3..944663770 100644 --- a/internal/constellation/helm/charts/edgeless/csi/charts/azuredisk-csi-driver/values.yaml +++ b/internal/constellation/helm/charts/edgeless/csi/charts/azuredisk-csi-driver/values.yaml @@ -2,27 +2,27 @@ image: baseRepo: mcr.microsoft.com azuredisk: repository: ghcr.io/edgelesssys/constellation/azure-csi-driver - tag: v1.4.0@sha256:e41b09d2735cb7410e2bf7abe9ca2166aa5a949d6c6e2ac570773b5d041797f1 + 
tag: v1.3.0@sha256:1e798f066ef78c293c4c87a31677f8948be4c8709980135969b73a9d7a46ca71 pullPolicy: IfNotPresent csiProvisioner: repository: /oss/kubernetes-csi/csi-provisioner - tag: v4.0.0@sha256:beadfb2cfa02f8bbb2efd88261a673023527cf51ebe7894daef82c4d928264a5 + tag: v3.5.0@sha256:fdf70099aa1538d1c2164976cf6d158ef8b3a5ee63db10bf0085de4ec66f59b4 pullPolicy: IfNotPresent csiAttacher: repository: /oss/kubernetes-csi/csi-attacher - tag: v4.5.0@sha256:172a9140780701b2223b7296729fc6cc3be8c86d0cfd2d0452e495f5ea28f51f + tag: v4.3.0@sha256:4306b80bfe8caea3fe53f6d1c15807c745be3072553ff508fc4f61da8f4a0c10 pullPolicy: IfNotPresent csiResizer: repository: /oss/kubernetes-csi/csi-resizer - tag: v1.9.3@sha256:e20dc798f529436d2c861dd66bc7fcfa17623b562a2a65474aab38fb77c9824a + tag: v1.8.0@sha256:6f0e8c9f3d0bdcf7a5fb5e404276ffac624033099d7687c8080692bcb6d13cd1 pullPolicy: IfNotPresent livenessProbe: repository: /oss/kubernetes-csi/livenessprobe - tag: v2.12.0@sha256:c762188c45d1b9bc9144b694b85313d5e49c741935a81d5b94fd7db978a40ae1 + tag: v2.10.0@sha256:3aeac313cffdb7db80b733539427f2533a3f662bf538e7b6434b0f898ceb701b pullPolicy: IfNotPresent nodeDriverRegistrar: repository: /oss/kubernetes-csi/csi-node-driver-registrar - tag: v2.10.0@sha256:136e3a4a5897f111d1dedd404a5717ee7ff2f215e5fe878abdf4ce00c2292280 + tag: v2.8.0@sha256:af6bf1b5ff310d4dc02cf8276be9b06014318f7ee31238b5fa278febd1a10ca9 pullPolicy: IfNotPresent serviceAccount: @@ -140,11 +140,11 @@ snapshot: image: csiSnapshotter: repository: /oss/kubernetes-csi/csi-snapshotter - tag: v6.3.3 + tag: v6.2.2 pullPolicy: IfNotPresent csiSnapshotController: repository: /oss/kubernetes-csi/snapshot-controller - tag: v6.3.3 + tag: v6.2.2 pullPolicy: IfNotPresent snapshotController: name: csi-snapshot-controller diff --git a/internal/constellation/helm/charts/edgeless/csi/charts/gcp-compute-persistent-disk-csi-driver/Chart.yaml b/internal/constellation/helm/charts/edgeless/csi/charts/gcp-compute-persistent-disk-csi-driver/Chart.yaml index 0380cc531..ed8008238 100644 --- a/internal/constellation/helm/charts/edgeless/csi/charts/gcp-compute-persistent-disk-csi-driver/Chart.yaml +++ b/internal/constellation/helm/charts/edgeless/csi/charts/gcp-compute-persistent-disk-csi-driver/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -version: 1.4.0 -appVersion: "v1.4.0" +version: 1.3.0 +appVersion: "v1.3.0" description: GCP Compute Persistent Disk Container Storage Interface (CSI) Storage Plugin with on-node encryption support name: gcp-compute-persistent-disk-csi-driver diff --git a/internal/constellation/helm/charts/edgeless/csi/charts/gcp-compute-persistent-disk-csi-driver/values.yaml b/internal/constellation/helm/charts/edgeless/csi/charts/gcp-compute-persistent-disk-csi-driver/values.yaml index 2ac7e6b5e..e620dde5a 100644 --- a/internal/constellation/helm/charts/edgeless/csi/charts/gcp-compute-persistent-disk-csi-driver/values.yaml +++ b/internal/constellation/helm/charts/edgeless/csi/charts/gcp-compute-persistent-disk-csi-driver/values.yaml @@ -1,28 +1,28 @@ image: csiProvisioner: repo: registry.k8s.io/sig-storage/csi-provisioner - tag: v3.6.3@sha256:10624570c0aceb03f55f1eb07147b0c537e4676869cca2e9bd4bab113f810ac4 + tag: v3.4.0@sha256:e468dddcd275163a042ab297b2d8c2aca50d5e148d2d22f3b6ba119e2f31fa79 pullPolicy: IfNotPresent csiAttacher: repo: registry.k8s.io/sig-storage/csi-attacher - tag: v4.4.3@sha256:d7325367ab72b2d469a5091d87b4fc01142d2d13d1a28b2defbbe3e6fdbc4611 + tag: v4.2.0@sha256:34cf9b32736c6624fc9787fb149ea6e0fbeb45415707ac2f6440ac960f1116e6 pullPolicy: IfNotPresent 
csiResizer: repo: registry.k8s.io/sig-storage/csi-resizer - tag: v1.9.3@sha256:3c116f543f0590aeff3299c8bb0683f250817d11a77d9e9071b15a0bffdabcd9 + tag: v1.7.0@sha256:3a7bdf5d105783d05d0962fa06ca53032b01694556e633f27366201c2881e01d pullPolicy: IfNotPresent csiSnapshotter: repo: registry.k8s.io/sig-storage/csi-snapshotter - tag: v6.3.3@sha256:f1bd6ee18c4021c1c94f29edfab89b49b6a4d1b800936c19dbef2d75f8202f2d + tag: v6.1.0@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f pullPolicy: IfNotPresent csiNodeRegistrar: repo: registry.k8s.io/sig-storage/csi-node-driver-registrar - tag: v2.9.3@sha256:0f64602ea791246712b51df334bbd701a0f31df9950a4cb9c28c059f367baa9e + tag: v2.7.0@sha256:4a4cae5118c4404e35d66059346b7fa0835d7e6319ff45ed73f4bba335cf5183 pullPolicy: IfNotPresent gcepdDriver: repo: ghcr.io/edgelesssys/constellation/gcp-csi-driver # CSI driver version is independent of Constellation releases - tag: v1.4.0@sha256:53d608aa03dd07059bc04e1f8c64e2feb6fceff50fb0cbe276d31a8652a19bac + tag: v1.3.0@sha256:0ecb68f348ed6c287075db00f9c5ea731e7e2db9f2f7511b65391fb6856fe11a pullPolicy: IfNotPresent csiController: diff --git a/internal/constellation/helm/charts/edgeless/csi/charts/snapshot-controller/Chart.yaml b/internal/constellation/helm/charts/edgeless/csi/charts/snapshot-controller/Chart.yaml index be3dde48a..a6aacc946 100644 --- a/internal/constellation/helm/charts/edgeless/csi/charts/snapshot-controller/Chart.yaml +++ b/internal/constellation/helm/charts/edgeless/csi/charts/snapshot-controller/Chart.yaml @@ -1,9 +1,6 @@ apiVersion: v2 name: snapshot-controller -description: | - A chart to deploy the CSI snapshot controller and webhook - Snapshot controller source: https://github.com/kubernetes-csi/external-snapshotter/tree/v8.0.1/deploy/kubernetes/snapshot-controller - Snapshot validating webhook source: https://github.com/kubernetes-csi/external-snapshotter/tree/v8.0.1/deploy/kubernetes/webhook-example +description: A chart to deploy the CSI snapshot controller and webhook type: application -version: 8.0.1 -appVersion: "8.0.1" +version: 6.2.2 +appVersion: "6.2.2" diff --git a/internal/constellation/helm/charts/edgeless/csi/charts/snapshot-controller/templates/admission-configuration.yaml b/internal/constellation/helm/charts/edgeless/csi/charts/snapshot-controller/templates/admission-configuration.yaml index ab6159704..95e26f473 100644 --- a/internal/constellation/helm/charts/edgeless/csi/charts/snapshot-controller/templates/admission-configuration.yaml +++ b/internal/constellation/helm/charts/edgeless/csi/charts/snapshot-controller/templates/admission-configuration.yaml @@ -1,6 +1,3 @@ -# Snapshot validating webhook configuration -# Adapted from https://github.com/kubernetes-csi/external-snapshotter/tree/v8.0.1/deploy/kubernetes/webhook-example -# to use cert-manager for serving certificates apiVersion: admissionregistration.k8s.io/v1 kind: ValidatingWebhookConfiguration metadata: @@ -13,7 +10,7 @@ webhooks: - apiGroups: ["snapshot.storage.k8s.io"] apiVersions: ["v1"] operations: ["CREATE", "UPDATE"] - resources: ["volumesnapshotclasses"] + resources: ["volumesnapshots", "volumesnapshotcontents", "volumesnapshotclasses"] scope: "*" clientConfig: service: diff --git a/internal/constellation/helm/charts/edgeless/csi/charts/snapshot-controller/templates/serving-cert.yaml b/internal/constellation/helm/charts/edgeless/csi/charts/snapshot-controller/templates/serving-cert.yaml index 00a1935e4..93c24cec6 100644 --- 
a/internal/constellation/helm/charts/edgeless/csi/charts/snapshot-controller/templates/serving-cert.yaml +++ b/internal/constellation/helm/charts/edgeless/csi/charts/snapshot-controller/templates/serving-cert.yaml @@ -6,7 +6,7 @@ metadata: spec: dnsNames: - 'snapshot-validation-service.{{ .Release.Namespace }}.svc' - - 'snapshot-validation-service.{{ .Release.Namespace }}.svc.cluster.local' + - 'snapshot-validation-service.{{ .Release.Namespace }}.svc.{{ .Values.kubernetesClusterDomain }}' issuerRef: kind: Issuer name: snapshot-validation-selfsigned-issuer diff --git a/internal/constellation/helm/charts/edgeless/csi/charts/snapshot-controller/templates/snapshot-controller.yaml b/internal/constellation/helm/charts/edgeless/csi/charts/snapshot-controller/templates/snapshot-controller.yaml index bd244b0db..ae8dbcc4e 100644 --- a/internal/constellation/helm/charts/edgeless/csi/charts/snapshot-controller/templates/snapshot-controller.yaml +++ b/internal/constellation/helm/charts/edgeless/csi/charts/snapshot-controller/templates/snapshot-controller.yaml @@ -16,11 +16,10 @@ spec: selector: matchLabels: app: snapshot-controller - # The snapshot controller won't be marked as ready if the v1 CRDs are unavailable. - # The flag --retry-crd-interval-max is used to determine how long the controller - # will wait for the CRDs to become available before exiting. The default is 30 seconds - # so minReadySeconds should be set slightly higher than the flag value. - minReadySeconds: 35 + # the snapshot controller won't be marked as ready if the v1 CRDs are unavailable + # in #504 the snapshot-controller will exit after around 7.5 seconds if it + # can't find the v1 CRDs so this value should be greater than that + minReadySeconds: 15 strategy: rollingUpdate: maxSurge: 0 diff --git a/internal/constellation/helm/charts/edgeless/csi/charts/snapshot-controller/templates/snapshot-webhook.yaml b/internal/constellation/helm/charts/edgeless/csi/charts/snapshot-controller/templates/snapshot-webhook.yaml index 8c93b51fc..861f284d7 100644 --- a/internal/constellation/helm/charts/edgeless/csi/charts/snapshot-controller/templates/snapshot-webhook.yaml +++ b/internal/constellation/helm/charts/edgeless/csi/charts/snapshot-controller/templates/snapshot-webhook.yaml @@ -1,6 +1,3 @@ -# Snapshot validating webhook configuration -# Adapted from https://github.com/kubernetes-csi/external-snapshotter/tree/v8.0.1/deploy/kubernetes/webhook-example -# to use cert-manager for serving certificates --- apiVersion: apps/v1 kind: Deployment diff --git a/internal/constellation/helm/charts/edgeless/csi/charts/snapshot-controller/values.yaml b/internal/constellation/helm/charts/edgeless/csi/charts/snapshot-controller/values.yaml index 88e453caf..9c2f219b5 100644 --- a/internal/constellation/helm/charts/edgeless/csi/charts/snapshot-controller/values.yaml +++ b/internal/constellation/helm/charts/edgeless/csi/charts/snapshot-controller/values.yaml @@ -1,14 +1,15 @@ +kubernetesClusterDomain: cluster.local snapshotController: replicas: 2 snapshotController: image: repository: registry.k8s.io/sig-storage/snapshot-controller - tag: v8.2.1@sha256:472fa35a89dadb5a715454fad576ec11aa6f2e8378fc09ae26473d139b77c437 + tag: v6.2.2@sha256:fb95b65bb88f319f0f7d5397c401a654164f11a191f466b4026fa36085c7141b imagePullPolicy: IfNotPresent snapshotWebhook: replicas: 1 webhook: image: repository: registry.k8s.io/sig-storage/snapshot-validation-webhook - tag: v8.1.1@sha256:979842f9a6c23ae1b2ddd26603c27412dfc4d3c027d9cda1cb87a67b91ae9ac8 + tag: 
v6.2.2@sha256:b5be1e04b7c43352f83e135bd772de05437f8f3a20cb9437875d1a0d4f127440 imagePullPolicy: IfNotPresent diff --git a/internal/constellation/helm/charts/edgeless/csi/charts/snapshot-crds/Chart.yaml b/internal/constellation/helm/charts/edgeless/csi/charts/snapshot-crds/Chart.yaml index aa7a27fe1..fd0fc7ae2 100644 --- a/internal/constellation/helm/charts/edgeless/csi/charts/snapshot-crds/Chart.yaml +++ b/internal/constellation/helm/charts/edgeless/csi/charts/snapshot-crds/Chart.yaml @@ -1,7 +1,6 @@ apiVersion: v2 name: snapshot-crds -description: "A chart to deploy CSI snapshot CRDs. Source: https://github.com/kubernetes-csi/external-snapshotter/tree/v8.0.1/client/config/crd" - +description: A chart to deploy CSI snapshot CRDs type: application -version: 8.0.1 -appVersion: "8.0.1" +version: 6.2.2 +appVersion: "6.2.2" diff --git a/internal/constellation/helm/charts/edgeless/csi/charts/snapshot-crds/templates/volumesnapshotclasses.yaml b/internal/constellation/helm/charts/edgeless/csi/charts/snapshot-crds/templates/volumesnapshotclasses.yaml index 8164952a4..56a8e1487 100644 --- a/internal/constellation/helm/charts/edgeless/csi/charts/snapshot-crds/templates/volumesnapshotclasses.yaml +++ b/internal/constellation/helm/charts/edgeless/csi/charts/snapshot-crds/templates/volumesnapshotclasses.yaml @@ -3,8 +3,9 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: + controller-gen.kubebuilder.io/version: v0.11.3 api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/814" - controller-gen.kubebuilder.io/version: v0.15.0 + creationTimestamp: null name: volumesnapshotclasses.snapshot.storage.k8s.io spec: group: snapshot.storage.k8s.io @@ -33,52 +34,42 @@ spec: name: v1 schema: openAPIV3Schema: - description: |- - VolumeSnapshotClass specifies parameters that a underlying storage system uses when - creating a volume snapshot. A specific VolumeSnapshotClass is used by specifying its - name in a VolumeSnapshot object. - VolumeSnapshotClasses are non-namespaced + description: VolumeSnapshotClass specifies parameters that a underlying storage + system uses when creating a volume snapshot. A specific VolumeSnapshotClass + is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses + are non-namespaced properties: apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string deletionPolicy: - description: |- - deletionPolicy determines whether a VolumeSnapshotContent created through - the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. - Supported values are "Retain" and "Delete". - "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. - "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. 
+ description: deletionPolicy determines whether a VolumeSnapshotContent + created through the VolumeSnapshotClass should be deleted when its bound + VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". + "Retain" means that the VolumeSnapshotContent and its physical snapshot + on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are deleted. Required. enum: - Delete - Retain type: string driver: - description: |- - driver is the name of the storage driver that handles this VolumeSnapshotClass. - Required. + description: driver is the name of the storage driver that handles this + VolumeSnapshotClass. Required. type: string kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string - metadata: - type: object parameters: additionalProperties: type: string - description: |- - parameters is a key-value map with storage driver specific parameters for creating snapshots. - These values are opaque to Kubernetes. + description: parameters is a key-value map with storage driver specific + parameters for creating snapshots. These values are opaque to Kubernetes. type: object required: - deletionPolicy diff --git a/internal/constellation/helm/charts/edgeless/csi/charts/snapshot-crds/templates/volumesnapshotcontents.yaml b/internal/constellation/helm/charts/edgeless/csi/charts/snapshot-crds/templates/volumesnapshotcontents.yaml index cd0c879fc..d6181ed93 100644 --- a/internal/constellation/helm/charts/edgeless/csi/charts/snapshot-crds/templates/volumesnapshotcontents.yaml +++ b/internal/constellation/helm/charts/edgeless/csi/charts/snapshot-crds/templates/volumesnapshotcontents.yaml @@ -3,8 +3,9 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.15.0 - api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/955" + controller-gen.kubebuilder.io/version: v0.11.3 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/814" + creationTimestamp: null name: volumesnapshotcontents.snapshot.storage.k8s.io spec: group: snapshot.storage.k8s.io @@ -47,8 +48,7 @@ spec: jsonPath: .spec.volumeSnapshotRef.name name: VolumeSnapshot type: string - - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent - object is bound. + - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. 
jsonPath: .spec.volumeSnapshotRef.namespace name: VolumeSnapshotNamespace type: string @@ -58,206 +58,152 @@ spec: name: v1 schema: openAPIV3Schema: - description: |- - VolumeSnapshotContent represents the actual "on-disk" snapshot object in the - underlying storage system + description: VolumeSnapshotContent represents the actual "on-disk" snapshot + object in the underlying storage system properties: apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string - metadata: - type: object spec: - description: |- - spec defines properties of a VolumeSnapshotContent created by the underlying storage system. - Required. + description: spec defines properties of a VolumeSnapshotContent created + by the underlying storage system. Required. properties: deletionPolicy: - description: |- - deletionPolicy determines whether this VolumeSnapshotContent and its physical snapshot on - the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. - Supported values are "Retain" and "Delete". - "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. - "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. - For dynamically provisioned snapshots, this field will automatically be filled in by the - CSI snapshotter sidecar with the "DeletionPolicy" field defined in the corresponding - VolumeSnapshotClass. - For pre-existing snapshots, users MUST specify this field when creating the - VolumeSnapshotContent object. - Required. + description: deletionPolicy determines whether this VolumeSnapshotContent + and its physical snapshot on the underlying storage system should + be deleted when its bound VolumeSnapshot is deleted. Supported values + are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are kept. + "Delete" means that the VolumeSnapshotContent and its physical snapshot + on underlying storage system are deleted. For dynamically provisioned + snapshots, this field will automatically be filled in by the CSI + snapshotter sidecar with the "DeletionPolicy" field defined in the + corresponding VolumeSnapshotClass. 
For pre-existing snapshots, users + MUST specify this field when creating the VolumeSnapshotContent + object. Required. enum: - Delete - Retain type: string driver: - description: |- - driver is the name of the CSI driver used to create the physical snapshot on - the underlying storage system. - This MUST be the same as the name returned by the CSI GetPluginName() call for - that driver. - Required. + description: driver is the name of the CSI driver used to create the + physical snapshot on the underlying storage system. This MUST be + the same as the name returned by the CSI GetPluginName() call for + that driver. Required. type: string source: - description: |- - source specifies whether the snapshot is (or should be) dynamically provisioned - or already exists, and just requires a Kubernetes object representation. - This field is immutable after creation. - Required. + description: source specifies whether the snapshot is (or should be) + dynamically provisioned or already exists, and just requires a Kubernetes + object representation. This field is immutable after creation. Required. properties: snapshotHandle: - description: |- - snapshotHandle specifies the CSI "snapshot_id" of a pre-existing snapshot on - the underlying storage system for which a Kubernetes object representation - was (or should be) created. - This field is immutable. + description: snapshotHandle specifies the CSI "snapshot_id" of + a pre-existing snapshot on the underlying storage system for + which a Kubernetes object representation was (or should be) + created. This field is immutable. type: string - x-kubernetes-validations: - - message: snapshotHandle is immutable - rule: self == oldSelf volumeHandle: - description: |- - volumeHandle specifies the CSI "volume_id" of the volume from which a snapshot - should be dynamically taken from. + description: volumeHandle specifies the CSI "volume_id" of the + volume from which a snapshot should be dynamically taken from. This field is immutable. type: string - x-kubernetes-validations: - - message: volumeHandle is immutable - rule: self == oldSelf type: object - x-kubernetes-validations: - - message: volumeHandle is required once set - rule: '!has(oldSelf.volumeHandle) || has(self.volumeHandle)' - - message: snapshotHandle is required once set - rule: '!has(oldSelf.snapshotHandle) || has(self.snapshotHandle)' - - message: exactly one of volumeHandle and snapshotHandle must be - set - rule: (has(self.volumeHandle) && !has(self.snapshotHandle)) || (!has(self.volumeHandle) - && has(self.snapshotHandle)) + oneOf: + - required: ["snapshotHandle"] + - required: ["volumeHandle"] sourceVolumeMode: - description: |- - SourceVolumeMode is the mode of the volume whose snapshot is taken. - Can be either “Filesystem” or “Block”. - If not specified, it indicates the source volume's mode is unknown. - This field is immutable. - This field is an alpha field. + description: SourceVolumeMode is the mode of the volume whose snapshot + is taken. Can be either “Filesystem” or “Block”. If not specified, + it indicates the source volume's mode is unknown. This field is + immutable. This field is an alpha field. type: string - x-kubernetes-validations: - - message: sourceVolumeMode is immutable - rule: self == oldSelf volumeSnapshotClassName: - description: |- - name of the VolumeSnapshotClass from which this snapshot was (or will be) - created. 
- Note that after provisioning, the VolumeSnapshotClass may be deleted or - recreated with different set of values, and as such, should not be referenced - post-snapshot creation. + description: name of the VolumeSnapshotClass from which this snapshot + was (or will be) created. Note that after provisioning, the VolumeSnapshotClass + may be deleted or recreated with different set of values, and as + such, should not be referenced post-snapshot creation. type: string volumeSnapshotRef: - description: |- - volumeSnapshotRef specifies the VolumeSnapshot object to which this - VolumeSnapshotContent object is bound. - VolumeSnapshot.Spec.VolumeSnapshotContentName field must reference to - this VolumeSnapshotContent's name for the bidirectional binding to be valid. - For a pre-existing VolumeSnapshotContent object, name and namespace of the - VolumeSnapshot object MUST be provided for binding to happen. - This field is immutable after creation. + description: volumeSnapshotRef specifies the VolumeSnapshot object + to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName + field must reference to this VolumeSnapshotContent's name for the + bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent + object, name and namespace of the VolumeSnapshot object MUST be + provided for binding to happen. This field is immutable after creation. Required. properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. - TODO: this design is not final and this field is subject to change in the future. + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' type: string kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string namespace: - description: |- - Namespace of the referent. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' type: string resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' type: string uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' type: string type: object x-kubernetes-map-type: atomic - x-kubernetes-validations: - - message: both spec.volumeSnapshotRef.name and spec.volumeSnapshotRef.namespace - must be set - rule: has(self.name) && has(self.__namespace__) required: - deletionPolicy - driver - source - volumeSnapshotRef type: object - x-kubernetes-validations: - - message: sourceVolumeMode is required once set - rule: '!has(oldSelf.sourceVolumeMode) || has(self.sourceVolumeMode)' status: description: status represents the current information of a snapshot. properties: creationTime: - description: |- - creationTime is the timestamp when the point-in-time snapshot is taken - by the underlying storage system. - In dynamic snapshot creation case, this field will be filled in by the - CSI snapshotter sidecar with the "creation_time" value returned from CSI - "CreateSnapshot" gRPC call. - For a pre-existing snapshot, this field will be filled with the "creation_time" - value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. - If not specified, it indicates the creation time is unknown. - The format of this field is a Unix nanoseconds time encoded as an int64. - On Unix, the command `date +%s%N` returns the current time in nanoseconds - since 1970-01-01 00:00:00 UTC. + description: creationTime is the timestamp when the point-in-time + snapshot is taken by the underlying storage system. In dynamic snapshot + creation case, this field will be filled in by the CSI snapshotter + sidecar with the "creation_time" value returned from CSI "CreateSnapshot" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "creation_time" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. If not specified, it indicates + the creation time is unknown. The format of this field is a Unix + nanoseconds time encoded as an int64. On Unix, the command `date + +%s%N` returns the current time in nanoseconds since 1970-01-01 + 00:00:00 UTC. format: int64 type: integer error: - description: |- - error is the last observed error during snapshot creation, if any. - Upon success after retry, this error field will be cleared. + description: error is the last observed error during snapshot creation, + if any. Upon success after retry, this error field will be cleared. properties: message: - description: |- - message is a string detailing the encountered error during snapshot - creation if specified. - NOTE: message may be logged, and it should not contain sensitive - information. 
+ description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be + logged, and it should not contain sensitive information.' type: string time: description: time is the timestamp when the error was encountered. @@ -265,40 +211,38 @@ spec: type: string type: object readyToUse: - description: |- - readyToUse indicates if a snapshot is ready to be used to restore a volume. - In dynamic snapshot creation case, this field will be filled in by the - CSI snapshotter sidecar with the "ready_to_use" value returned from CSI - "CreateSnapshot" gRPC call. - For a pre-existing snapshot, this field will be filled with the "ready_to_use" - value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, - otherwise, this field will be set to "True". - If not specified, it means the readiness of a snapshot is unknown. + description: readyToUse indicates if a snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in by the CSI snapshotter sidecar with the "ready_to_use" + value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "ready_to_use" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it, otherwise, this field will be set to "True". If not specified, + it means the readiness of a snapshot is unknown. type: boolean restoreSize: - description: |- - restoreSize represents the complete size of the snapshot in bytes. - In dynamic snapshot creation case, this field will be filled in by the - CSI snapshotter sidecar with the "size_bytes" value returned from CSI - "CreateSnapshot" gRPC call. - For a pre-existing snapshot, this field will be filled with the "size_bytes" - value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. - When restoring a volume from this snapshot, the size of the volume MUST NOT - be smaller than the restoreSize if it is specified, otherwise the restoration will fail. - If not specified, it indicates that the size is unknown. + description: restoreSize represents the complete size of the snapshot + in bytes. In dynamic snapshot creation case, this field will be + filled in by the CSI snapshotter sidecar with the "size_bytes" value + returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "size_bytes" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it. When restoring a volume from this snapshot, the size of the + volume MUST NOT be smaller than the restoreSize if it is specified, + otherwise the restoration will fail. If not specified, it indicates + that the size is unknown. format: int64 minimum: 0 type: integer snapshotHandle: - description: |- - snapshotHandle is the CSI "snapshot_id" of a snapshot on the underlying storage system. - If not specified, it indicates that dynamic snapshot creation has either failed - or it is still in progress. + description: snapshotHandle is the CSI "snapshot_id" of a snapshot + on the underlying storage system. If not specified, it indicates + that dynamic snapshot creation has either failed or it is still + in progress. type: string - volumeGroupSnapshotHandle: - description: |- - VolumeGroupSnapshotHandle is the CSI "group_snapshot_id" of a group snapshot - on the underlying storage system. 
+ volumeGroupSnapshotContentName: + description: VolumeGroupSnapshotContentName is the name of the VolumeGroupSnapshotContent + of which this VolumeSnapshotContent is a part of. type: string type: object required: diff --git a/internal/constellation/helm/charts/edgeless/csi/charts/snapshot-crds/templates/volumesnapshots.yaml b/internal/constellation/helm/charts/edgeless/csi/charts/snapshot-crds/templates/volumesnapshots.yaml index 6b96d7082..3e7f99663 100644 --- a/internal/constellation/helm/charts/edgeless/csi/charts/snapshot-crds/templates/volumesnapshots.yaml +++ b/internal/constellation/helm/charts/edgeless/csi/charts/snapshot-crds/templates/volumesnapshots.yaml @@ -3,8 +3,9 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.15.0 + controller-gen.kubebuilder.io/version: v0.11.3 api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/814" + creationTimestamp: null name: volumesnapshots.snapshot.storage.k8s.io spec: group: snapshot.storage.k8s.io @@ -60,140 +61,103 @@ spec: name: v1 schema: openAPIV3Schema: - description: |- - VolumeSnapshot is a user's request for either creating a point-in-time + description: VolumeSnapshot is a user's request for either creating a point-in-time snapshot of a persistent volume, or binding to a pre-existing snapshot. properties: apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string - metadata: - type: object spec: - description: |- - spec defines the desired characteristics of a snapshot requested by a user. - More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots - Required. + description: 'spec defines the desired characteristics of a snapshot requested + by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots + Required.' properties: source: - description: |- - source specifies where a snapshot will be created from. - This field is immutable after creation. - Required. + description: source specifies where a snapshot will be created from. + This field is immutable after creation. Required. 
properties: persistentVolumeClaimName: - description: |- - persistentVolumeClaimName specifies the name of the PersistentVolumeClaim - object representing the volume from which a snapshot should be created. - This PVC is assumed to be in the same namespace as the VolumeSnapshot - object. - This field should be set if the snapshot does not exists, and needs to be - created. + description: persistentVolumeClaimName specifies the name of the + PersistentVolumeClaim object representing the volume from which + a snapshot should be created. This PVC is assumed to be in the + same namespace as the VolumeSnapshot object. This field should + be set if the snapshot does not exists, and needs to be created. This field is immutable. type: string - x-kubernetes-validations: - - message: persistentVolumeClaimName is immutable - rule: self == oldSelf volumeSnapshotContentName: - description: |- - volumeSnapshotContentName specifies the name of a pre-existing VolumeSnapshotContent - object representing an existing volume snapshot. - This field should be set if the snapshot already exists and only needs a representation in Kubernetes. - This field is immutable. + description: volumeSnapshotContentName specifies the name of a + pre-existing VolumeSnapshotContent object representing an existing + volume snapshot. This field should be set if the snapshot already + exists and only needs a representation in Kubernetes. This field + is immutable. type: string - x-kubernetes-validations: - - message: volumeSnapshotContentName is immutable - rule: self == oldSelf type: object - x-kubernetes-validations: - - message: persistentVolumeClaimName is required once set - rule: '!has(oldSelf.persistentVolumeClaimName) || has(self.persistentVolumeClaimName)' - - message: volumeSnapshotContentName is required once set - rule: '!has(oldSelf.volumeSnapshotContentName) || has(self.volumeSnapshotContentName)' - - message: exactly one of volumeSnapshotContentName and persistentVolumeClaimName - must be set - rule: (has(self.volumeSnapshotContentName) && !has(self.persistentVolumeClaimName)) - || (!has(self.volumeSnapshotContentName) && has(self.persistentVolumeClaimName)) + oneOf: + - required: ["persistentVolumeClaimName"] + - required: ["volumeSnapshotContentName"] volumeSnapshotClassName: - description: |- - VolumeSnapshotClassName is the name of the VolumeSnapshotClass - requested by the VolumeSnapshot. - VolumeSnapshotClassName may be left nil to indicate that the default - SnapshotClass should be used. - A given cluster may have multiple default Volume SnapshotClasses: one - default per CSI Driver. If a VolumeSnapshot does not specify a SnapshotClass, - VolumeSnapshotSource will be checked to figure out what the associated - CSI Driver is, and the default VolumeSnapshotClass associated with that - CSI Driver will be used. If more than one VolumeSnapshotClass exist for - a given CSI Driver and more than one have been marked as default, - CreateSnapshot will fail and generate an event. - Empty string is not allowed for this field. + description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass + requested by the VolumeSnapshot. VolumeSnapshotClassName may be + left nil to indicate that the default SnapshotClass should be used. + A given cluster may have multiple default Volume SnapshotClasses: + one default per CSI Driver. 
If a VolumeSnapshot does not specify + a SnapshotClass, VolumeSnapshotSource will be checked to figure + out what the associated CSI Driver is, and the default VolumeSnapshotClass + associated with that CSI Driver will be used. If more than one VolumeSnapshotClass + exist for a given CSI Driver and more than one have been marked + as default, CreateSnapshot will fail and generate an event. Empty + string is not allowed for this field.' type: string - x-kubernetes-validations: - - message: volumeSnapshotClassName must not be the empty string when - set - rule: size(self) > 0 required: - source type: object status: - description: |- - status represents the current information of a snapshot. - Consumers must verify binding between VolumeSnapshot and - VolumeSnapshotContent objects is successful (by validating that both - VolumeSnapshot and VolumeSnapshotContent point at each other) before - using this object. + description: status represents the current information of a snapshot. + Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent + objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent + point at each other) before using this object. properties: boundVolumeSnapshotContentName: - description: |- - boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent - object to which this VolumeSnapshot object intends to bind to. - If not specified, it indicates that the VolumeSnapshot object has not been - successfully bound to a VolumeSnapshotContent object yet. - NOTE: To avoid possible security issues, consumers must verify binding between - VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that - both VolumeSnapshot and VolumeSnapshotContent point at each other) before using - this object. + description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent + object to which this VolumeSnapshot object intends to bind to. If + not specified, it indicates that the VolumeSnapshot object has not + been successfully bound to a VolumeSnapshotContent object yet. NOTE: + To avoid possible security issues, consumers must verify binding + between VolumeSnapshot and VolumeSnapshotContent objects is successful + (by validating that both VolumeSnapshot and VolumeSnapshotContent + point at each other) before using this object.' type: string creationTime: - description: |- - creationTime is the timestamp when the point-in-time snapshot is taken - by the underlying storage system. - In dynamic snapshot creation case, this field will be filled in by the - snapshot controller with the "creation_time" value returned from CSI - "CreateSnapshot" gRPC call. - For a pre-existing snapshot, this field will be filled with the "creation_time" - value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. - If not specified, it may indicate that the creation time of the snapshot is unknown. + description: creationTime is the timestamp when the point-in-time + snapshot is taken by the underlying storage system. In dynamic snapshot + creation case, this field will be filled in by the snapshot controller + with the "creation_time" value returned from CSI "CreateSnapshot" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "creation_time" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. If not specified, it may indicate + that the creation time of the snapshot is unknown. 
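The VolumeSnapshot spec shown above mirrors this pattern: spec.source accepts exactly one of persistentVolumeClaimName (a snapshot still to be created from a PVC) or volumeSnapshotContentName (a pre-existing snapshot that only needs a Kubernetes representation). A minimal sketch, assuming a hypothetical PVC data-pvc and snapshot class csi-snapclass that are not part of this patch:

apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshot
metadata:
  name: snapshot-example                     # hypothetical name
  namespace: default
spec:
  volumeSnapshotClassName: csi-snapclass     # optional; omit to fall back to the default class
  source:
    persistentVolumeClaimName: data-pvc      # exactly one of the two source fields may be set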
format: date-time type: string error: - description: |- - error is the last observed error during snapshot creation, if any. - This field could be helpful to upper level controllers(i.e., application controller) - to decide whether they should continue on waiting for the snapshot to be created - based on the type of error reported. - The snapshot controller will keep retrying when an error occurs during the - snapshot creation. Upon success, this error field will be cleared. + description: error is the last observed error during snapshot creation, + if any. This field could be helpful to upper level controllers(i.e., + application controller) to decide whether they should continue on + waiting for the snapshot to be created based on the type of error + reported. The snapshot controller will keep retrying when an error + occurs during the snapshot creation. Upon success, this error field + will be cleared. properties: message: - description: |- - message is a string detailing the encountered error during snapshot - creation if specified. - NOTE: message may be logged, and it should not contain sensitive - information. + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be + logged, and it should not contain sensitive information.' type: string time: description: time is the timestamp when the error was encountered. @@ -201,35 +165,32 @@ spec: type: string type: object readyToUse: - description: |- - readyToUse indicates if the snapshot is ready to be used to restore a volume. - In dynamic snapshot creation case, this field will be filled in by the - snapshot controller with the "ready_to_use" value returned from CSI - "CreateSnapshot" gRPC call. - For a pre-existing snapshot, this field will be filled with the "ready_to_use" - value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, - otherwise, this field will be set to "True". - If not specified, it means the readiness of a snapshot is unknown. + description: readyToUse indicates if the snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in by the snapshot controller with the "ready_to_use" + value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing + snapshot, this field will be filled with the "ready_to_use" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it, otherwise, this field will be set to "True". If not specified, + it means the readiness of a snapshot is unknown. type: boolean restoreSize: type: string - description: |- - restoreSize represents the minimum size of volume required to create a volume - from this snapshot. - In dynamic snapshot creation case, this field will be filled in by the - snapshot controller with the "size_bytes" value returned from CSI - "CreateSnapshot" gRPC call. - For a pre-existing snapshot, this field will be filled with the "size_bytes" - value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. - When restoring a volume from this snapshot, the size of the volume MUST NOT - be smaller than the restoreSize if it is specified, otherwise the restoration will fail. - If not specified, it indicates that the size is unknown. + description: restoreSize represents the minimum size of volume required + to create a volume from this snapshot. 
In dynamic snapshot creation + case, this field will be filled in by the snapshot controller with + the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. + For a pre-existing snapshot, this field will be filled with the + "size_bytes" value returned from the CSI "ListSnapshots" gRPC call + if the driver supports it. When restoring a volume from this snapshot, + the size of the volume MUST NOT be smaller than the restoreSize + if it is specified, otherwise the restoration will fail. If not + specified, it indicates that the size is unknown. pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true volumeGroupSnapshotName: - description: |- - VolumeGroupSnapshotName is the name of the VolumeGroupSnapshot of which this - VolumeSnapshot is a part of. + description: VolumeGroupSnapshotName is the name of the VolumeGroupSnapshot + of which this VolumeSnapshot is a part of. type: string type: object required: diff --git a/internal/constellation/helm/charts/edgeless/operators/charts/constellation-operator/crds/autoscalingstrategy-crd.yaml b/internal/constellation/helm/charts/edgeless/operators/charts/constellation-operator/crds/autoscalingstrategy-crd.yaml index 9156e3e71..18dce5e37 100644 --- a/internal/constellation/helm/charts/edgeless/operators/charts/constellation-operator/crds/autoscalingstrategy-crd.yaml +++ b/internal/constellation/helm/charts/edgeless/operators/charts/constellation-operator/crds/autoscalingstrategy-crd.yaml @@ -1,10 +1,9 @@ ---- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.4 name: autoscalingstrategies.update.edgeless.systems + annotations: + controller-gen.kubebuilder.io/version: v0.9.0 spec: group: update.edgeless.systems names: @@ -21,19 +20,14 @@ spec: API. properties: apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object @@ -54,8 +48,8 @@ spec: deployment. type: string enabled: - description: Enabled defines whether cluster autoscaling should be - enabled or not. + description: Enabled defines whether cluster autoscaling should be enabled + or not. 
type: boolean required: - deploymentName @@ -70,8 +64,7 @@ spec: enabled or not. type: boolean replicas: - description: Replicas is the number of replicas for the autoscaler - deployment. + description: Replicas is the number of replicas for the autoscaler deployment. format: int32 type: integer type: object @@ -80,3 +73,9 @@ spec: storage: true subresources: status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/internal/constellation/helm/charts/edgeless/operators/charts/constellation-operator/crds/joiningnode-crd.yaml b/internal/constellation/helm/charts/edgeless/operators/charts/constellation-operator/crds/joiningnode-crd.yaml index 1beca7221..88fb65ae8 100644 --- a/internal/constellation/helm/charts/edgeless/operators/charts/constellation-operator/crds/joiningnode-crd.yaml +++ b/internal/constellation/helm/charts/edgeless/operators/charts/constellation-operator/crds/joiningnode-crd.yaml @@ -1,10 +1,9 @@ ---- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.4 name: joiningnodes.update.edgeless.systems + annotations: + controller-gen.kubebuilder.io/version: v0.9.0 spec: group: update.edgeless.systems names: @@ -20,19 +19,14 @@ spec: description: JoiningNode is the Schema for the joiningnodes API. properties: apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object @@ -65,3 +59,9 @@ spec: storage: true subresources: status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] \ No newline at end of file diff --git a/internal/constellation/helm/charts/edgeless/operators/charts/constellation-operator/crds/nodeversion-crd.yaml b/internal/constellation/helm/charts/edgeless/operators/charts/constellation-operator/crds/nodeversion-crd.yaml index 4b7f7b7e0..9c46b695c 100644 --- a/internal/constellation/helm/charts/edgeless/operators/charts/constellation-operator/crds/nodeversion-crd.yaml +++ b/internal/constellation/helm/charts/edgeless/operators/charts/constellation-operator/crds/nodeversion-crd.yaml @@ -1,10 +1,9 @@ ---- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.4 name: nodeversions.update.edgeless.systems + annotations: + controller-gen.kubebuilder.io/version: v0.9.0 spec: group: update.edgeless.systems names: @@ -20,19 +19,14 @@ spec: description: NodeVersion is the Schema for the nodeversions API. properties: apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object @@ -66,49 +60,65 @@ spec: description: AwaitingAnnotation is a list of nodes that are waiting for the operator to annotate them. items: - description: ObjectReference contains enough information to let - you inspect or modify the referred object. + description: "ObjectReference contains enough information to let + you inspect or modify the referred object. --- New uses of this + type are discouraged because of difficulty describing its usage + when embedded in APIs. 1. Ignored fields. It includes many fields + which are not generally honored. For instance, ResourceVersion + and FieldPath are both very rarely valid in actual usage. 2. Invalid + usage help. It is impossible to add specific help for individual + usage. In most embedded usages, there are particular restrictions + like, \"must refer only to types A and B\" or \"UID not honored\" + or \"name must be restricted\". Those cannot be well described + when embedded. 3. Inconsistent validation. 
Because the usages + are different, the validation rules are different by usage, which + makes it hard for users to predict what will happen. 4. The fields + are both imprecise and overly precise. Kind is not a precise + mapping to a URL. This can produce ambiguity during interpretation + and require a REST mapping. In most cases, the dependency is + on the group,resource tuple and the version of the actual struct + is irrelevant. 5. We cannot easily change it. Because this type + is embedded in many locations, updates to this type will affect + numerous schemas. Don't make new APIs embed an underspecified + API type they do not control. \n Instead of using this type, create + a locally provided and used type that is well-focused on your + reference. For example, ServiceReferences for admission registration: + https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 + ." properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' type: string kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' type: string resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + description: 'Specific resourceVersion to which this reference + is made, if any. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' type: string uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' type: string type: object - x-kubernetes-map-type: atomic type: array budget: description: Budget is the amount of extra nodes that can be created @@ -119,35 +129,43 @@ spec: description: Conditions represent the latest available observations of an object's state items: - description: Condition contains details for one aspect of the current - state of this API Resource. + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" properties: lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. format: date-time type: string message: - description: |- - message is a human readable message indicating details about the transition. - This may be an empty string. + description: message is a human readable message indicating + details about the transition. This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: |- - observedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. format: int64 minimum: 0 type: integer reason: - description: |- - reason contains a programmatic identifier indicating the reason for the condition's last transition. - Producers of specific condition types may define expected values and meanings for this field, - and whether the values are considered a guaranteed API. - The value should be a CamelCase string. + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. 
Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -162,6 +180,10 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -177,389 +199,516 @@ spec: description: Donors is a list of outdated nodes that donate labels to heirs. items: - description: ObjectReference contains enough information to let - you inspect or modify the referred object. + description: "ObjectReference contains enough information to let + you inspect or modify the referred object. --- New uses of this + type are discouraged because of difficulty describing its usage + when embedded in APIs. 1. Ignored fields. It includes many fields + which are not generally honored. For instance, ResourceVersion + and FieldPath are both very rarely valid in actual usage. 2. Invalid + usage help. It is impossible to add specific help for individual + usage. In most embedded usages, there are particular restrictions + like, \"must refer only to types A and B\" or \"UID not honored\" + or \"name must be restricted\". Those cannot be well described + when embedded. 3. Inconsistent validation. Because the usages + are different, the validation rules are different by usage, which + makes it hard for users to predict what will happen. 4. The fields + are both imprecise and overly precise. Kind is not a precise + mapping to a URL. This can produce ambiguity during interpretation + and require a REST mapping. In most cases, the dependency is + on the group,resource tuple and the version of the actual struct + is irrelevant. 5. We cannot easily change it. Because this type + is embedded in many locations, updates to this type will affect + numerous schemas. Don't make new APIs embed an underspecified + API type they do not control. \n Instead of using this type, create + a locally provided and used type that is well-focused on your + reference. For example, ServiceReferences for admission registration: + https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 + ." properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. 
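The Conditions field above uses the standard metav1.Condition shape, so a NodeVersion status entry would look roughly like the sketch below; the type, reason, and message shown are hypothetical illustrative values, not values defined by this patch:

status:
  conditions:
    - type: Available                            # CamelCase condition type
      status: "True"                             # True, False, or Unknown
      reason: NodeVersionUpToDate                # hypothetical CamelCase reason
      message: all nodes run the desired image   # human-readable detail
      observedGeneration: 3
      lastTransitionTime: "2024-01-01T00:00:00Z"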
+ For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' type: string kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' type: string resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' type: string uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' type: string type: object - x-kubernetes-map-type: atomic type: array heirs: description: Heirs is a list of nodes using the latest image that still need to inherit labels from donors. items: - description: ObjectReference contains enough information to let - you inspect or modify the referred object. + description: "ObjectReference contains enough information to let + you inspect or modify the referred object. --- New uses of this + type are discouraged because of difficulty describing its usage + when embedded in APIs. 1. Ignored fields. It includes many fields + which are not generally honored. For instance, ResourceVersion + and FieldPath are both very rarely valid in actual usage. 2. Invalid + usage help. It is impossible to add specific help for individual + usage. In most embedded usages, there are particular restrictions + like, \"must refer only to types A and B\" or \"UID not honored\" + or \"name must be restricted\". Those cannot be well described + when embedded. 3. Inconsistent validation. Because the usages + are different, the validation rules are different by usage, which + makes it hard for users to predict what will happen. 4. The fields + are both imprecise and overly precise. Kind is not a precise + mapping to a URL. This can produce ambiguity during interpretation + and require a REST mapping. 
In most cases, the dependency is + on the group,resource tuple and the version of the actual struct + is irrelevant. 5. We cannot easily change it. Because this type + is embedded in many locations, updates to this type will affect + numerous schemas. Don't make new APIs embed an underspecified + API type they do not control. \n Instead of using this type, create + a locally provided and used type that is well-focused on your + reference. For example, ServiceReferences for admission registration: + https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 + ." properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' type: string kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' type: string resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' type: string uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + description: 'UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' type: string type: object - x-kubernetes-map-type: atomic type: array invalid: description: Invalid is a list of invalid nodes (nodes that cannot be processed by the operator due to missing information or transient faults). items: - description: ObjectReference contains enough information to let - you inspect or modify the referred object. + description: "ObjectReference contains enough information to let + you inspect or modify the referred object. --- New uses of this + type are discouraged because of difficulty describing its usage + when embedded in APIs. 1. Ignored fields. It includes many fields + which are not generally honored. For instance, ResourceVersion + and FieldPath are both very rarely valid in actual usage. 2. Invalid + usage help. It is impossible to add specific help for individual + usage. In most embedded usages, there are particular restrictions + like, \"must refer only to types A and B\" or \"UID not honored\" + or \"name must be restricted\". Those cannot be well described + when embedded. 3. Inconsistent validation. Because the usages + are different, the validation rules are different by usage, which + makes it hard for users to predict what will happen. 4. The fields + are both imprecise and overly precise. Kind is not a precise + mapping to a URL. This can produce ambiguity during interpretation + and require a REST mapping. In most cases, the dependency is + on the group,resource tuple and the version of the actual struct + is irrelevant. 5. We cannot easily change it. Because this type + is embedded in many locations, updates to this type will affect + numerous schemas. Don't make new APIs embed an underspecified + API type they do not control. \n Instead of using this type, create + a locally provided and used type that is well-focused on your + reference. For example, ServiceReferences for admission registration: + https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 + ." properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' type: string kind: - description: |- - Kind of the referent. 
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' type: string resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' type: string uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' type: string type: object - x-kubernetes-map-type: atomic type: array mints: description: Mints is a list of up to date nodes that will become heirs. items: - description: ObjectReference contains enough information to let - you inspect or modify the referred object. + description: "ObjectReference contains enough information to let + you inspect or modify the referred object. --- New uses of this + type are discouraged because of difficulty describing its usage + when embedded in APIs. 1. Ignored fields. It includes many fields + which are not generally honored. For instance, ResourceVersion + and FieldPath are both very rarely valid in actual usage. 2. Invalid + usage help. It is impossible to add specific help for individual + usage. In most embedded usages, there are particular restrictions + like, \"must refer only to types A and B\" or \"UID not honored\" + or \"name must be restricted\". Those cannot be well described + when embedded. 3. Inconsistent validation. Because the usages + are different, the validation rules are different by usage, which + makes it hard for users to predict what will happen. 4. The fields + are both imprecise and overly precise. Kind is not a precise + mapping to a URL. This can produce ambiguity during interpretation + and require a REST mapping. In most cases, the dependency is + on the group,resource tuple and the version of the actual struct + is irrelevant. 5. We cannot easily change it. Because this type + is embedded in many locations, updates to this type will affect + numerous schemas. Don't make new APIs embed an underspecified + API type they do not control. \n Instead of using this type, create + a locally provided and used type that is well-focused on your + reference. For example, ServiceReferences for admission registration: + https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 + ." properties: apiVersion: description: API version of the referent. 
type: string fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' type: string kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' type: string resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' type: string uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' type: string type: object - x-kubernetes-map-type: atomic type: array obsolete: description: Obsolete is a list of obsolete nodes (nodes that have been created by the operator but are no longer needed). items: - description: ObjectReference contains enough information to let - you inspect or modify the referred object. + description: "ObjectReference contains enough information to let + you inspect or modify the referred object. --- New uses of this + type are discouraged because of difficulty describing its usage + when embedded in APIs. 1. Ignored fields. 
It includes many fields + which are not generally honored. For instance, ResourceVersion + and FieldPath are both very rarely valid in actual usage. 2. Invalid + usage help. It is impossible to add specific help for individual + usage. In most embedded usages, there are particular restrictions + like, \"must refer only to types A and B\" or \"UID not honored\" + or \"name must be restricted\". Those cannot be well described + when embedded. 3. Inconsistent validation. Because the usages + are different, the validation rules are different by usage, which + makes it hard for users to predict what will happen. 4. The fields + are both imprecise and overly precise. Kind is not a precise + mapping to a URL. This can produce ambiguity during interpretation + and require a REST mapping. In most cases, the dependency is + on the group,resource tuple and the version of the actual struct + is irrelevant. 5. We cannot easily change it. Because this type + is embedded in many locations, updates to this type will affect + numerous schemas. Don't make new APIs embed an underspecified + API type they do not control. \n Instead of using this type, create + a locally provided and used type that is well-focused on your + reference. For example, ServiceReferences for admission registration: + https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 + ." properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' type: string kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string namespace: - description: |- - Namespace of the referent. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' type: string resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' type: string uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' type: string type: object - x-kubernetes-map-type: atomic type: array outdated: description: Outdated is a list of nodes that are using an outdated image. items: - description: ObjectReference contains enough information to let - you inspect or modify the referred object. + description: "ObjectReference contains enough information to let + you inspect or modify the referred object. --- New uses of this + type are discouraged because of difficulty describing its usage + when embedded in APIs. 1. Ignored fields. It includes many fields + which are not generally honored. For instance, ResourceVersion + and FieldPath are both very rarely valid in actual usage. 2. Invalid + usage help. It is impossible to add specific help for individual + usage. In most embedded usages, there are particular restrictions + like, \"must refer only to types A and B\" or \"UID not honored\" + or \"name must be restricted\". Those cannot be well described + when embedded. 3. Inconsistent validation. Because the usages + are different, the validation rules are different by usage, which + makes it hard for users to predict what will happen. 4. The fields + are both imprecise and overly precise. Kind is not a precise + mapping to a URL. This can produce ambiguity during interpretation + and require a REST mapping. In most cases, the dependency is + on the group,resource tuple and the version of the actual struct + is irrelevant. 5. We cannot easily change it. Because this type + is embedded in many locations, updates to this type will affect + numerous schemas. Don't make new APIs embed an underspecified + API type they do not control. \n Instead of using this type, create + a locally provided and used type that is well-focused on your + reference. For example, ServiceReferences for admission registration: + https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 + ." properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). 
This syntax is chosen only to have some well-defined way of - referencing a part of an object. + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' type: string kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' type: string resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' type: string uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' type: string type: object - x-kubernetes-map-type: atomic type: array pending: description: Pending is a list of pending nodes (joining or leaving the cluster). items: - description: ObjectReference contains enough information to let - you inspect or modify the referred object. + description: "ObjectReference contains enough information to let + you inspect or modify the referred object. --- New uses of this + type are discouraged because of difficulty describing its usage + when embedded in APIs. 1. Ignored fields. It includes many fields + which are not generally honored. For instance, ResourceVersion + and FieldPath are both very rarely valid in actual usage. 2. Invalid + usage help. It is impossible to add specific help for individual + usage. In most embedded usages, there are particular restrictions + like, \"must refer only to types A and B\" or \"UID not honored\" + or \"name must be restricted\". Those cannot be well described + when embedded. 3. Inconsistent validation. Because the usages + are different, the validation rules are different by usage, which + makes it hard for users to predict what will happen. 
4. The fields + are both imprecise and overly precise. Kind is not a precise + mapping to a URL. This can produce ambiguity during interpretation + and require a REST mapping. In most cases, the dependency is + on the group,resource tuple and the version of the actual struct + is irrelevant. 5. We cannot easily change it. Because this type + is embedded in many locations, updates to this type will affect + numerous schemas. Don't make new APIs embed an underspecified + API type they do not control. \n Instead of using this type, create + a locally provided and used type that is well-focused on your + reference. For example, ServiceReferences for admission registration: + https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 + ." properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' type: string kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' type: string resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + description: 'Specific resourceVersion to which this reference + is made, if any. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' type: string uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' type: string type: object - x-kubernetes-map-type: atomic type: array upToDate: description: UpToDate is a list of nodes that are using the latest image and labels. items: - description: ObjectReference contains enough information to let - you inspect or modify the referred object. + description: "ObjectReference contains enough information to let + you inspect or modify the referred object. --- New uses of this + type are discouraged because of difficulty describing its usage + when embedded in APIs. 1. Ignored fields. It includes many fields + which are not generally honored. For instance, ResourceVersion + and FieldPath are both very rarely valid in actual usage. 2. Invalid + usage help. It is impossible to add specific help for individual + usage. In most embedded usages, there are particular restrictions + like, \"must refer only to types A and B\" or \"UID not honored\" + or \"name must be restricted\". Those cannot be well described + when embedded. 3. Inconsistent validation. Because the usages + are different, the validation rules are different by usage, which + makes it hard for users to predict what will happen. 4. The fields + are both imprecise and overly precise. Kind is not a precise + mapping to a URL. This can produce ambiguity during interpretation + and require a REST mapping. In most cases, the dependency is + on the group,resource tuple and the version of the actual struct + is irrelevant. 5. We cannot easily change it. Because this type + is embedded in many locations, updates to this type will affect + numerous schemas. Don't make new APIs embed an underspecified + API type they do not control. \n Instead of using this type, create + a locally provided and used type that is well-focused on your + reference. For example, ServiceReferences for admission registration: + https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 + ." properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). 
This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' type: string kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' type: string resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' type: string uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' type: string type: object - x-kubernetes-map-type: atomic type: array required: - - activeclusterversionupgrade - budget - conditions type: object diff --git a/internal/constellation/helm/charts/edgeless/operators/charts/constellation-operator/crds/pendingnode-crd.yaml b/internal/constellation/helm/charts/edgeless/operators/charts/constellation-operator/crds/pendingnode-crd.yaml index c6cd2db6a..41b5a4cd7 100644 --- a/internal/constellation/helm/charts/edgeless/operators/charts/constellation-operator/crds/pendingnode-crd.yaml +++ b/internal/constellation/helm/charts/edgeless/operators/charts/constellation-operator/crds/pendingnode-crd.yaml @@ -1,10 +1,9 @@ ---- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.4 name: pendingnodes.update.edgeless.systems + annotations: + controller-gen.kubebuilder.io/version: v0.9.0 spec: group: update.edgeless.systems names: @@ -20,19 +19,14 @@ spec: description: PendingNode is the Schema for the pendingnodes API. properties: apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object @@ -40,11 +34,10 @@ spec: description: PendingNodeSpec defines the desired state of PendingNode. properties: deadline: - description: |- - Deadline is the deadline for reaching the goal state. - Joining nodes will be terminated if the deadline is exceeded. - Leaving nodes will remain as unschedulable to prevent data loss. - If not specified, the node may remain in the pending state indefinitely. + description: Deadline is the deadline for reaching the goal state. Joining + nodes will be terminated if the deadline is exceeded. Leaving nodes + will remain as unschedulable to prevent data loss. If not specified, + the node may remain in the pending state indefinitely. format: date-time type: string goal: @@ -54,8 +47,8 @@ spec: - Leave type: string groupID: - description: ScalingGroupID is the ID of the group that this node - shall be part of. + description: ScalingGroupID is the ID of the group that this node shall + be part of. type: string nodeName: description: NodeName is the kubernetes internal name of the node. @@ -79,8 +72,7 @@ spec: - Failed type: string reachedGoal: - description: ReachedGoal is true if the node has reached the goal - state. + description: ReachedGoal is true if the node has reached the goal state. type: boolean type: object type: object @@ -88,3 +80,9 @@ spec: storage: true subresources: status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/internal/constellation/helm/charts/edgeless/operators/charts/constellation-operator/crds/scalinggroup-crd.yaml b/internal/constellation/helm/charts/edgeless/operators/charts/constellation-operator/crds/scalinggroup-crd.yaml index 5eed4ebc8..0e334ae29 100644 --- a/internal/constellation/helm/charts/edgeless/operators/charts/constellation-operator/crds/scalinggroup-crd.yaml +++ b/internal/constellation/helm/charts/edgeless/operators/charts/constellation-operator/crds/scalinggroup-crd.yaml @@ -1,10 +1,9 @@ ---- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.4 name: scalinggroups.update.edgeless.systems + annotations: + controller-gen.kubebuilder.io/version: v0.9.0 spec: group: update.edgeless.systems names: @@ -20,19 +19,14 @@ spec: description: ScalingGroup is the Schema for the scalinggroups API. properties: apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. 
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object @@ -43,16 +37,16 @@ spec: description: AutoscalerGroupName is name that is expected by the autoscaler. type: string autoscaling: - description: Autoscaling specifies wether the scaling group should - automatically scale using the cluster-autoscaler. + description: Autoscaling specifies wether the scaling group should automatically + scale using the cluster-autoscaler. type: boolean groupId: - description: GroupID is the CSP specific, canonical identifier of - a scaling group. + description: GroupID is the CSP specific, canonical identifier of a + scaling group. type: string max: - description: Max is the maximum number of autoscaled nodes in the - scaling group (used by cluster-autoscaler). + description: Max is the maximum number of autoscaled nodes in the scaling + group (used by cluster-autoscaler). format: int32 type: integer min: @@ -61,11 +55,11 @@ spec: format: int32 type: integer nodeGroupName: - description: NodeGroupName is the human friendly name of the node - group as defined in the Constellation configuration. + description: NodeGroupName is the human friendly name of the node group + as defined in the Constellation configuration. type: string nodeImage: - description: NodeVersion is the name of the NodeVersion resource. + description: NodeImage is the name of the NodeImage resource. type: string role: description: Role is the role of the nodes in the scaling group. @@ -81,36 +75,44 @@ spec: description: Conditions represent the latest available observations of an object's state. items: - description: Condition contains details for one aspect of the current - state of this API Resource. + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a foo's + current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" properties: lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. 
- This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. format: date-time type: string message: - description: |- - message is a human readable message indicating details about the transition. - This may be an empty string. + description: message is a human readable message indicating details + about the transition. This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: |- - observedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. format: int64 minimum: 0 type: integer reason: - description: |- - reason contains a programmatic identifier indicating the reason for the condition's last transition. - Producers of specific condition types may define expected values and meanings for this field, - and whether the values are considered a guaranteed API. - The value should be a CamelCase string. - This field may not be empty. + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers of + specific condition types may define expected values and meanings + for this field, and whether the values are considered a guaranteed + API. The value should be a CamelCase string. This field may + not be empty. maxLength: 1024 minLength: 1 pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ @@ -124,6 +126,10 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -136,8 +142,8 @@ spec: type: object type: array imageReference: - description: ImageReference is the image currently used for newly - created nodes in this scaling group. + description: ImageReference is the image currently used for newly created + nodes in this scaling group. 
type: string required: - conditions @@ -147,3 +153,9 @@ spec: storage: true subresources: status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/internal/constellation/helm/charts/edgeless/operators/charts/constellation-operator/templates/deployment.yaml b/internal/constellation/helm/charts/edgeless/operators/charts/constellation-operator/templates/deployment.yaml index 5afe29d9b..9083df141 100644 --- a/internal/constellation/helm/charts/edgeless/operators/charts/constellation-operator/templates/deployment.yaml +++ b/internal/constellation/helm/charts/edgeless/operators/charts/constellation-operator/templates/deployment.yaml @@ -42,8 +42,6 @@ spec: value: {{ .Values.csp | quote }} - name: constellation-uid value: {{ .Values.constellationUID | quote }} - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /var/secrets/google/key.json image: {{ .Values.controllerManager.manager.image | quote }} livenessProbe: httpGet: @@ -74,9 +72,6 @@ spec: - mountPath: /etc/gce name: gceconf readOnly: true - - mountPath: /var/secrets/google - name: gcekey - readOnly: true - mountPath: /etc/constellation-upgrade-agent.sock name: upgrade-agent-socket readOnly: true @@ -114,10 +109,6 @@ spec: name: gceconf optional: true name: gceconf - - name: gcekey - secret: - secretName: gcekey - optional: true - name: upgrade-agent-socket hostPath: path: /run/constellation-upgrade-agent.sock diff --git a/internal/constellation/helm/charts/edgeless/operators/charts/constellation-operator/templates/manager-rbac.yaml b/internal/constellation/helm/charts/edgeless/operators/charts/constellation-operator/templates/manager-rbac.yaml index 0e271ff7f..45dddbdd9 100644 --- a/internal/constellation/helm/charts/edgeless/operators/charts/constellation-operator/templates/manager-rbac.yaml +++ b/internal/constellation/helm/charts/edgeless/operators/charts/constellation-operator/templates/manager-rbac.yaml @@ -13,7 +13,6 @@ rules: verbs: - get - list - - watch - apiGroups: - "" resources: @@ -59,10 +58,6 @@ rules: - update.edgeless.systems resources: - autoscalingstrategies - - joiningnodes - - nodeversions - - pendingnodes - - scalinggroups verbs: - create - delete @@ -75,20 +70,38 @@ rules: - update.edgeless.systems resources: - autoscalingstrategies/finalizers - - joiningnodes/finalizers - - nodeversions/finalizers - - pendingnodes/finalizers - - scalinggroups/finalizers verbs: - update - apiGroups: - update.edgeless.systems resources: - autoscalingstrategies/status + verbs: + - get + - patch + - update +- apiGroups: + - update.edgeless.systems + resources: + - joiningnodes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - update.edgeless.systems + resources: + - joiningnodes/finalizers + verbs: + - update +- apiGroups: + - update.edgeless.systems + resources: - joiningnodes/status - - nodeversions/status - - pendingnodes/status - - scalinggroups/status verbs: - get - patch @@ -107,6 +120,84 @@ rules: - nodeversion/status verbs: - get +- apiGroups: + - update.edgeless.systems + resources: + - nodeversions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - update.edgeless.systems + resources: + - nodeversions/finalizers + verbs: + - update +- apiGroups: + - update.edgeless.systems + resources: + - nodeversions/status + verbs: + - get + - patch + - update +- apiGroups: + - update.edgeless.systems + resources: + - pendingnodes + verbs: + - create + - delete + - get + - list + - patch + - 
update + - watch +- apiGroups: + - update.edgeless.systems + resources: + - pendingnodes/finalizers + verbs: + - update +- apiGroups: + - update.edgeless.systems + resources: + - pendingnodes/status + verbs: + - get + - patch + - update +- apiGroups: + - update.edgeless.systems + resources: + - scalinggroups + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - update.edgeless.systems + resources: + - scalinggroups/finalizers + verbs: + - update +- apiGroups: + - update.edgeless.systems + resources: + - scalinggroups/status + verbs: + - get + - patch + - update --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding diff --git a/internal/constellation/helm/charts/edgeless/operators/charts/node-maintenance-operator/templates/manager-rbac.yaml b/internal/constellation/helm/charts/edgeless/operators/charts/node-maintenance-operator/templates/manager-rbac.yaml index c5d5a211b..52b9e568a 100644 --- a/internal/constellation/helm/charts/edgeless/operators/charts/node-maintenance-operator/templates/manager-rbac.yaml +++ b/internal/constellation/helm/charts/edgeless/operators/charts/node-maintenance-operator/templates/manager-rbac.yaml @@ -12,13 +12,7 @@ rules: resources: - namespaces verbs: - - create - - delete - get - - list - - patch - - update - - watch - apiGroups: - "" resources: diff --git a/internal/constellation/helm/chartutil.go b/internal/constellation/helm/chartutil.go index 1f5017519..405b57175 100644 --- a/internal/constellation/helm/chartutil.go +++ b/internal/constellation/helm/chartutil.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package helm diff --git a/internal/constellation/helm/cilium.patch b/internal/constellation/helm/cilium.patch new file mode 100644 index 000000000..cc12f4cb5 --- /dev/null +++ b/internal/constellation/helm/cilium.patch @@ -0,0 +1,103 @@ +diff --git a/install/kubernetes/cilium/templates/cilium-configmap.yaml b/install/kubernetes/cilium/templates/cilium-configmap.yaml +index 4ac3b006e3..3541e3d380 100644 +--- a/install/kubernetes/cilium/templates/cilium-configmap.yaml ++++ b/install/kubernetes/cilium/templates/cilium-configmap.yaml +@@ -608,7 +608,9 @@ data: + {{- if .Values.encryption.strictMode.enabled }} + enable-encryption-strict-mode: {{ .Values.encryption.strictMode.enabled | quote }} + +- encryption-strict-mode-cidr: {{ .Values.encryption.strictMode.cidr | quote }} ++ encryption-strict-mode-node-cidrs: {{ .Values.encryption.strictMode.nodeCIDRList | join " " | quote }} ++ ++ encryption-strict-mode-pod-cidrs: {{ .Values.encryption.strictMode.podCIDRList | join " " | quote }} + + encryption-strict-mode-allow-remote-node-identities: {{ .Values.encryption.strictMode.allowRemoteNodeIdentities | quote }} + {{- end }} +diff --git a/install/kubernetes/cilium/values.yaml b/install/kubernetes/cilium/values.yaml +index c00e9af831..4661c16f56 100644 +--- a/install/kubernetes/cilium/values.yaml ++++ b/install/kubernetes/cilium/values.yaml +@@ -794,17 +794,21 @@ encryption: + # This option is only effective when encryption.type is set to "wireguard". + nodeEncryption: false + +- # -- Configure the WireGuard Pod2Pod strict mode. ++ # -- Configure the WireGuard strict mode. + strictMode: +- # -- Enable WireGuard Pod2Pod strict mode. ++ # -- Enable WireGuard strict mode. + enabled: false ++ ++ # -- podCIDRList for the WireGuard strict mode. ++ podCIDRList: [] + +- # -- CIDR for the WireGuard Pod2Pod strict mode. 
+- cidr: "" ++ # -- nodeCIDRList for the WireGuard strict mode. ++ nodeCIDRList: [] + + # -- Allow dynamic lookup of remote node identities. + # This is required when tunneling is used or direct routing is used and the node CIDR and pod CIDR overlap. +- allowRemoteNodeIdentities: false ++ # This is also required when control-plane nodes are exempted from node-to-node encryption. ++ allowRemoteNodeIdentities: true + + ipsec: + # -- Name of the key file inside the Kubernetes secret configured via secretName. +diff --git a/install/kubernetes/cilium/Chart.yaml b/install/kubernetes/cilium/Chart.yaml +index 256a79542..3f3fc714b 100644 +--- a/install/kubernetes/cilium/Chart.yaml ++++ b/install/kubernetes/cilium/Chart.yaml +@@ -2,8 +2,8 @@ apiVersion: v2 + name: cilium + displayName: Cilium + home: https://cilium.io/ +-version: 1.15.0-pre.3 +-appVersion: 1.15.0-pre.3 ++version: 1.15.0-pre.3-edg.3 ++appVersion: 1.15.0-pre.3-edg.3 + kubeVersion: ">= 1.16.0-0" + icon: https://cdn.jsdelivr.net/gh/cilium/cilium@main/Documentation/images/logo-solo.svg + description: eBPF-based Networking, Security, and Observability +diff --git a/install/kubernetes/cilium/templates/cilium-agent/daemonset.yaml b/install/kubernetes/cilium/templates/cilium-agent/daemonset.yaml +index f6b493cb7..50b80267a 100644 +--- a/install/kubernetes/cilium/templates/cilium-agent/daemonset.yaml ++++ b/install/kubernetes/cilium/templates/cilium-agent/daemonset.yaml +@@ -715,6 +715,37 @@ spec: + - name: cni-path + mountPath: /host/opt/cni/bin + {{- end }} # .Values.cni.install ++ - name: firewall-pods ++ image: ghcr.io/edgelesssys/cilium/cilium:v1.15.0-pre.3-edg.2@sha256:c21b7fbbb084a128a479d6170e5f89ad2768dfecb4af10ee6a99ffe5d1a11749 ++ imagePullPolicy: IfNotPresent ++ command: ++ - /bin/bash ++ - -exc ++ - | ++ pref=32 ++ interface=$(ip route | awk '/^default/ { print $5 }') ++ tc qdisc add dev "${interface}" clsact || true ++ tc filter del dev "${interface}" ingress pref "${pref}" 2>/dev/null || true ++ handle=0 ++ for cidr in ${POD_CIDRS}; do ++ handle=$((handle + 1)) ++ tc filter replace dev "${interface}" ingress pref "${pref}" handle "${handle}" protocol ip flower dst_ip "${cidr}" action drop ++ done ++ env: ++ - name: POD_CIDRS ++ valueFrom: ++ configMapKeyRef: ++ key: encryption-strict-mode-pod-cidrs ++ name: cilium-config ++ optional: true ++ resources: ++ requests: ++ cpu: 100m ++ memory: 20Mi ++ securityContext: ++ capabilities: ++ add: ++ - NET_ADMIN + restartPolicy: Always + priorityClassName: {{ include "cilium.priorityClass" (list $ .Values.priorityClassName "system-node-critical") }} + serviceAccount: {{ .Values.serviceAccounts.cilium.name | quote }} diff --git a/internal/constellation/helm/corednsgen/BUILD.bazel b/internal/constellation/helm/corednsgen/BUILD.bazel deleted file mode 100644 index 4dbc61a61..000000000 --- a/internal/constellation/helm/corednsgen/BUILD.bazel +++ /dev/null @@ -1,26 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") - -go_library( - name = "corednsgen_lib", - srcs = ["corednsgen.go"], - importpath = "github.com/edgelesssys/constellation/v2/internal/constellation/helm/corednsgen", - visibility = ["//visibility:private"], - deps = [ - "//internal/versions", - "@com_github_regclient_regclient//:regclient", - "@com_github_regclient_regclient//types/ref", - "@io_k8s_api//apps/v1:apps", - "@io_k8s_api//core/v1:core", - "@io_k8s_kubernetes//cmd/kubeadm/app/apis/kubeadm", - "@io_k8s_kubernetes//cmd/kubeadm/app/images", - "@io_k8s_kubernetes//cmd/kubeadm/app/phases/addons/dns", 
- "@io_k8s_kubernetes//cmd/kubeadm/app/util", - "@io_k8s_sigs_yaml//:yaml", - ], -) - -go_binary( - name = "corednsgen", - embed = [":corednsgen_lib"], - visibility = ["//:__subpackages__"], -) diff --git a/internal/constellation/helm/corednsgen/corednsgen.go b/internal/constellation/helm/corednsgen/corednsgen.go deleted file mode 100644 index c648cca69..000000000 --- a/internal/constellation/helm/corednsgen/corednsgen.go +++ /dev/null @@ -1,181 +0,0 @@ -/* -Copyright (c) Edgeless Systems GmbH - -SPDX-License-Identifier: BUSL-1.1 -*/ - -// corednsgen synthesizes a Helm chart from the resource templates embedded in -// kubeadm and writes it to the `charts` directory underneath the current -// working directory. This removes the existing `coredns` subdirectory! -package main - -import ( - "context" - "flag" - "fmt" - "log" - "os" - "path/filepath" - - "github.com/edgelesssys/constellation/v2/internal/versions" - "github.com/regclient/regclient" - "github.com/regclient/regclient/types/ref" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" - "k8s.io/kubernetes/cmd/kubeadm/app/images" - kubedns "k8s.io/kubernetes/cmd/kubeadm/app/phases/addons/dns" - kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" - "sigs.k8s.io/yaml" -) - -const configMapName = "edg-coredns" - -var chartDir = flag.String("charts", "./charts", "target directory to create charts in") - -func main() { - flag.Parse() - - if err := os.RemoveAll(filepath.Join(*chartDir, "coredns")); err != nil { - log.Fatalf("Could not remove chart dir: %v", err) - } - - writeFileRelativeToChartDir(chartYAML(), "Chart.yaml") - writeFileRelativeToChartDir(valuesYAML(), "values.yaml") - - writeTemplate(kubedns.CoreDNSServiceAccount, "serviceaccount.yaml") - writeTemplate(kubedns.CoreDNSClusterRole, "clusterrole.yaml") - writeTemplate(kubedns.CoreDNSClusterRoleBinding, "clusterrolebinding.yaml") - writeTemplate(kubedns.CoreDNSService, "service.yaml") - - writeFileRelativeToChartDir(patchedConfigMap(), "templates", "configmap.yaml") - writeFileRelativeToChartDir(patchedDeployment(), "templates", "deployment.yaml") -} - -func chartYAML() []byte { - chart := map[string]string{ - "apiVersion": "v2", - "name": "kube-dns", - "version": "0.0.0", - } - data, err := yaml.Marshal(chart) - if err != nil { - log.Fatalf("Could not marshal Chart.yaml: %v", err) - } - return data -} - -func valuesYAML() []byte { - cfg := &kubeadm.ClusterConfiguration{ - KubernetesVersion: string(versions.Default), - ImageRepository: "registry.k8s.io", - } - img := images.GetDNSImage(cfg) - ref, err := ref.New(img) - if err != nil { - log.Fatalf("Could not parse image reference: %v", err) - } - - rc := regclient.New() - m, err := rc.ManifestGet(context.Background(), ref) - if err != nil { - log.Fatalf("Could not get image manifest: %v", err) - } - - values := map[string]string{ - "clusterIP": "10.96.0.10", - "dnsDomain": "cluster.local", - "image": fmt.Sprintf("%s/%s:%s@%s", ref.Registry, ref.Repository, ref.Tag, m.GetDescriptor().Digest.String()), - } - data, err := yaml.Marshal(values) - if err != nil { - log.Fatalf("Could not marshal values.yaml: %v", err) - } - return data -} - -// patchedConfigMap renames the CoreDNS ConfigMap such that kubeadm does not find it. -// -// See https://github.com/kubernetes/kubeadm/issues/2846#issuecomment-1899942683. 
-func patchedConfigMap() []byte { - var cm corev1.ConfigMap - if err := yaml.Unmarshal(parseTemplate(kubedns.CoreDNSConfigMap), &cm); err != nil { - log.Fatalf("Could not parse configmap: %v", err) - } - - cm.Name = configMapName - - out, err := yaml.Marshal(cm) - if err != nil { - log.Fatalf("Could not marshal patched deployment: %v", err) - } - return out -} - -// patchedDeployment extracts the CoreDNS Deployment from kubeadm, adds necessary tolerations and updates the ConfigMap reference. -func patchedDeployment() []byte { - var d appsv1.Deployment - if err := yaml.Unmarshal(parseTemplate(kubedns.CoreDNSDeployment), &d); err != nil { - log.Fatalf("Could not parse deployment: %v", err) - } - - tolerations := []corev1.Toleration{ - {Key: "node.cloudprovider.kubernetes.io/uninitialized", Value: "true", Effect: corev1.TaintEffectNoSchedule}, - {Key: "node.kubernetes.io/unreachable", Operator: corev1.TolerationOpExists, Effect: corev1.TaintEffectNoExecute, TolerationSeconds: toPtr(int64(10))}, - } - d.Spec.Template.Spec.Tolerations = append(d.Spec.Template.Spec.Tolerations, tolerations...) - - for i, vol := range d.Spec.Template.Spec.Volumes { - if vol.ConfigMap != nil { - vol.ConfigMap.Name = configMapName - } - d.Spec.Template.Spec.Volumes[i] = vol - } - - out, err := yaml.Marshal(d) - if err != nil { - log.Fatalf("Could not marshal patched deployment: %v", err) - } - return out -} - -func writeFileRelativeToChartDir(content []byte, pathElements ...string) { - p := filepath.Join(append([]string{*chartDir, "coredns"}, pathElements...)...) - d := filepath.Dir(p) - if err := os.MkdirAll(d, 0o755); err != nil { - log.Fatalf("Could not create dir %q: %v", d, err) - } - if err := os.WriteFile(p, content, 0o644); err != nil { - log.Fatalf("Could not write file %q: %v", p, err) - } -} - -// parseTemplate replaces the Go template placeholders in kubeadm resources -// with fixed values or Helm value placeholders. -func parseTemplate(tmpl string) []byte { - vars := struct { - DeploymentName, Image, ControlPlaneTaintKey, DNSDomain, DNSIP string - Replicas *int32 - }{ - DeploymentName: "coredns", - DNSDomain: `{{ .Values.dnsDomain }}`, - DNSIP: `"{{ .Values.clusterIP }}"`, - Image: `"{{ .Values.image }}"`, - ControlPlaneTaintKey: "node-role.kubernetes.io/control-plane", - Replicas: toPtr(int32(2)), - } - data, err := kubeadmutil.ParseTemplate(tmpl, vars) - if err != nil { - log.Fatalf("Could not interpolate template: %v", err) - } - return data -} - -func writeTemplate(tmpl string, name string) { - data := parseTemplate(tmpl) - writeFileRelativeToChartDir(data, "templates", name) -} - -func toPtr[T any](v T) *T { - return &v -} diff --git a/internal/constellation/helm/generateCertManager.sh b/internal/constellation/helm/generateCertManager.sh index 80df82775..02b98676e 100755 --- a/internal/constellation/helm/generateCertManager.sh +++ b/internal/constellation/helm/generateCertManager.sh @@ -5,7 +5,7 @@ set -o errtrace shopt -s inherit_errexit echo "Pulling cert-manager Helm chart..." 
-version="1.15.0" +version="1.12.6" function cleanup { rm -rf "charts/cert-manager/README.md" "charts/cert-manager-v${version}.tgz" @@ -38,7 +38,7 @@ yq eval -i '.cainjector.image.digest = "sha256:'"${v}"'"' charts/cert-manager/va v=$(get_sha256_hash "cert-manager-acmesolver") yq eval -i '.acmesolver.image.digest = "sha256:'"${v}"'"' charts/cert-manager/values.yaml -v=$(get_sha256_hash "cert-manager-startupapicheck") +v=$(get_sha256_hash "cert-manager-ctl") yq eval -i '.startupapicheck.image.digest = "sha256:'"${v}"'"' charts/cert-manager/values.yaml echo # final newline diff --git a/internal/constellation/helm/generateCilium.sh b/internal/constellation/helm/generateCilium.sh index acf28ca77..df2a625bd 100755 --- a/internal/constellation/helm/generateCilium.sh +++ b/internal/constellation/helm/generateCilium.sh @@ -21,13 +21,14 @@ git clone \ --no-checkout \ --sparse \ --depth 1 \ - -b v1.15.19-edg.0 \ - https://github.com/edgelesssys/cilium.git + -b 1.15.0-pre.3 \ + https://github.com/cilium/cilium.git cd cilium git sparse-checkout add install/kubernetes/cilium git checkout +git apply "${calldir}/cilium.patch" cp -r install/kubernetes/cilium "${calldir}/charts" echo # final newline diff --git a/internal/constellation/helm/helm.go b/internal/constellation/helm/helm.go index 3ac7be9e1..dcc994c6c 100644 --- a/internal/constellation/helm/helm.go +++ b/internal/constellation/helm/helm.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* @@ -91,7 +91,6 @@ type Options struct { HelmWaitMode WaitMode ApplyTimeout time.Duration OpenStackValues *OpenStackValues - ServiceCIDR string } // PrepareApply loads the charts and returns the executor to apply them. @@ -115,8 +114,7 @@ func (h Client) loadReleases( ) ([]release, error) { helmLoader := newLoader(flags.CSP, flags.AttestationVariant, flags.K8sVersion, stateFile, h.cliVersion) h.log.Debug("Created new Helm loader") - // TODO(burgerdev): pass down the entire flags struct - return helmLoader.loadReleases(flags.Conformance, flags.DeployCSIDriver, flags.HelmWaitMode, secret, serviceAccURI, flags.OpenStackValues, flags.ServiceCIDR) + return helmLoader.loadReleases(flags.Conformance, flags.DeployCSIDriver, flags.HelmWaitMode, secret, serviceAccURI, flags.OpenStackValues) } // Applier runs the Helm actions. 
diff --git a/internal/constellation/helm/helm_test.go b/internal/constellation/helm/helm_test.go index 8b36ea80c..cd8aab6a6 100644 --- a/internal/constellation/helm/helm_test.go +++ b/internal/constellation/helm/helm_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package helm @@ -194,12 +194,11 @@ func TestHelmApply(t *testing.T) { awsLbVersion = *tc.clusterAWSLBVersion } - certManagerVersion := "v1.15.0" // current version + certManagerVersion := "v1.12.6" // current version if tc.clusterCertManagerVersion != nil { certManagerVersion = *tc.clusterCertManagerVersion } - helmListVersion(lister, "cilium", "v1.15.19-edg.0") - helmListVersion(lister, "coredns", "v0.0.0") + helmListVersion(lister, "cilium", "v1.15.0-pre.3-edg.3") helmListVersion(lister, "cert-manager", certManagerVersion) helmListVersion(lister, "constellation-services", tc.clusterMicroServiceVersion) helmListVersion(lister, "constellation-operators", tc.clusterMicroServiceVersion) diff --git a/internal/constellation/helm/imageversion/imageversion.go b/internal/constellation/helm/imageversion/imageversion.go index 266840bda..a29126f87 100644 --- a/internal/constellation/helm/imageversion/imageversion.go +++ b/internal/constellation/helm/imageversion/imageversion.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package imageversion contains the pinned container images for the helm charts. diff --git a/internal/constellation/helm/imageversion/placeholder.go b/internal/constellation/helm/imageversion/placeholder.go index a754ade5e..22bb65b41 100644 --- a/internal/constellation/helm/imageversion/placeholder.go +++ b/internal/constellation/helm/imageversion/placeholder.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package imageversion diff --git a/internal/constellation/helm/loader.go b/internal/constellation/helm/loader.go index 61822da50..a3c6a50fa 100644 --- a/internal/constellation/helm/loader.go +++ b/internal/constellation/helm/loader.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package helm @@ -15,9 +15,9 @@ import ( "strings" "github.com/pkg/errors" + "helm.sh/helm/pkg/ignore" "helm.sh/helm/v3/pkg/chart" "helm.sh/helm/v3/pkg/chart/loader" - "helm.sh/helm/v3/pkg/ignore" "github.com/edgelesssys/constellation/v2/internal/attestation/variant" "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider" @@ -31,7 +31,6 @@ import ( // Run `go generate` to download (and patch) upstream helm charts. //go:generate ./generateCilium.sh -//go:generate go run ./corednsgen/ //go:generate ./update-csi-charts.sh //go:generate ./generateCertManager.sh //go:generate ./update-aws-load-balancer-chart.sh @@ -47,7 +46,6 @@ type chartInfo struct { var ( // Charts we fetch from an upstream with real versions. 
- coreDNSInfo = chartInfo{releaseName: "coredns", chartName: "coredns", path: "charts/coredns"} ciliumInfo = chartInfo{releaseName: "cilium", chartName: "cilium", path: "charts/cilium"} certManagerInfo = chartInfo{releaseName: "cert-manager", chartName: "cert-manager", path: "charts/cert-manager"} awsLBControllerInfo = chartInfo{releaseName: "aws-load-balancer-controller", chartName: "aws-load-balancer-controller", path: "charts/aws-load-balancer-controller"} @@ -126,7 +124,7 @@ type OpenStackValues struct { // loadReleases loads the embedded helm charts and returns them as a HelmReleases object. func (i *chartLoader) loadReleases(conformanceMode, deployCSIDriver bool, helmWaitMode WaitMode, masterSecret uri.MasterSecret, - serviceAccURI string, openStackValues *OpenStackValues, serviceCIDR string, + serviceAccURI string, openStackValues *OpenStackValues, ) (releaseApplyOrder, error) { ciliumRelease, err := i.loadRelease(ciliumInfo, helmWaitMode) if err != nil { @@ -135,16 +133,6 @@ func (i *chartLoader) loadReleases(conformanceMode, deployCSIDriver bool, helmWa ciliumVals := extraCiliumValues(i.csp, conformanceMode, i.stateFile.Infrastructure) ciliumRelease.values = mergeMaps(ciliumRelease.values, ciliumVals) - coreDNSRelease, err := i.loadRelease(coreDNSInfo, helmWaitMode) - if err != nil { - return nil, fmt.Errorf("loading coredns: %w", err) - } - coreDNSVals, err := extraCoreDNSValues(serviceCIDR) - if err != nil { - return nil, fmt.Errorf("loading coredns values: %w", err) - } - coreDNSRelease.values = mergeMaps(coreDNSRelease.values, coreDNSVals) - certManagerRelease, err := i.loadRelease(certManagerInfo, helmWaitMode) if err != nil { return nil, fmt.Errorf("loading cert-manager: %w", err) @@ -168,7 +156,7 @@ func (i *chartLoader) loadReleases(conformanceMode, deployCSIDriver bool, helmWa } conServicesRelease.values = mergeMaps(conServicesRelease.values, svcVals) - releases := releaseApplyOrder{ciliumRelease, coreDNSRelease, conServicesRelease, certManagerRelease, operatorRelease} + releases := releaseApplyOrder{ciliumRelease, conServicesRelease, certManagerRelease, operatorRelease} if deployCSIDriver { csiRelease, err := i.loadRelease(csiInfo, WaitModeNone) if err != nil { @@ -236,8 +224,6 @@ func (i *chartLoader) loadRelease(info chartInfo, helmWaitMode WaitMode) (releas values = i.loadAWSLBControllerValues() case csiInfo.releaseName: values = i.loadCSIValues() - default: - values = map[string]any{} } // Charts we package ourselves have version 0.0.0. @@ -381,18 +367,16 @@ func (i *chartLoader) loadCiliumValues(cloudprovider.Provider) (map[string]any, "image": map[string]any{ "repository": "ghcr.io/edgelesssys/cilium/cilium", "suffix": "", - "tag": "v1.15.19-edg.0", - "digest": "sha256:700218a5ffc10473ce9b09d560b8e0e3ed1309a4d57a9273da2ed16e3e1533f3", + "tag": "v1.15.0-pre.3-edg.2", + "digest": "sha256:c21b7fbbb084a128a479d6170e5f89ad2768dfecb4af10ee6a99ffe5d1a11749", "useDigest": true, }, "operator": map[string]any{ "image": map[string]any{ - "repository": "ghcr.io/edgelesssys/cilium/operator", - "suffix": "", - "tag": "v1.15.19-edg.0", - // Careful: this is the digest of ghcr.io/.../operator-generic! - // See magic image manipulation in ./helm/charts/cilium/templates/cilium-operator/_helpers.tpl. 
- "genericDigest": "sha256:5db046fea42cb1239d4eaa0f870d10e77911768a1eaf34c4968488dea93e27c4", + "repository": "ghcr.io/edgelesssys/cilium/operator", + "suffix": "", + "tag": "v1.15.0-pre.3-edg.2", + "genericDigest": "sha256:4ea9de5cfeb4554b82b509f0de41120a90e35a15e81a04f76c4cb405ddea3e7c", "useDigest": true, }, "podDisruptionBudget": map[string]any{ diff --git a/internal/constellation/helm/loader_test.go b/internal/constellation/helm/loader_test.go index 6ee767e73..762f544b3 100644 --- a/internal/constellation/helm/loader_test.go +++ b/internal/constellation/helm/loader_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package helm @@ -94,7 +94,7 @@ func TestLoadReleases(t *testing.T) { helmReleases, err := chartLoader.loadReleases( true, false, WaitModeAtomic, uri.MasterSecret{Key: []byte("secret"), Salt: []byte("masterSalt")}, - fakeServiceAccURI(cloudprovider.GCP), nil, "172.16.128.0/17", + fakeServiceAccURI(cloudprovider.GCP), nil, ) require.NoError(err) for _, release := range helmReleases { @@ -260,55 +260,6 @@ func TestConstellationServices(t *testing.T) { } } -func TestExtraCoreDNSValues(t *testing.T) { - testCases := map[string]struct { - cidr string - wantIP string - wantUnset bool - wantErr bool - }{ - "default": { - cidr: "10.96.0.0/12", - wantIP: "10.96.0.10", - }, - "custom": { - cidr: "172.16.128.0/17", - wantIP: "172.16.128.10", - }, - "too small": { - cidr: "172.16.0.0/30", - wantErr: true, - }, - "bad ip": { - cidr: "cluster.local", - wantErr: true, - }, - "v6": { - cidr: "fd12:3456:789a:100::/56", - wantIP: "fd12:3456:789a:100::a", - }, - "no ip": { - wantUnset: true, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - values, err := extraCoreDNSValues(tc.cidr) - if tc.wantErr { - assert.Error(t, err) - return - } - ip, ok := values["clusterIP"] - if tc.wantUnset { - assert.False(t, ok) - return - } - assert.Equal(t, tc.wantIP, ip) - }) - } -} - // TestOperators checks if the rendered constellation-services chart produces the expected yaml files. func TestOperators(t *testing.T) { testCases := map[string]struct { diff --git a/internal/constellation/helm/overrides.go b/internal/constellation/helm/overrides.go index 6c05c0b10..cf454735a 100644 --- a/internal/constellation/helm/overrides.go +++ b/internal/constellation/helm/overrides.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* Overrides contains helm values that are dynamically injected into the helm charts. @@ -21,21 +21,8 @@ import ( "github.com/edgelesssys/constellation/v2/internal/constants" "github.com/edgelesssys/constellation/v2/internal/constellation/state" "github.com/edgelesssys/constellation/v2/internal/kms/uri" - kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" ) -func extraCoreDNSValues(serviceCIDR string) (map[string]any, error) { - if serviceCIDR == "" { - return map[string]any{}, nil - } - ip, err := kubeadmconstants.GetDNSIP(serviceCIDR) - if err != nil { - return nil, fmt.Errorf("calculating DNS service IP: %w", err) - } - - return map[string]any{"clusterIP": ip.String()}, nil -} - // TODO(malt3): switch over to DNS name on AWS and Azure // soon as every apiserver certificate of every control-plane node // has the dns endpoint in its SAN list. 
@@ -46,9 +33,17 @@ func extraCoreDNSValues(serviceCIDR string) (map[string]any, error) { // Also, the charts are not rendered correctly without all of these values. func extraCiliumValues(provider cloudprovider.Provider, conformanceMode bool, output state.Infrastructure) map[string]any { extraVals := map[string]any{} + if conformanceMode { + extraVals["kubeProxyReplacementHealthzBindAddr"] = "" + extraVals["kubeProxyReplacement"] = "partial" + extraVals["sessionAffinity"] = true + extraVals["cni"] = map[string]any{ + "chainingMode": "portmap", + } + } strictMode := map[string]any{} - // TODO: Once we are able to set the subnet of the load balancer VMs + // TODO(@3u13r): Once we are able to set the subnet of the load balancer VMs // on STACKIT, we can remove the OpenStack exception here. if provider != cloudprovider.QEMU && provider != cloudprovider.OpenStack { strictMode = map[string]any{ @@ -80,30 +75,6 @@ func extraCiliumValues(provider cloudprovider.Provider, conformanceMode bool, ou }, } - // When --conformance is set, we try to mitigate https://github.com/cilium/cilium/issues/9207 - // Users are discouraged of ever using this mode, except if they truly - // require protocol differentiation to work and cannot mitigate that any other way. - // Since there should always be workarounds, we only support this mode to - // pass the K8s conformance tests. It is not supported to switch to or from - // this mode after Constellation has been initialized. - if conformanceMode { - extraVals["kubeProxyReplacementHealthzBindAddr"] = "" - extraVals["kubeProxyReplacement"] = "false" - extraVals["sessionAffinity"] = true - extraVals["cni"] = map[string]any{ - "chainingMode": "portmap", - } - extraVals["ipMasqAgent"] = map[string]any{ - "enabled": false, - } - extraVals["bpf"] = map[string]any{ - "masquerade": false, - } - extraVals["k8s"] = map[string]any{ - "serviceProxyName": "cilium", - } - } - return extraVals } @@ -243,7 +214,7 @@ func getCCMConfig(azureState state.Azure, serviceAccURI string) ([]byte, error) ResourceGroup: azureState.ResourceGroup, LoadBalancerSku: "standard", SecurityGroupName: azureState.NetworkSecurityGroupName, - LoadBalancerName: "kubernetes-lb", + LoadBalancerName: azureState.LoadBalancerName, UseInstanceMetadata: true, VMType: "vmss", Location: creds.Location, diff --git a/internal/constellation/helm/release.go b/internal/constellation/helm/release.go index 21ad1e9aa..c7be7ab5c 100644 --- a/internal/constellation/helm/release.go +++ b/internal/constellation/helm/release.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package helm provides types and functions shared across services. 
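The conformance-mode overrides above (kubeProxyReplacement set to "partial", sessionAffinity enabled, portmap CNI chaining, empty kubeProxyReplacementHealthzBindAddr) are folded into the Cilium chart's base values through the loader's mergeMaps call. A minimal, self-contained sketch of that deep-merge behaviour, using a hypothetical deepMerge helper rather than the repository's own mergeMaps implementation:

package main

import "fmt"

// deepMerge lays overrides on top of base, merging nested map[string]any
// values recursively; any other value in overrides replaces the base value.
// (Sketch only; the chart loader's mergeMaps may differ in details.)
func deepMerge(base, overrides map[string]any) map[string]any {
	out := make(map[string]any, len(base))
	for k, v := range base {
		out[k] = v
	}
	for k, v := range overrides {
		if ov, ok := v.(map[string]any); ok {
			if bv, ok := out[k].(map[string]any); ok {
				out[k] = deepMerge(bv, ov)
				continue
			}
		}
		out[k] = v
	}
	return out
}

func main() {
	base := map[string]any{
		"kubeProxyReplacement": "strict",
		"cni":                  map[string]any{"exclusive": false},
	}
	conformance := map[string]any{
		"kubeProxyReplacement": "partial",
		"sessionAffinity":      true,
		"cni":                  map[string]any{"chainingMode": "portmap"},
	}
	fmt.Println(deepMerge(base, conformance))
	// "cni" keeps "exclusive" and gains "chainingMode"; scalar keys are replaced.
}

Nested maps such as cni are merged key by key, so chart defaults survive unless an override names the same key.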
diff --git a/internal/constellation/helm/retryaction.go b/internal/constellation/helm/retryaction.go index 4725c8659..89579356e 100644 --- a/internal/constellation/helm/retryaction.go +++ b/internal/constellation/helm/retryaction.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package helm @@ -49,7 +49,7 @@ func retryApply(ctx context.Context, action retrieableApplier, retryInterval tim return fmt.Errorf("helm install: %w", err) } retryLoopFinishDuration := time.Since(retryLoopStartTime) - log.Debug(fmt.Sprintf("Helm chart %q installation finished after %q", action.ReleaseName(), retryLoopFinishDuration)) + log.Debug(fmt.Sprintf("Helm chart %q installation finished after %s", action.ReleaseName(), retryLoopFinishDuration)) return nil } @@ -61,9 +61,9 @@ type applyDoer struct { // Do tries to apply the action. func (i applyDoer) Do(ctx context.Context) error { - i.log.Debug(fmt.Sprintf("Trying to apply Helm chart %q", i.applier.ReleaseName())) + i.log.Debug(fmt.Sprintf("Trying to apply Helm chart %s", i.applier.ReleaseName())) if err := i.applier.apply(ctx); err != nil { - i.log.Debug(fmt.Sprintf("Helm chart installation %q failed: %q", i.applier.ReleaseName(), err)) + i.log.Debug(fmt.Sprintf("Helm chart installation %s failed: %v", i.applier.ReleaseName(), err)) return err } diff --git a/internal/constellation/helm/retryaction_test.go b/internal/constellation/helm/retryaction_test.go index f7259a358..6a39d7cb2 100644 --- a/internal/constellation/helm/retryaction_test.go +++ b/internal/constellation/helm/retryaction_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package helm @@ -64,7 +64,7 @@ func TestRetryApply(t *testing.T) { t.Run(name, func(t *testing.T) { assert := assert.New(t) - err := retryApply(t.Context(), tc.applier, time.Millisecond, logger.NewTest(t)) + err := retryApply(context.Background(), tc.applier, time.Millisecond, logger.NewTest(t)) if tc.wantErr { assert.Error(err) } else { diff --git a/internal/constellation/helm/serviceversion.go b/internal/constellation/helm/serviceversion.go index 06603e293..a3d9ca57c 100644 --- a/internal/constellation/helm/serviceversion.go +++ b/internal/constellation/helm/serviceversion.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package helm diff --git a/internal/constellation/helm/testdata/AWS/constellation-operators/charts/constellation-operator/templates/deployment.yaml b/internal/constellation/helm/testdata/AWS/constellation-operators/charts/constellation-operator/templates/deployment.yaml index fa41d793b..01d9e6f57 100644 --- a/internal/constellation/helm/testdata/AWS/constellation-operators/charts/constellation-operator/templates/deployment.yaml +++ b/internal/constellation/helm/testdata/AWS/constellation-operators/charts/constellation-operator/templates/deployment.yaml @@ -37,63 +37,58 @@ spec: kubectl.kubernetes.io/default-container: manager spec: containers: - - args: - - --health-probe-bind-address=:8081 - - --metrics-bind-address=:8080 - - --leader-elect - command: - - /node-operator - env: - - name: KUBERNETES_CLUSTER_DOMAIN - value: cluster.local - - name: CONSTEL_CSP - value: GCP - - name: constellation-uid - value: "42424242424242" - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /var/secrets/google/key.json - image: constellationOperatorImage - 
livenessProbe: - httpGet: - path: /healthz - port: 8081 - initialDelaySeconds: 15 - periodSeconds: 20 - name: manager - readinessProbe: - httpGet: - path: /readyz - port: 8081 - initialDelaySeconds: 5 - periodSeconds: 10 - resources: - limits: - cpu: 500m - memory: 128Mi - requests: - cpu: 10m - memory: 64Mi - securityContext: - allowPrivilegeEscalation: false - volumeMounts: - - mountPath: /etc/kubernetes/pki/etcd - name: etcd-certs - - mountPath: /host/usr/lib/os-release - name: usr-lib-os-release - - mountPath: /etc/os-release - name: etc-os-release - - mountPath: /etc/azure - name: azureconfig - readOnly: true - - mountPath: /etc/gce - name: gceconf - readOnly: true - - mountPath: /var/secrets/google - name: gcekey - readOnly: true - - mountPath: /etc/constellation-upgrade-agent.sock - name: upgrade-agent-socket - readOnly: true + - args: + - --health-probe-bind-address=:8081 + - --metrics-bind-address=:8080 + - --leader-elect + command: + - /node-operator + env: + - name: KUBERNETES_CLUSTER_DOMAIN + value: cluster.local + - name: CONSTEL_CSP + value: GCP + - name: constellation-uid + value: "42424242424242" + image: constellationOperatorImage + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: manager + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 10m + memory: 64Mi + securityContext: + allowPrivilegeEscalation: false + volumeMounts: + - mountPath: /etc/kubernetes/pki/etcd + name: etcd-certs + - mountPath: /host/usr/lib/os-release + name: usr-lib-os-release + - mountPath: /etc/os-release + name: etc-os-release + - mountPath: /etc/azure + name: azureconfig + readOnly: true + - mountPath: /etc/gce + name: gceconf + readOnly: true + - mountPath: /etc/constellation-upgrade-agent.sock + name: upgrade-agent-socket + readOnly: true nodeSelector: node-role.kubernetes.io/control-plane: "" securityContext: @@ -101,38 +96,34 @@ spec: serviceAccountName: constellation-operator-controller-manager terminationGracePeriodSeconds: 10 tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/control-plane - operator: Exists - - effect: NoSchedule - key: node-role.kubernetes.io/master - operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists volumes: - - hostPath: - path: /etc/kubernetes/pki/etcd - type: Directory - name: etcd-certs - - hostPath: - path: /usr/lib/os-release - type: File - name: usr-lib-os-release - - hostPath: - path: /etc/os-release - type: File - name: etc-os-release - - name: azureconfig - secret: - optional: true - secretName: azureconfig - - configMap: - name: gceconf - optional: true + - hostPath: + path: /etc/kubernetes/pki/etcd + type: Directory + name: etcd-certs + - hostPath: + path: /usr/lib/os-release + type: File + name: usr-lib-os-release + - hostPath: + path: /etc/os-release + type: File + name: etc-os-release + - name: azureconfig + secret: + optional: true + secretName: azureconfig + - configMap: name: gceconf - - name: gcekey - secret: - secretName: gcekey - optional: true - - name: upgrade-agent-socket - hostPath: - path: /run/constellation-upgrade-agent.sock - type: Socket + optional: true + name: gceconf + - name: upgrade-agent-socket + hostPath: + path: /run/constellation-upgrade-agent.sock + type: Socket diff --git 
a/internal/constellation/helm/testdata/AWS/constellation-operators/charts/constellation-operator/templates/manager-rbac.yaml b/internal/constellation/helm/testdata/AWS/constellation-operators/charts/constellation-operator/templates/manager-rbac.yaml index 56bf77080..4fa4863c8 100644 --- a/internal/constellation/helm/testdata/AWS/constellation-operators/charts/constellation-operator/templates/manager-rbac.yaml +++ b/internal/constellation/helm/testdata/AWS/constellation-operators/charts/constellation-operator/templates/manager-rbac.yaml @@ -16,7 +16,6 @@ rules: verbs: - get - list - - watch - apiGroups: - "" resources: @@ -62,10 +61,6 @@ rules: - update.edgeless.systems resources: - autoscalingstrategies - - joiningnodes - - nodeversions - - pendingnodes - - scalinggroups verbs: - create - delete @@ -78,20 +73,38 @@ rules: - update.edgeless.systems resources: - autoscalingstrategies/finalizers - - joiningnodes/finalizers - - nodeversions/finalizers - - pendingnodes/finalizers - - scalinggroups/finalizers verbs: - update - apiGroups: - update.edgeless.systems resources: - autoscalingstrategies/status + verbs: + - get + - patch + - update +- apiGroups: + - update.edgeless.systems + resources: + - joiningnodes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - update.edgeless.systems + resources: + - joiningnodes/finalizers + verbs: + - update +- apiGroups: + - update.edgeless.systems + resources: - joiningnodes/status - - nodeversions/status - - pendingnodes/status - - scalinggroups/status verbs: - get - patch @@ -110,6 +123,84 @@ rules: - nodeversion/status verbs: - get +- apiGroups: + - update.edgeless.systems + resources: + - nodeversions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - update.edgeless.systems + resources: + - nodeversions/finalizers + verbs: + - update +- apiGroups: + - update.edgeless.systems + resources: + - nodeversions/status + verbs: + - get + - patch + - update +- apiGroups: + - update.edgeless.systems + resources: + - pendingnodes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - update.edgeless.systems + resources: + - pendingnodes/finalizers + verbs: + - update +- apiGroups: + - update.edgeless.systems + resources: + - pendingnodes/status + verbs: + - get + - patch + - update +- apiGroups: + - update.edgeless.systems + resources: + - scalinggroups + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - update.edgeless.systems + resources: + - scalinggroups/finalizers + verbs: + - update +- apiGroups: + - update.edgeless.systems + resources: + - scalinggroups/status + verbs: + - get + - patch + - update --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding diff --git a/internal/constellation/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/manager-rbac.yaml b/internal/constellation/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/manager-rbac.yaml index e364d1498..fa9f582a4 100644 --- a/internal/constellation/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/manager-rbac.yaml +++ b/internal/constellation/helm/testdata/AWS/constellation-operators/charts/node-maintenance-operator/templates/manager-rbac.yaml @@ -15,13 +15,7 @@ rules: resources: - namespaces verbs: - - create - - delete - get - - list - - patch - - update - - watch - apiGroups: - "" resources: diff 
--git a/internal/constellation/helm/testdata/AWS/constellation-services/charts/join-service/templates/daemonset.yaml b/internal/constellation/helm/testdata/AWS/constellation-services/charts/join-service/templates/daemonset.yaml index 538883439..2156f82a6 100644 --- a/internal/constellation/helm/testdata/AWS/constellation-services/charts/join-service/templates/daemonset.yaml +++ b/internal/constellation/helm/testdata/AWS/constellation-services/charts/join-service/templates/daemonset.yaml @@ -40,9 +40,6 @@ spec: - --cloud-provider=AWS - --key-service-endpoint=key-service.testNamespace:9000 - --attestation-variant=aws-nitro-tpm - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /var/secrets/google/key.json volumeMounts: - mountPath: /var/config name: config @@ -50,14 +47,6 @@ spec: - mountPath: /etc/kubernetes name: kubeadm readOnly: true - - mountPath: /var/kubeadm-config - name: kubeadm-config - readOnly: true - - mountPath: /var/secrets/google - name: gcekey - readOnly: true - - mountPath: /var/run/state/ssh - name: ssh ports: - containerPort: 9090 name: tcp @@ -65,10 +54,6 @@ spec: securityContext: privileged: true volumes: - - name: gcekey - secret: - secretName: gcekey - optional: true - name: config projected: sources: @@ -79,10 +64,4 @@ spec: - name: kubeadm hostPath: path: /etc/kubernetes - - name: kubeadm-config - configMap: - name: kubeadm-config - - name: ssh - hostPath: - path: /var/run/state/ssh updateStrategy: {} diff --git a/internal/constellation/helm/testdata/Azure/constellation-operators/charts/constellation-operator/templates/deployment.yaml b/internal/constellation/helm/testdata/Azure/constellation-operators/charts/constellation-operator/templates/deployment.yaml index 23b5ac730..d50c5492f 100644 --- a/internal/constellation/helm/testdata/Azure/constellation-operators/charts/constellation-operator/templates/deployment.yaml +++ b/internal/constellation/helm/testdata/Azure/constellation-operators/charts/constellation-operator/templates/deployment.yaml @@ -37,63 +37,58 @@ spec: kubectl.kubernetes.io/default-container: manager spec: containers: - - args: - - --health-probe-bind-address=:8081 - - --metrics-bind-address=:8080 - - --leader-elect - command: - - /node-operator - env: - - name: KUBERNETES_CLUSTER_DOMAIN - value: cluster.local - - name: CONSTEL_CSP - value: Azure - - name: constellation-uid - value: "42424242424242" - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /var/secrets/google/key.json - image: constellationOperatorImage - livenessProbe: - httpGet: - path: /healthz - port: 8081 - initialDelaySeconds: 15 - periodSeconds: 20 - name: manager - readinessProbe: - httpGet: - path: /readyz - port: 8081 - initialDelaySeconds: 5 - periodSeconds: 10 - resources: - limits: - cpu: 500m - memory: 128Mi - requests: - cpu: 10m - memory: 64Mi - securityContext: - allowPrivilegeEscalation: false - volumeMounts: - - mountPath: /etc/kubernetes/pki/etcd - name: etcd-certs - - mountPath: /host/usr/lib/os-release - name: usr-lib-os-release - - mountPath: /etc/os-release - name: etc-os-release - - mountPath: /etc/azure - name: azureconfig - readOnly: true - - mountPath: /etc/gce - name: gceconf - readOnly: true - - mountPath: /var/secrets/google - name: gcekey - readOnly: true - - mountPath: /etc/constellation-upgrade-agent.sock - name: upgrade-agent-socket - readOnly: true + - args: + - --health-probe-bind-address=:8081 + - --metrics-bind-address=:8080 + - --leader-elect + command: + - /node-operator + env: + - name: KUBERNETES_CLUSTER_DOMAIN + value: cluster.local + - 
name: CONSTEL_CSP + value: Azure + - name: constellation-uid + value: "42424242424242" + image: constellationOperatorImage + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: manager + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 10m + memory: 64Mi + securityContext: + allowPrivilegeEscalation: false + volumeMounts: + - mountPath: /etc/kubernetes/pki/etcd + name: etcd-certs + - mountPath: /host/usr/lib/os-release + name: usr-lib-os-release + - mountPath: /etc/os-release + name: etc-os-release + - mountPath: /etc/azure + name: azureconfig + readOnly: true + - mountPath: /etc/gce + name: gceconf + readOnly: true + - mountPath: /etc/constellation-upgrade-agent.sock + name: upgrade-agent-socket + readOnly: true nodeSelector: node-role.kubernetes.io/control-plane: "" securityContext: @@ -101,38 +96,34 @@ spec: serviceAccountName: constellation-operator-controller-manager terminationGracePeriodSeconds: 10 tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/control-plane - operator: Exists - - effect: NoSchedule - key: node-role.kubernetes.io/master - operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists volumes: - - hostPath: - path: /etc/kubernetes/pki/etcd - type: Directory - name: etcd-certs - - hostPath: - path: /usr/lib/os-release - type: File - name: usr-lib-os-release - - hostPath: - path: /etc/os-release - type: File - name: etc-os-release - - name: azureconfig - secret: - optional: true - secretName: azureconfig - - configMap: - name: gceconf - optional: true + - hostPath: + path: /etc/kubernetes/pki/etcd + type: Directory + name: etcd-certs + - hostPath: + path: /usr/lib/os-release + type: File + name: usr-lib-os-release + - hostPath: + path: /etc/os-release + type: File + name: etc-os-release + - name: azureconfig + secret: + optional: true + secretName: azureconfig + - configMap: name: gceconf - - name: gcekey - secret: - secretName: gcekey - optional: true - - name: upgrade-agent-socket - hostPath: - path: /run/constellation-upgrade-agent.sock - type: Socket + optional: true + name: gceconf + - name: upgrade-agent-socket + hostPath: + path: /run/constellation-upgrade-agent.sock + type: Socket diff --git a/internal/constellation/helm/testdata/Azure/constellation-operators/charts/constellation-operator/templates/manager-rbac.yaml b/internal/constellation/helm/testdata/Azure/constellation-operators/charts/constellation-operator/templates/manager-rbac.yaml index 56bf77080..4fa4863c8 100644 --- a/internal/constellation/helm/testdata/Azure/constellation-operators/charts/constellation-operator/templates/manager-rbac.yaml +++ b/internal/constellation/helm/testdata/Azure/constellation-operators/charts/constellation-operator/templates/manager-rbac.yaml @@ -16,7 +16,6 @@ rules: verbs: - get - list - - watch - apiGroups: - "" resources: @@ -62,10 +61,6 @@ rules: - update.edgeless.systems resources: - autoscalingstrategies - - joiningnodes - - nodeversions - - pendingnodes - - scalinggroups verbs: - create - delete @@ -78,20 +73,38 @@ rules: - update.edgeless.systems resources: - autoscalingstrategies/finalizers - - joiningnodes/finalizers - - nodeversions/finalizers - - pendingnodes/finalizers - - scalinggroups/finalizers verbs: - update - apiGroups: - 
update.edgeless.systems resources: - autoscalingstrategies/status + verbs: + - get + - patch + - update +- apiGroups: + - update.edgeless.systems + resources: + - joiningnodes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - update.edgeless.systems + resources: + - joiningnodes/finalizers + verbs: + - update +- apiGroups: + - update.edgeless.systems + resources: - joiningnodes/status - - nodeversions/status - - pendingnodes/status - - scalinggroups/status verbs: - get - patch @@ -110,6 +123,84 @@ rules: - nodeversion/status verbs: - get +- apiGroups: + - update.edgeless.systems + resources: + - nodeversions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - update.edgeless.systems + resources: + - nodeversions/finalizers + verbs: + - update +- apiGroups: + - update.edgeless.systems + resources: + - nodeversions/status + verbs: + - get + - patch + - update +- apiGroups: + - update.edgeless.systems + resources: + - pendingnodes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - update.edgeless.systems + resources: + - pendingnodes/finalizers + verbs: + - update +- apiGroups: + - update.edgeless.systems + resources: + - pendingnodes/status + verbs: + - get + - patch + - update +- apiGroups: + - update.edgeless.systems + resources: + - scalinggroups + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - update.edgeless.systems + resources: + - scalinggroups/finalizers + verbs: + - update +- apiGroups: + - update.edgeless.systems + resources: + - scalinggroups/status + verbs: + - get + - patch + - update --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding diff --git a/internal/constellation/helm/testdata/Azure/constellation-operators/charts/node-maintenance-operator/templates/manager-rbac.yaml b/internal/constellation/helm/testdata/Azure/constellation-operators/charts/node-maintenance-operator/templates/manager-rbac.yaml index e364d1498..fa9f582a4 100644 --- a/internal/constellation/helm/testdata/Azure/constellation-operators/charts/node-maintenance-operator/templates/manager-rbac.yaml +++ b/internal/constellation/helm/testdata/Azure/constellation-operators/charts/node-maintenance-operator/templates/manager-rbac.yaml @@ -15,13 +15,7 @@ rules: resources: - namespaces verbs: - - create - - delete - get - - list - - patch - - update - - watch - apiGroups: - "" resources: diff --git a/internal/constellation/helm/testdata/Azure/constellation-services/charts/join-service/templates/daemonset.yaml b/internal/constellation/helm/testdata/Azure/constellation-services/charts/join-service/templates/daemonset.yaml index b6fcd3f6b..05f397876 100644 --- a/internal/constellation/helm/testdata/Azure/constellation-services/charts/join-service/templates/daemonset.yaml +++ b/internal/constellation/helm/testdata/Azure/constellation-services/charts/join-service/templates/daemonset.yaml @@ -40,9 +40,6 @@ spec: - --cloud-provider=Azure - --key-service-endpoint=key-service.testNamespace:9000 - --attestation-variant=azure-sev-snp - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /var/secrets/google/key.json volumeMounts: - mountPath: /var/config name: config @@ -50,14 +47,6 @@ spec: - mountPath: /etc/kubernetes name: kubeadm readOnly: true - - mountPath: /var/kubeadm-config - name: kubeadm-config - readOnly: true - - mountPath: /var/secrets/google - name: gcekey - readOnly: true - - mountPath: /var/run/state/ssh - 
name: ssh ports: - containerPort: 9090 name: tcp @@ -65,10 +54,6 @@ spec: securityContext: privileged: true volumes: - - name: gcekey - secret: - secretName: gcekey - optional: true - name: config projected: sources: @@ -79,10 +64,4 @@ spec: - name: kubeadm hostPath: path: /etc/kubernetes - - name: kubeadm-config - configMap: - name: kubeadm-config - - name: ssh - hostPath: - path: /var/run/state/ssh updateStrategy: {} diff --git a/internal/constellation/helm/testdata/GCP/constellation-operators/charts/constellation-operator/templates/deployment.yaml b/internal/constellation/helm/testdata/GCP/constellation-operators/charts/constellation-operator/templates/deployment.yaml index fa41d793b..01d9e6f57 100644 --- a/internal/constellation/helm/testdata/GCP/constellation-operators/charts/constellation-operator/templates/deployment.yaml +++ b/internal/constellation/helm/testdata/GCP/constellation-operators/charts/constellation-operator/templates/deployment.yaml @@ -37,63 +37,58 @@ spec: kubectl.kubernetes.io/default-container: manager spec: containers: - - args: - - --health-probe-bind-address=:8081 - - --metrics-bind-address=:8080 - - --leader-elect - command: - - /node-operator - env: - - name: KUBERNETES_CLUSTER_DOMAIN - value: cluster.local - - name: CONSTEL_CSP - value: GCP - - name: constellation-uid - value: "42424242424242" - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /var/secrets/google/key.json - image: constellationOperatorImage - livenessProbe: - httpGet: - path: /healthz - port: 8081 - initialDelaySeconds: 15 - periodSeconds: 20 - name: manager - readinessProbe: - httpGet: - path: /readyz - port: 8081 - initialDelaySeconds: 5 - periodSeconds: 10 - resources: - limits: - cpu: 500m - memory: 128Mi - requests: - cpu: 10m - memory: 64Mi - securityContext: - allowPrivilegeEscalation: false - volumeMounts: - - mountPath: /etc/kubernetes/pki/etcd - name: etcd-certs - - mountPath: /host/usr/lib/os-release - name: usr-lib-os-release - - mountPath: /etc/os-release - name: etc-os-release - - mountPath: /etc/azure - name: azureconfig - readOnly: true - - mountPath: /etc/gce - name: gceconf - readOnly: true - - mountPath: /var/secrets/google - name: gcekey - readOnly: true - - mountPath: /etc/constellation-upgrade-agent.sock - name: upgrade-agent-socket - readOnly: true + - args: + - --health-probe-bind-address=:8081 + - --metrics-bind-address=:8080 + - --leader-elect + command: + - /node-operator + env: + - name: KUBERNETES_CLUSTER_DOMAIN + value: cluster.local + - name: CONSTEL_CSP + value: GCP + - name: constellation-uid + value: "42424242424242" + image: constellationOperatorImage + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: manager + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 10m + memory: 64Mi + securityContext: + allowPrivilegeEscalation: false + volumeMounts: + - mountPath: /etc/kubernetes/pki/etcd + name: etcd-certs + - mountPath: /host/usr/lib/os-release + name: usr-lib-os-release + - mountPath: /etc/os-release + name: etc-os-release + - mountPath: /etc/azure + name: azureconfig + readOnly: true + - mountPath: /etc/gce + name: gceconf + readOnly: true + - mountPath: /etc/constellation-upgrade-agent.sock + name: upgrade-agent-socket + readOnly: true nodeSelector: node-role.kubernetes.io/control-plane: "" securityContext: @@ -101,38 +96,34 @@ spec: serviceAccountName: 
constellation-operator-controller-manager terminationGracePeriodSeconds: 10 tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/control-plane - operator: Exists - - effect: NoSchedule - key: node-role.kubernetes.io/master - operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists volumes: - - hostPath: - path: /etc/kubernetes/pki/etcd - type: Directory - name: etcd-certs - - hostPath: - path: /usr/lib/os-release - type: File - name: usr-lib-os-release - - hostPath: - path: /etc/os-release - type: File - name: etc-os-release - - name: azureconfig - secret: - optional: true - secretName: azureconfig - - configMap: - name: gceconf - optional: true + - hostPath: + path: /etc/kubernetes/pki/etcd + type: Directory + name: etcd-certs + - hostPath: + path: /usr/lib/os-release + type: File + name: usr-lib-os-release + - hostPath: + path: /etc/os-release + type: File + name: etc-os-release + - name: azureconfig + secret: + optional: true + secretName: azureconfig + - configMap: name: gceconf - - name: gcekey - secret: - secretName: gcekey - optional: true - - name: upgrade-agent-socket - hostPath: - path: /run/constellation-upgrade-agent.sock - type: Socket + optional: true + name: gceconf + - name: upgrade-agent-socket + hostPath: + path: /run/constellation-upgrade-agent.sock + type: Socket diff --git a/internal/constellation/helm/testdata/GCP/constellation-operators/charts/constellation-operator/templates/manager-rbac.yaml b/internal/constellation/helm/testdata/GCP/constellation-operators/charts/constellation-operator/templates/manager-rbac.yaml index 56bf77080..4fa4863c8 100644 --- a/internal/constellation/helm/testdata/GCP/constellation-operators/charts/constellation-operator/templates/manager-rbac.yaml +++ b/internal/constellation/helm/testdata/GCP/constellation-operators/charts/constellation-operator/templates/manager-rbac.yaml @@ -16,7 +16,6 @@ rules: verbs: - get - list - - watch - apiGroups: - "" resources: @@ -62,10 +61,6 @@ rules: - update.edgeless.systems resources: - autoscalingstrategies - - joiningnodes - - nodeversions - - pendingnodes - - scalinggroups verbs: - create - delete @@ -78,20 +73,38 @@ rules: - update.edgeless.systems resources: - autoscalingstrategies/finalizers - - joiningnodes/finalizers - - nodeversions/finalizers - - pendingnodes/finalizers - - scalinggroups/finalizers verbs: - update - apiGroups: - update.edgeless.systems resources: - autoscalingstrategies/status + verbs: + - get + - patch + - update +- apiGroups: + - update.edgeless.systems + resources: + - joiningnodes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - update.edgeless.systems + resources: + - joiningnodes/finalizers + verbs: + - update +- apiGroups: + - update.edgeless.systems + resources: - joiningnodes/status - - nodeversions/status - - pendingnodes/status - - scalinggroups/status verbs: - get - patch @@ -110,6 +123,84 @@ rules: - nodeversion/status verbs: - get +- apiGroups: + - update.edgeless.systems + resources: + - nodeversions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - update.edgeless.systems + resources: + - nodeversions/finalizers + verbs: + - update +- apiGroups: + - update.edgeless.systems + resources: + - nodeversions/status + verbs: + - get + - patch + - update +- apiGroups: + - update.edgeless.systems + resources: + - pendingnodes + verbs: + - 
create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - update.edgeless.systems + resources: + - pendingnodes/finalizers + verbs: + - update +- apiGroups: + - update.edgeless.systems + resources: + - pendingnodes/status + verbs: + - get + - patch + - update +- apiGroups: + - update.edgeless.systems + resources: + - scalinggroups + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - update.edgeless.systems + resources: + - scalinggroups/finalizers + verbs: + - update +- apiGroups: + - update.edgeless.systems + resources: + - scalinggroups/status + verbs: + - get + - patch + - update --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding diff --git a/internal/constellation/helm/testdata/GCP/constellation-operators/charts/node-maintenance-operator/templates/manager-rbac.yaml b/internal/constellation/helm/testdata/GCP/constellation-operators/charts/node-maintenance-operator/templates/manager-rbac.yaml index e364d1498..fa9f582a4 100644 --- a/internal/constellation/helm/testdata/GCP/constellation-operators/charts/node-maintenance-operator/templates/manager-rbac.yaml +++ b/internal/constellation/helm/testdata/GCP/constellation-operators/charts/node-maintenance-operator/templates/manager-rbac.yaml @@ -15,13 +15,7 @@ rules: resources: - namespaces verbs: - - create - - delete - get - - list - - patch - - update - - watch - apiGroups: - "" resources: diff --git a/internal/constellation/helm/testdata/GCP/constellation-services/charts/ccm/templates/gcp-cm.yaml b/internal/constellation/helm/testdata/GCP/constellation-services/charts/ccm/templates/gcp-cm.yaml index c0ed7d331..5855fb988 100644 --- a/internal/constellation/helm/testdata/GCP/constellation-services/charts/ccm/templates/gcp-cm.yaml +++ b/internal/constellation/helm/testdata/GCP/constellation-services/charts/ccm/templates/gcp-cm.yaml @@ -4,4 +4,4 @@ metadata: name: gceconf namespace: testNamespace data: - gce.conf: "[global]\nproject-id = 42424242424242\nuse-metadata-server = true\nnode-tags = constellation-242424242424\nregional = true\ntoken-url = nil # This forces use of GOOGLE_APPLICATION_CREDENTIALS." 
+ gce.conf: "[global]\nproject-id = 42424242424242\nuse-metadata-server = true\nnode-tags = constellation-242424242424\nregional = true\n" diff --git a/internal/constellation/helm/testdata/GCP/constellation-services/charts/join-service/templates/daemonset.yaml b/internal/constellation/helm/testdata/GCP/constellation-services/charts/join-service/templates/daemonset.yaml index bbe9747ba..0ddfa9201 100644 --- a/internal/constellation/helm/testdata/GCP/constellation-services/charts/join-service/templates/daemonset.yaml +++ b/internal/constellation/helm/testdata/GCP/constellation-services/charts/join-service/templates/daemonset.yaml @@ -40,9 +40,6 @@ spec: - --cloud-provider=GCP - --key-service-endpoint=key-service.testNamespace:9000 - --attestation-variant=gcp-sev-es - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /var/secrets/google/key.json volumeMounts: - mountPath: /var/config name: config @@ -50,14 +47,6 @@ spec: - mountPath: /etc/kubernetes name: kubeadm readOnly: true - - mountPath: /var/kubeadm-config - name: kubeadm-config - readOnly: true - - mountPath: /var/secrets/google - name: gcekey - readOnly: true - - mountPath: /var/run/state/ssh - name: ssh ports: - containerPort: 9090 name: tcp @@ -65,10 +54,6 @@ spec: securityContext: privileged: true volumes: - - name: gcekey - secret: - secretName: gcekey - optional: true - name: config projected: sources: @@ -79,10 +64,4 @@ spec: - name: kubeadm hostPath: path: /etc/kubernetes - - name: kubeadm-config - configMap: - name: kubeadm-config - - name: ssh - hostPath: - path: /var/run/state/ssh updateStrategy: {} diff --git a/internal/constellation/helm/testdata/OpenStack/constellation-operators/charts/constellation-operator/templates/deployment.yaml b/internal/constellation/helm/testdata/OpenStack/constellation-operators/charts/constellation-operator/templates/deployment.yaml index fa41d793b..01d9e6f57 100644 --- a/internal/constellation/helm/testdata/OpenStack/constellation-operators/charts/constellation-operator/templates/deployment.yaml +++ b/internal/constellation/helm/testdata/OpenStack/constellation-operators/charts/constellation-operator/templates/deployment.yaml @@ -37,63 +37,58 @@ spec: kubectl.kubernetes.io/default-container: manager spec: containers: - - args: - - --health-probe-bind-address=:8081 - - --metrics-bind-address=:8080 - - --leader-elect - command: - - /node-operator - env: - - name: KUBERNETES_CLUSTER_DOMAIN - value: cluster.local - - name: CONSTEL_CSP - value: GCP - - name: constellation-uid - value: "42424242424242" - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /var/secrets/google/key.json - image: constellationOperatorImage - livenessProbe: - httpGet: - path: /healthz - port: 8081 - initialDelaySeconds: 15 - periodSeconds: 20 - name: manager - readinessProbe: - httpGet: - path: /readyz - port: 8081 - initialDelaySeconds: 5 - periodSeconds: 10 - resources: - limits: - cpu: 500m - memory: 128Mi - requests: - cpu: 10m - memory: 64Mi - securityContext: - allowPrivilegeEscalation: false - volumeMounts: - - mountPath: /etc/kubernetes/pki/etcd - name: etcd-certs - - mountPath: /host/usr/lib/os-release - name: usr-lib-os-release - - mountPath: /etc/os-release - name: etc-os-release - - mountPath: /etc/azure - name: azureconfig - readOnly: true - - mountPath: /etc/gce - name: gceconf - readOnly: true - - mountPath: /var/secrets/google - name: gcekey - readOnly: true - - mountPath: /etc/constellation-upgrade-agent.sock - name: upgrade-agent-socket - readOnly: true + - args: + - --health-probe-bind-address=:8081 + 
- --metrics-bind-address=:8080 + - --leader-elect + command: + - /node-operator + env: + - name: KUBERNETES_CLUSTER_DOMAIN + value: cluster.local + - name: CONSTEL_CSP + value: GCP + - name: constellation-uid + value: "42424242424242" + image: constellationOperatorImage + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: manager + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 10m + memory: 64Mi + securityContext: + allowPrivilegeEscalation: false + volumeMounts: + - mountPath: /etc/kubernetes/pki/etcd + name: etcd-certs + - mountPath: /host/usr/lib/os-release + name: usr-lib-os-release + - mountPath: /etc/os-release + name: etc-os-release + - mountPath: /etc/azure + name: azureconfig + readOnly: true + - mountPath: /etc/gce + name: gceconf + readOnly: true + - mountPath: /etc/constellation-upgrade-agent.sock + name: upgrade-agent-socket + readOnly: true nodeSelector: node-role.kubernetes.io/control-plane: "" securityContext: @@ -101,38 +96,34 @@ spec: serviceAccountName: constellation-operator-controller-manager terminationGracePeriodSeconds: 10 tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/control-plane - operator: Exists - - effect: NoSchedule - key: node-role.kubernetes.io/master - operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists volumes: - - hostPath: - path: /etc/kubernetes/pki/etcd - type: Directory - name: etcd-certs - - hostPath: - path: /usr/lib/os-release - type: File - name: usr-lib-os-release - - hostPath: - path: /etc/os-release - type: File - name: etc-os-release - - name: azureconfig - secret: - optional: true - secretName: azureconfig - - configMap: - name: gceconf - optional: true + - hostPath: + path: /etc/kubernetes/pki/etcd + type: Directory + name: etcd-certs + - hostPath: + path: /usr/lib/os-release + type: File + name: usr-lib-os-release + - hostPath: + path: /etc/os-release + type: File + name: etc-os-release + - name: azureconfig + secret: + optional: true + secretName: azureconfig + - configMap: name: gceconf - - name: gcekey - secret: - secretName: gcekey - optional: true - - name: upgrade-agent-socket - hostPath: - path: /run/constellation-upgrade-agent.sock - type: Socket + optional: true + name: gceconf + - name: upgrade-agent-socket + hostPath: + path: /run/constellation-upgrade-agent.sock + type: Socket diff --git a/internal/constellation/helm/testdata/OpenStack/constellation-operators/charts/constellation-operator/templates/manager-rbac.yaml b/internal/constellation/helm/testdata/OpenStack/constellation-operators/charts/constellation-operator/templates/manager-rbac.yaml index 56bf77080..4fa4863c8 100644 --- a/internal/constellation/helm/testdata/OpenStack/constellation-operators/charts/constellation-operator/templates/manager-rbac.yaml +++ b/internal/constellation/helm/testdata/OpenStack/constellation-operators/charts/constellation-operator/templates/manager-rbac.yaml @@ -16,7 +16,6 @@ rules: verbs: - get - list - - watch - apiGroups: - "" resources: @@ -62,10 +61,6 @@ rules: - update.edgeless.systems resources: - autoscalingstrategies - - joiningnodes - - nodeversions - - pendingnodes - - scalinggroups verbs: - create - delete @@ -78,20 +73,38 @@ rules: - update.edgeless.systems resources: - 
autoscalingstrategies/finalizers - - joiningnodes/finalizers - - nodeversions/finalizers - - pendingnodes/finalizers - - scalinggroups/finalizers verbs: - update - apiGroups: - update.edgeless.systems resources: - autoscalingstrategies/status + verbs: + - get + - patch + - update +- apiGroups: + - update.edgeless.systems + resources: + - joiningnodes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - update.edgeless.systems + resources: + - joiningnodes/finalizers + verbs: + - update +- apiGroups: + - update.edgeless.systems + resources: - joiningnodes/status - - nodeversions/status - - pendingnodes/status - - scalinggroups/status verbs: - get - patch @@ -110,6 +123,84 @@ rules: - nodeversion/status verbs: - get +- apiGroups: + - update.edgeless.systems + resources: + - nodeversions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - update.edgeless.systems + resources: + - nodeversions/finalizers + verbs: + - update +- apiGroups: + - update.edgeless.systems + resources: + - nodeversions/status + verbs: + - get + - patch + - update +- apiGroups: + - update.edgeless.systems + resources: + - pendingnodes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - update.edgeless.systems + resources: + - pendingnodes/finalizers + verbs: + - update +- apiGroups: + - update.edgeless.systems + resources: + - pendingnodes/status + verbs: + - get + - patch + - update +- apiGroups: + - update.edgeless.systems + resources: + - scalinggroups + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - update.edgeless.systems + resources: + - scalinggroups/finalizers + verbs: + - update +- apiGroups: + - update.edgeless.systems + resources: + - scalinggroups/status + verbs: + - get + - patch + - update --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding diff --git a/internal/constellation/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/manager-rbac.yaml b/internal/constellation/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/manager-rbac.yaml index e364d1498..fa9f582a4 100644 --- a/internal/constellation/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/manager-rbac.yaml +++ b/internal/constellation/helm/testdata/OpenStack/constellation-operators/charts/node-maintenance-operator/templates/manager-rbac.yaml @@ -15,13 +15,7 @@ rules: resources: - namespaces verbs: - - create - - delete - get - - list - - patch - - update - - watch - apiGroups: - "" resources: diff --git a/internal/constellation/helm/testdata/OpenStack/constellation-services/charts/join-service/templates/daemonset.yaml b/internal/constellation/helm/testdata/OpenStack/constellation-services/charts/join-service/templates/daemonset.yaml index e680ff691..0ed907f4d 100644 --- a/internal/constellation/helm/testdata/OpenStack/constellation-services/charts/join-service/templates/daemonset.yaml +++ b/internal/constellation/helm/testdata/OpenStack/constellation-services/charts/join-service/templates/daemonset.yaml @@ -40,9 +40,6 @@ spec: - --cloud-provider=OpenStack - --key-service-endpoint=key-service.testNamespace:9000 - --attestation-variant=qemu-vtpm - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /var/secrets/google/key.json volumeMounts: - mountPath: /var/config name: config @@ -50,14 +47,6 @@ spec: - mountPath: /etc/kubernetes 
name: kubeadm readOnly: true - - mountPath: /var/kubeadm-config - name: kubeadm-config - readOnly: true - - mountPath: /var/secrets/google - name: gcekey - readOnly: true - - mountPath: /var/run/state/ssh - name: ssh ports: - containerPort: 9090 name: tcp @@ -65,10 +54,6 @@ spec: securityContext: privileged: true volumes: - - name: gcekey - secret: - secretName: gcekey - optional: true - name: config projected: sources: @@ -79,10 +64,4 @@ spec: - name: kubeadm hostPath: path: /etc/kubernetes - - name: kubeadm-config - configMap: - name: kubeadm-config - - name: ssh - hostPath: - path: /var/run/state/ssh updateStrategy: {} diff --git a/internal/constellation/helm/testdata/QEMU/constellation-operators/charts/constellation-operator/templates/deployment.yaml b/internal/constellation/helm/testdata/QEMU/constellation-operators/charts/constellation-operator/templates/deployment.yaml index edad32c8d..99e4a790d 100644 --- a/internal/constellation/helm/testdata/QEMU/constellation-operators/charts/constellation-operator/templates/deployment.yaml +++ b/internal/constellation/helm/testdata/QEMU/constellation-operators/charts/constellation-operator/templates/deployment.yaml @@ -50,8 +50,6 @@ spec: value: QEMU - name: constellation-uid value: "42424242424242" - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /var/secrets/google/key.json image: constellationOperatorImage livenessProbe: httpGet: @@ -88,9 +86,6 @@ spec: - mountPath: /etc/gce name: gceconf readOnly: true - - mountPath: /var/secrets/google - name: gcekey - readOnly: true - mountPath: /etc/constellation-upgrade-agent.sock name: upgrade-agent-socket readOnly: true @@ -128,10 +123,6 @@ spec: name: gceconf optional: true name: gceconf - - name: gcekey - secret: - secretName: gcekey - optional: true - name: upgrade-agent-socket hostPath: path: /run/constellation-upgrade-agent.sock diff --git a/internal/constellation/helm/testdata/QEMU/constellation-operators/charts/constellation-operator/templates/manager-rbac.yaml b/internal/constellation/helm/testdata/QEMU/constellation-operators/charts/constellation-operator/templates/manager-rbac.yaml index 56bf77080..4fa4863c8 100644 --- a/internal/constellation/helm/testdata/QEMU/constellation-operators/charts/constellation-operator/templates/manager-rbac.yaml +++ b/internal/constellation/helm/testdata/QEMU/constellation-operators/charts/constellation-operator/templates/manager-rbac.yaml @@ -16,7 +16,6 @@ rules: verbs: - get - list - - watch - apiGroups: - "" resources: @@ -62,10 +61,6 @@ rules: - update.edgeless.systems resources: - autoscalingstrategies - - joiningnodes - - nodeversions - - pendingnodes - - scalinggroups verbs: - create - delete @@ -78,20 +73,38 @@ rules: - update.edgeless.systems resources: - autoscalingstrategies/finalizers - - joiningnodes/finalizers - - nodeversions/finalizers - - pendingnodes/finalizers - - scalinggroups/finalizers verbs: - update - apiGroups: - update.edgeless.systems resources: - autoscalingstrategies/status + verbs: + - get + - patch + - update +- apiGroups: + - update.edgeless.systems + resources: + - joiningnodes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - update.edgeless.systems + resources: + - joiningnodes/finalizers + verbs: + - update +- apiGroups: + - update.edgeless.systems + resources: - joiningnodes/status - - nodeversions/status - - pendingnodes/status - - scalinggroups/status verbs: - get - patch @@ -110,6 +123,84 @@ rules: - nodeversion/status verbs: - get +- apiGroups: + - update.edgeless.systems + 
resources: + - nodeversions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - update.edgeless.systems + resources: + - nodeversions/finalizers + verbs: + - update +- apiGroups: + - update.edgeless.systems + resources: + - nodeversions/status + verbs: + - get + - patch + - update +- apiGroups: + - update.edgeless.systems + resources: + - pendingnodes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - update.edgeless.systems + resources: + - pendingnodes/finalizers + verbs: + - update +- apiGroups: + - update.edgeless.systems + resources: + - pendingnodes/status + verbs: + - get + - patch + - update +- apiGroups: + - update.edgeless.systems + resources: + - scalinggroups + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - update.edgeless.systems + resources: + - scalinggroups/finalizers + verbs: + - update +- apiGroups: + - update.edgeless.systems + resources: + - scalinggroups/status + verbs: + - get + - patch + - update --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding diff --git a/internal/constellation/helm/testdata/QEMU/constellation-operators/charts/node-maintenance-operator/templates/manager-rbac.yaml b/internal/constellation/helm/testdata/QEMU/constellation-operators/charts/node-maintenance-operator/templates/manager-rbac.yaml index e364d1498..fa9f582a4 100644 --- a/internal/constellation/helm/testdata/QEMU/constellation-operators/charts/node-maintenance-operator/templates/manager-rbac.yaml +++ b/internal/constellation/helm/testdata/QEMU/constellation-operators/charts/node-maintenance-operator/templates/manager-rbac.yaml @@ -15,13 +15,7 @@ rules: resources: - namespaces verbs: - - create - - delete - get - - list - - patch - - update - - watch - apiGroups: - "" resources: diff --git a/internal/constellation/helm/testdata/QEMU/constellation-services/charts/join-service/templates/daemonset.yaml b/internal/constellation/helm/testdata/QEMU/constellation-services/charts/join-service/templates/daemonset.yaml index 1bd150448..71ad80428 100644 --- a/internal/constellation/helm/testdata/QEMU/constellation-services/charts/join-service/templates/daemonset.yaml +++ b/internal/constellation/helm/testdata/QEMU/constellation-services/charts/join-service/templates/daemonset.yaml @@ -40,9 +40,6 @@ spec: - --cloud-provider=QEMU - --key-service-endpoint=key-service.testNamespace:9000 - --attestation-variant=qemu-vtpm - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /var/secrets/google/key.json volumeMounts: - mountPath: /var/config name: config @@ -50,14 +47,6 @@ spec: - mountPath: /etc/kubernetes name: kubeadm readOnly: true - - mountPath: /var/kubeadm-config - name: kubeadm-config - readOnly: true - - mountPath: /var/secrets/google - name: gcekey - readOnly: true - - mountPath: /var/run/state/ssh - name: ssh ports: - containerPort: 9090 name: tcp @@ -65,10 +54,6 @@ spec: securityContext: privileged: true volumes: - - name: gcekey - secret: - secretName: gcekey - optional: true - name: config projected: sources: @@ -79,10 +64,4 @@ spec: - name: kubeadm hostPath: path: /etc/kubernetes - - name: kubeadm-config - configMap: - name: kubeadm-config - - name: ssh - hostPath: - path: /var/run/state/ssh updateStrategy: {} diff --git a/internal/constellation/helm/update-aws-load-balancer-chart.sh b/internal/constellation/helm/update-aws-load-balancer-chart.sh index 797defaa8..1c6a8519c 100755 --- 
a/internal/constellation/helm/update-aws-load-balancer-chart.sh +++ b/internal/constellation/helm/update-aws-load-balancer-chart.sh @@ -8,7 +8,7 @@ set -o errtrace shopt -s inherit_errexit echo "Updating AWS Load Balancer Controller Helm chart..." -branch="v0.0.190" # releases can update the AWS load-balancer-controller chart +branch="v0.0.140" # releases can update the AWS load-balancer-controller chart # Required tools if ! command -v git &> /dev/null; then echo "git could not be found" diff --git a/internal/constellation/helm/update-csi-charts.sh b/internal/constellation/helm/update-csi-charts.sh index de3486226..96dba4a02 100755 --- a/internal/constellation/helm/update-csi-charts.sh +++ b/internal/constellation/helm/update-csi-charts.sh @@ -68,13 +68,13 @@ download_chart() { } ## AWS CSI Driver -download_chart "https://github.com/edgelesssys/constellation-aws-ebs-csi-driver" "v1.2.0" "charts/aws-ebs-csi-driver" "aws-csi-driver" +download_chart "https://github.com/edgelesssys/constellation-aws-ebs-csi-driver" "v1.1.1" "charts/aws-ebs-csi-driver" "aws-csi-driver" ## Azure CSI Driver -download_chart "https://github.com/edgelesssys/constellation-azuredisk-csi-driver" "v1.4.0" "charts/edgeless" "azuredisk-csi-driver" +download_chart "https://github.com/edgelesssys/constellation-azuredisk-csi-driver" "v1.3.0" "charts/edgeless" "azuredisk-csi-driver" ## GCP CSI Driver -download_chart "https://github.com/edgelesssys/constellation-gcp-compute-persistent-disk-csi-driver" "v1.4.0" "charts" "gcp-compute-persistent-disk-csi-driver" +download_chart "https://github.com/edgelesssys/constellation-gcp-compute-persistent-disk-csi-driver" "v1.3.0" "charts" "gcp-compute-persistent-disk-csi-driver" ## OpenStack CSI Driver (cinder) download_chart "https://github.com/edgelesssys/constellation-cloud-provider-openstack" "v1.0.2" "charts/cinder-csi-plugin" "openstack-cinder-csi" diff --git a/internal/constellation/helm/values.go b/internal/constellation/helm/values.go index 807f84b0e..bb36cf0fe 100644 --- a/internal/constellation/helm/values.go +++ b/internal/constellation/helm/values.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package helm diff --git a/internal/constellation/helm/versionlister.go b/internal/constellation/helm/versionlister.go index c5faf1aea..526cfebe3 100644 --- a/internal/constellation/helm/versionlister.go +++ b/internal/constellation/helm/versionlister.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package helm diff --git a/internal/constellation/kubecmd/BUILD.bazel b/internal/constellation/kubecmd/BUILD.bazel index aca26d0bb..71bae3c32 100644 --- a/internal/constellation/kubecmd/BUILD.bazel +++ b/internal/constellation/kubecmd/BUILD.bazel @@ -30,11 +30,8 @@ go_library( "@io_k8s_apimachinery//pkg/apis/meta/v1/unstructured", "@io_k8s_apimachinery//pkg/runtime", "@io_k8s_apimachinery//pkg/runtime/schema", - "@io_k8s_apimachinery//pkg/runtime/serializer/json", "@io_k8s_client_go//util/retry", - "@io_k8s_kubernetes//cmd/kubeadm/app/apis/kubeadm", - "@io_k8s_kubernetes//cmd/kubeadm/app/apis/kubeadm/scheme", - "@io_k8s_kubernetes//cmd/kubeadm/app/apis/kubeadm/v1beta4", + "@io_k8s_kubernetes//cmd/kubeadm/app/apis/kubeadm/v1beta3", "@io_k8s_sigs_yaml//:yaml", ], ) diff --git a/internal/constellation/kubecmd/backup.go b/internal/constellation/kubecmd/backup.go index 93a9cada7..c7e32d5be 100644 --- 
a/internal/constellation/kubecmd/backup.go +++ b/internal/constellation/kubecmd/backup.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package kubecmd @@ -39,7 +39,7 @@ func (k *KubeCmd) BackupCRDs(ctx context.Context, fileHandler file.Handler, upgr for i := range crds { path := filepath.Join(crdBackupFolder, crds[i].Name+".yaml") - k.log.Debug("Creating CRD backup", "path", path) + k.log.Debug(fmt.Sprintf("Creating CRD backup: %s", path)) // We have to manually set kind/apiversion because of a long-standing limitation of the API: // https://github.com/kubernetes/kubernetes/issues/3030#issuecomment-67543738 @@ -64,7 +64,7 @@ func (k *KubeCmd) BackupCRDs(ctx context.Context, fileHandler file.Handler, upgr func (k *KubeCmd) BackupCRs(ctx context.Context, fileHandler file.Handler, crds []apiextensionsv1.CustomResourceDefinition, upgradeDir string) error { k.log.Debug("Starting CR backup") for _, crd := range crds { - k.log.Debug("Creating backup", "crdName", crd.Name) + k.log.Debug(fmt.Sprintf("Creating backup for resource type: %s", crd.Name)) // Iterate over all versions of the CRD // TODO(daniel-weisse): Consider iterating over crd.Status.StoredVersions instead @@ -72,7 +72,7 @@ func (k *KubeCmd) BackupCRs(ctx context.Context, fileHandler file.Handler, crds // a version that is not installed in the cluster. // With the StoredVersions field, we could only iterate over the installed versions. for _, version := range crd.Spec.Versions { - k.log.Debug("Starting CustomResource backup", "crdName", crd.Name, "version", version.Name) + k.log.Debug(fmt.Sprintf("Creating backup of CRs for %q at version %q", crd.Name, version.Name)) gvr := schema.GroupVersionResource{Group: crd.Spec.Group, Version: version.Name, Resource: crd.Spec.Names.Plural} crs, err := k.kubectl.ListCRs(ctx, gvr) @@ -80,7 +80,7 @@ func (k *KubeCmd) BackupCRs(ctx context.Context, fileHandler file.Handler, crds if !k8serrors.IsNotFound(err) { return fmt.Errorf("retrieving CR %s: %w", crd.Name, err) } - k.log.Debug("No CustomResources found. 
Skipping...", "crdName", crd.Name, "version", version.Name) + k.log.Debug(fmt.Sprintf("No CRs found for %q at version %q, skipping...", crd.Name, version.Name)) continue } @@ -101,9 +101,9 @@ func (k *KubeCmd) BackupCRs(ctx context.Context, fileHandler file.Handler, crds } } - k.log.Debug("CustomResource backup complete", "crdName", crd.Name) + k.log.Debug(fmt.Sprintf("Backup for resource type %q complete", crd.Name)) } - k.log.Debug("All CustomResource backups completed") + k.log.Debug("CR backup complete") return nil } diff --git a/internal/constellation/kubecmd/backup_test.go b/internal/constellation/kubecmd/backup_test.go index ac6e42d54..a95c26be5 100644 --- a/internal/constellation/kubecmd/backup_test.go +++ b/internal/constellation/kubecmd/backup_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package kubecmd @@ -57,7 +57,7 @@ func TestBackupCRDs(t *testing.T) { log: stubLog{}, } - _, err = client.BackupCRDs(t.Context(), file.NewHandler(memFs), tc.upgradeID) + _, err = client.BackupCRDs(context.Background(), file.NewHandler(memFs), tc.upgradeID) if tc.wantError { assert.Error(err) return @@ -146,7 +146,7 @@ func TestBackupCRs(t *testing.T) { log: stubLog{}, } - err := client.BackupCRs(t.Context(), file.NewHandler(memFs), []apiextensionsv1.CustomResourceDefinition{tc.crd}, tc.upgradeID) + err := client.BackupCRs(context.Background(), file.NewHandler(memFs), []apiextensionsv1.CustomResourceDefinition{tc.crd}, tc.upgradeID) if tc.wantError { assert.Error(err) return diff --git a/internal/constellation/kubecmd/kubecmd.go b/internal/constellation/kubecmd/kubecmd.go index 7fb911e44..dedb4539b 100644 --- a/internal/constellation/kubecmd/kubecmd.go +++ b/internal/constellation/kubecmd/kubecmd.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* @@ -42,11 +42,13 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - k8sjson "k8s.io/apimachinery/pkg/runtime/serializer/json" "k8s.io/client-go/util/retry" - "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" - kubeadmscheme "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/scheme" - kubeadmv1beta4 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta4" + kubeadmv1beta3 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3" + "sigs.k8s.io/yaml" +) + +const ( + maxRetryAttempts = 20 ) // ErrInProgress signals that an upgrade is in progress inside the cluster. 
@@ -67,7 +69,6 @@ func (e *applyError) Error() string { type KubeCmd struct { kubectl kubectlInterface retryInterval time.Duration - maxAttempts int log debugLog } @@ -81,7 +82,6 @@ func New(kubeConfig []byte, log debugLog) (*KubeCmd, error) { return &KubeCmd{ kubectl: client, retryInterval: time.Second * 5, - maxAttempts: 20, log: log, }, nil } @@ -103,7 +103,7 @@ func (k *KubeCmd) UpgradeNodeImage(ctx context.Context, imageVersion semver.Semv return fmt.Errorf("updating image version: %w", err) } - k.log.Debug("Updating local copy of nodeVersion image version", "oldVersion", nodeVersion.Spec.ImageVersion, "newVersion", imageVersion.String()) + k.log.Debug(fmt.Sprintf("Updating local copy of nodeVersion image version from %s to %s", nodeVersion.Spec.ImageVersion, imageVersion.String())) nodeVersion.Spec.ImageReference = imageReference nodeVersion.Spec.ImageVersion = imageVersion.String() @@ -121,43 +121,41 @@ func (k *KubeCmd) UpgradeKubernetesVersion(ctx context.Context, kubernetesVersio return err } + var upgradeErr *compatibility.InvalidUpgradeError // We have to allow users to specify outdated k8s patch versions. // Therefore, this code has to skip k8s updates if a user configures an outdated (i.e. invalid) k8s version. - if _, err := versions.NewValidK8sVersion(string(kubernetesVersion), true); err != nil { - return fmt.Errorf("skipping Kubernetes upgrade: %w", compatibility.NewInvalidUpgradeError( - nodeVersion.Spec.KubernetesClusterVersion, - string(kubernetesVersion), - fmt.Errorf("unsupported Kubernetes version, supported versions are %s", strings.Join(versions.SupportedK8sVersions(), ", "))), - ) - } - - // TODO(burgerdev): remove after releasing v2.19 - // Workaround for https://github.com/kubernetes/kubernetes/issues/127316: force kubelet to - // connect to the local API server. 
- if err := k.patchKubeadmConfig(ctx, func(cc *kubeadm.ClusterConfiguration) { - if cc.FeatureGates == nil { - cc.FeatureGates = map[string]bool{} - } - cc.FeatureGates["ControlPlaneKubeletLocalMode"] = true - }); err != nil { - return fmt.Errorf("setting FeatureGate ControlPlaneKubeletLocalMode: %w", err) - } - - versionConfig, ok := versions.VersionConfigs[kubernetesVersion] - if !ok { - return fmt.Errorf("skipping Kubernetes upgrade: %w", compatibility.NewInvalidUpgradeError( - nodeVersion.Spec.KubernetesClusterVersion, - string(kubernetesVersion), - fmt.Errorf("no version config matching K8s %s", kubernetesVersion), - )) - } - components, err := k.prepareUpdateK8s(&nodeVersion, versionConfig.ClusterVersion, versionConfig.KubernetesComponents, force) + var components *corev1.ConfigMap + _, err = versions.NewValidK8sVersion(string(kubernetesVersion), true) if err != nil { - return err + err = compatibility.NewInvalidUpgradeError( + nodeVersion.Spec.KubernetesClusterVersion, + string(kubernetesVersion), + fmt.Errorf("unsupported Kubernetes version, supported versions are %s", strings.Join(versions.SupportedK8sVersions(), ", ")), + ) + } else { + versionConfig, ok := versions.VersionConfigs[kubernetesVersion] + if !ok { + err = compatibility.NewInvalidUpgradeError( + nodeVersion.Spec.KubernetesClusterVersion, + string(kubernetesVersion), + fmt.Errorf("no version config matching K8s %s", kubernetesVersion), + ) + } else { + components, err = k.prepareUpdateK8s(&nodeVersion, versionConfig.ClusterVersion, + versionConfig.KubernetesComponents, force) + } } - if err := k.applyComponentsCM(ctx, components); err != nil { - return fmt.Errorf("applying k8s components ConfigMap: %w", err) + switch { + case err == nil: + err := k.applyComponentsCM(ctx, components) + if err != nil { + return fmt.Errorf("applying k8s components ConfigMap: %w", err) + } + case errors.As(err, &upgradeErr): + return fmt.Errorf("skipping Kubernetes upgrade: %w", err) + default: + return fmt.Errorf("updating Kubernetes version: %w", err) } updatedNodeVersion, err := k.applyNodeVersion(ctx, nodeVersion) @@ -169,13 +167,8 @@ func (k *KubeCmd) UpgradeKubernetesVersion(ctx context.Context, kubernetesVersio // ClusterStatus returns a map from node name to NodeStatus. func (k *KubeCmd) ClusterStatus(ctx context.Context) (map[string]NodeStatus, error) { - var nodes []corev1.Node - - if err := k.retryAction(ctx, func(ctx context.Context) error { - var err error - nodes, err = k.kubectl.GetNodes(ctx) - return err - }); err != nil { + nodes, err := k.kubectl.GetNodes(ctx) + if err != nil { return nil, fmt.Errorf("getting nodes: %w", err) } @@ -190,7 +183,7 @@ func (k *KubeCmd) ClusterStatus(ctx context.Context) (map[string]NodeStatus, err // GetClusterAttestationConfig fetches the join-config configmap from the cluster, // and returns the attestation config. 
func (k *KubeCmd) GetClusterAttestationConfig(ctx context.Context, variant variant.Variant) (config.AttestationCfg, error) { - existingConf, err := k.retryGetJoinConfig(ctx) + existingConf, err := retryGetJoinConfig(ctx, k.kubectl, k.retryInterval, k.log) if err != nil { return nil, fmt.Errorf("retrieving current attestation config: %w", err) } @@ -215,19 +208,19 @@ func (k *KubeCmd) ApplyJoinConfig(ctx context.Context, newAttestConfig config.At return fmt.Errorf("marshaling attestation config: %w", err) } - joinConfig, err := k.retryGetJoinConfig(ctx) + joinConfig, err := retryGetJoinConfig(ctx, k.kubectl, k.retryInterval, k.log) if err != nil { if !k8serrors.IsNotFound(err) { return fmt.Errorf("getting %s ConfigMap: %w", constants.JoinConfigMap, err) } - k.log.Debug("ConfigMap does not exist, creating it now", "name", constants.JoinConfigMap, "namespace", constants.ConstellationNamespace) - if err := k.retryAction(ctx, func(ctx context.Context) error { + k.log.Debug(fmt.Sprintf("ConfigMap %q does not exist in namespace %q, creating it now", constants.JoinConfigMap, constants.ConstellationNamespace)) + if err := retryAction(ctx, k.retryInterval, maxRetryAttempts, func(ctx context.Context) error { return k.kubectl.CreateConfigMap(ctx, joinConfigMap(newConfigJSON, measurementSalt)) - }); err != nil { + }, k.log); err != nil { return fmt.Errorf("creating join-config ConfigMap: %w", err) } - k.log.Debug("Created ConfigMap", "name", constants.JoinConfigMap, "namespace", constants.ConstellationNamespace) + k.log.Debug(fmt.Sprintf("Created %q ConfigMap in namespace %q", constants.JoinConfigMap, constants.ConstellationNamespace)) return nil } @@ -235,10 +228,10 @@ func (k *KubeCmd) ApplyJoinConfig(ctx context.Context, newAttestConfig config.At joinConfig.Data[constants.AttestationConfigFilename+"_backup"] = joinConfig.Data[constants.AttestationConfigFilename] joinConfig.Data[constants.AttestationConfigFilename] = string(newConfigJSON) k.log.Debug("Triggering attestation config update now") - if err := k.retryAction(ctx, func(ctx context.Context) error { + if err := retryAction(ctx, k.retryInterval, maxRetryAttempts, func(ctx context.Context) error { _, err = k.kubectl.UpdateConfigMap(ctx, joinConfig) return err - }); err != nil { + }, k.log); err != nil { return fmt.Errorf("setting new attestation config: %w", err) } @@ -248,32 +241,45 @@ func (k *KubeCmd) ApplyJoinConfig(ctx context.Context, newAttestConfig config.At // ExtendClusterConfigCertSANs extends the ClusterConfig stored under "kube-system/kubeadm-config" with the given SANs. // Empty strings are ignored, existing SANs are preserved. 
func (k *KubeCmd) ExtendClusterConfigCertSANs(ctx context.Context, alternativeNames []string) error { - if err := k.patchKubeadmConfig(ctx, func(clusterConfiguration *kubeadm.ClusterConfiguration) { - existingSANs := make(map[string]struct{}) - for _, existingSAN := range clusterConfiguration.APIServer.CertSANs { - existingSANs[existingSAN] = struct{}{} - } + clusterConfiguration, kubeadmConfig, err := k.getClusterConfiguration(ctx) + if err != nil { + return fmt.Errorf("getting ClusterConfig: %w", err) + } - var missingSANs []string - for _, san := range alternativeNames { - if san == "" { - continue // skip empty SANs - } - if _, ok := existingSANs[san]; !ok { - missingSANs = append(missingSANs, san) - existingSANs[san] = struct{}{} // make sure we don't add the same SAN twice - } - } + existingSANs := make(map[string]struct{}) + for _, existingSAN := range clusterConfiguration.APIServer.CertSANs { + existingSANs[existingSAN] = struct{}{} + } - if len(missingSANs) == 0 { - k.log.Debug("No new SANs to add to the cluster's apiserver SAN field") + var missingSANs []string + for _, san := range alternativeNames { + if san == "" { + continue // skip empty SANs } - k.log.Debug("Extending the cluster's apiserver SAN field", "certSANs", strings.Join(missingSANs, ", ")) + if _, ok := existingSANs[san]; !ok { + missingSANs = append(missingSANs, san) + existingSANs[san] = struct{}{} // make sure we don't add the same SAN twice + } + } - clusterConfiguration.APIServer.CertSANs = append(clusterConfiguration.APIServer.CertSANs, missingSANs...) - sort.Strings(clusterConfiguration.APIServer.CertSANs) - }); err != nil { - return fmt.Errorf("extending ClusterConfig.CertSANs: %w", err) + if len(missingSANs) == 0 { + k.log.Debug("No new SANs to add to the cluster's apiserver SAN field") + return nil + } + k.log.Debug(fmt.Sprintf("Extending the cluster's apiserver SAN field with the following SANs: %s\n", strings.Join(missingSANs, ", "))) + + clusterConfiguration.APIServer.CertSANs = append(clusterConfiguration.APIServer.CertSANs, missingSANs...) + sort.Strings(clusterConfiguration.APIServer.CertSANs) + + newConfigYAML, err := yaml.Marshal(clusterConfiguration) + if err != nil { + return fmt.Errorf("marshaling ClusterConfiguration: %w", err) + } + + kubeadmConfig.Data[constants.ClusterConfigurationKey] = string(newConfigYAML) + k.log.Debug("Triggering kubeadm config update now") + if _, err = k.kubectl.UpdateConfigMap(ctx, kubeadmConfig); err != nil { + return fmt.Errorf("setting new kubeadm config: %w", err) } k.log.Debug("Successfully extended the cluster's apiserver SAN field") @@ -293,19 +299,14 @@ func (k *KubeCmd) GetConstellationVersion(ctx context.Context) (NodeVersion, err // getConstellationVersion returns the NodeVersion object of a Constellation cluster. 
func (k *KubeCmd) getConstellationVersion(ctx context.Context) (updatev1alpha1.NodeVersion, error) { - var raw *unstructured.Unstructured - if err := k.retryAction(ctx, func(ctx context.Context) error { - var err error - raw, err = k.kubectl.GetCR(ctx, schema.GroupVersionResource{ - Group: "update.edgeless.systems", - Version: "v1alpha1", - Resource: "nodeversions", - }, constants.NodeVersionResourceName) - return err - }); err != nil { + raw, err := k.kubectl.GetCR(ctx, schema.GroupVersionResource{ + Group: "update.edgeless.systems", + Version: "v1alpha1", + Resource: "nodeversions", + }, constants.NodeVersionResourceName) + if err != nil { return updatev1alpha1.NodeVersion{}, err } - var nodeVersion updatev1alpha1.NodeVersion if err := runtime.DefaultUnstructuredConverter.FromUnstructured(raw.UnstructuredContent(), &nodeVersion); err != nil { return updatev1alpha1.NodeVersion{}, fmt.Errorf("converting unstructured to NodeVersion: %w", err) @@ -314,18 +315,31 @@ func (k *KubeCmd) getConstellationVersion(ctx context.Context) (updatev1alpha1.N return nodeVersion, nil } +// getClusterConfiguration fetches the kubeadm-config configmap from the cluster, extracts the config +// and returns both the full configmap and the ClusterConfiguration. +func (k *KubeCmd) getClusterConfiguration(ctx context.Context) (kubeadmv1beta3.ClusterConfiguration, *corev1.ConfigMap, error) { + existingConf, err := k.kubectl.GetConfigMap(ctx, constants.ConstellationNamespace, constants.KubeadmConfigMap) + if err != nil { + return kubeadmv1beta3.ClusterConfiguration{}, nil, fmt.Errorf("retrieving current kubeadm-config: %w", err) + } + clusterConf, ok := existingConf.Data[constants.ClusterConfigurationKey] + if !ok { + return kubeadmv1beta3.ClusterConfiguration{}, nil, errors.New("ClusterConfiguration missing from kubeadm-config") + } + + var existingClusterConfig kubeadmv1beta3.ClusterConfiguration + if err := yaml.Unmarshal([]byte(clusterConf), &existingClusterConfig); err != nil { + return kubeadmv1beta3.ClusterConfiguration{}, nil, fmt.Errorf("unmarshaling ClusterConfiguration: %w", err) + } + + return existingClusterConfig, existingConf, nil +} + // applyComponentsCM applies the k8s components ConfigMap to the cluster. func (k *KubeCmd) applyComponentsCM(ctx context.Context, components *corev1.ConfigMap) error { - if err := k.retryAction(ctx, func(ctx context.Context) error { - // If the components ConfigMap already exists we assume it is up to date, - // since its name is derived from a hash of its contents. - err := k.kubectl.CreateConfigMap(ctx, components) - if err != nil && !k8serrors.IsAlreadyExists(err) { - return err - } - return nil - }); err != nil { - return fmt.Errorf("creating k8s-components ConfigMap: %w", err) + // If the map already exists we can use that map and assume it has the same content as 'configMap'. + if err := k.kubectl.CreateConfigMap(ctx, components); err != nil && !k8serrors.IsAlreadyExists(err) { + return fmt.Errorf("creating k8s-components ConfigMap: %w. 
%T", err, err) } return nil } @@ -333,35 +347,31 @@ func (k *KubeCmd) applyComponentsCM(ctx context.Context, components *corev1.Conf func (k *KubeCmd) applyNodeVersion(ctx context.Context, nodeVersion updatev1alpha1.NodeVersion) (updatev1alpha1.NodeVersion, error) { k.log.Debug("Triggering NodeVersion upgrade now") var updatedNodeVersion updatev1alpha1.NodeVersion + err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { + newNode, err := k.getConstellationVersion(ctx) + if err != nil { + return fmt.Errorf("retrieving current NodeVersion: %w", err) + } - // Retry the entire "retry-on-conflict" block to retry if the block fails, e.g. due to etcd timeouts. - err := k.retryAction(ctx, func(ctx context.Context) error { - return retry.RetryOnConflict(retry.DefaultBackoff, func() error { - newNode, err := k.getConstellationVersion(ctx) - if err != nil { - return fmt.Errorf("retrieving current NodeVersion: %w", err) - } + updateNodeVersions(nodeVersion, &newNode) - updateNodeVersions(nodeVersion, &newNode) + raw, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&newNode) + if err != nil { + return fmt.Errorf("converting nodeVersion to unstructured: %w", err) + } + updated, err := k.kubectl.UpdateCR(ctx, schema.GroupVersionResource{ + Group: "update.edgeless.systems", + Version: "v1alpha1", + Resource: "nodeversions", + }, &unstructured.Unstructured{Object: raw}) + if err != nil { + return err + } - raw, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&newNode) - if err != nil { - return fmt.Errorf("converting nodeVersion to unstructured: %w", err) - } - updated, err := k.kubectl.UpdateCR(ctx, schema.GroupVersionResource{ - Group: "update.edgeless.systems", - Version: "v1alpha1", - Resource: "nodeversions", - }, &unstructured.Unstructured{Object: raw}) - if err != nil { - return err - } - - if err := runtime.DefaultUnstructuredConverter.FromUnstructured(updated.UnstructuredContent(), &updatedNodeVersion); err != nil { - return fmt.Errorf("converting unstructured to NodeVersion: %w", err) - } - return nil - }) + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(updated.UnstructuredContent(), &updatedNodeVersion); err != nil { + return fmt.Errorf("converting unstructured to NodeVersion: %w", err) + } + return nil }) return updatedNodeVersion, err @@ -395,97 +405,17 @@ func (k *KubeCmd) prepareUpdateK8s(nodeVersion *updatev1alpha1.NodeVersion, newC } if !force { if err := compatibility.IsValidUpgrade(nodeVersion.Spec.KubernetesClusterVersion, newClusterVersion); err != nil { - return nil, fmt.Errorf("skipping Kubernetes upgrade: %w", err) + return nil, err } } - k.log.Debug("Updating local copy of nodeVersion Kubernetes version", "oldVersion", nodeVersion.Spec.KubernetesClusterVersion, "newVersion", newClusterVersion) + k.log.Debug(fmt.Sprintf("Updating local copy of nodeVersion Kubernetes version from %s to %s", nodeVersion.Spec.KubernetesClusterVersion, newClusterVersion)) nodeVersion.Spec.KubernetesComponentsReference = configMap.ObjectMeta.Name nodeVersion.Spec.KubernetesClusterVersion = newClusterVersion return &configMap, nil } -func (k *KubeCmd) retryGetJoinConfig(ctx context.Context) (*corev1.ConfigMap, error) { - var ctr int - retrieable := func(err error) bool { - if k8serrors.IsNotFound(err) { - return false - } - ctr++ - k.log.Debug("Getting join-config ConfigMap failed", "attempt", ctr, "maxAttempts", k.maxAttempts, "error", err) - return ctr < k.maxAttempts - } - - var joinConfig *corev1.ConfigMap - var err error - doer := &kubeDoer{ - 
action: func(ctx context.Context) error { - joinConfig, err = k.kubectl.GetConfigMap(ctx, constants.ConstellationNamespace, constants.JoinConfigMap) - return err - }, - } - retrier := conretry.NewIntervalRetrier(doer, k.retryInterval, retrieable) - - err = retrier.Do(ctx) - return joinConfig, err -} - -func (k *KubeCmd) retryAction(ctx context.Context, action func(ctx context.Context) error) error { - ctr := 0 - retrier := conretry.NewIntervalRetrier(&kubeDoer{action: action}, k.retryInterval, func(err error) bool { - ctr++ - k.log.Debug("Action failed", "attempt", ctr, "maxAttempts", k.maxAttempts, "error", err) - return ctr < k.maxAttempts - }) - return retrier.Do(ctx) -} - -// patchKubeadmConfig fetches and unpacks the kube-system/kubeadm-config ClusterConfiguration entry, -// runs doPatch on it and uploads the result. -func (k *KubeCmd) patchKubeadmConfig(ctx context.Context, doPatch func(*kubeadm.ClusterConfiguration)) error { - var kubeadmConfig *corev1.ConfigMap - if err := k.retryAction(ctx, func(ctx context.Context) error { - var err error - kubeadmConfig, err = k.kubectl.GetConfigMap(ctx, constants.ConstellationNamespace, constants.KubeadmConfigMap) - return err - }); err != nil { - return fmt.Errorf("retrieving current kubeadm-config: %w", err) - } - - clusterConfigData, ok := kubeadmConfig.Data[constants.ClusterConfigurationKey] - if !ok { - return errors.New("ClusterConfiguration missing from kubeadm-config") - } - - var clusterConfiguration kubeadm.ClusterConfiguration - if err := runtime.DecodeInto(kubeadmscheme.Codecs.UniversalDecoder(), []byte(clusterConfigData), &clusterConfiguration); err != nil { - return fmt.Errorf("decoding cluster configuration data: %w", err) - } - - doPatch(&clusterConfiguration) - - opt := k8sjson.SerializerOptions{Yaml: true} - serializer := k8sjson.NewSerializerWithOptions(k8sjson.DefaultMetaFactory, kubeadmscheme.Scheme, kubeadmscheme.Scheme, opt) - encoder := kubeadmscheme.Codecs.EncoderForVersion(serializer, kubeadmv1beta4.SchemeGroupVersion) - newConfigYAML, err := runtime.Encode(encoder, &clusterConfiguration) - if err != nil { - return fmt.Errorf("marshaling ClusterConfiguration: %w", err) - } - - kubeadmConfig.Data[constants.ClusterConfigurationKey] = string(newConfigYAML) - k.log.Debug("Triggering kubeadm config update now") - if err = k.retryAction(ctx, func(ctx context.Context) error { - _, err := k.kubectl.UpdateConfigMap(ctx, kubeadmConfig) - return err - }); err != nil { - return fmt.Errorf("setting new kubeadm config: %w", err) - } - - k.log.Debug("Successfully patched the cluster's kubeadm-config") - return nil -} - func checkForApplyError(expected, actual updatev1alpha1.NodeVersion) error { var err error switch { @@ -524,6 +454,41 @@ func (k *kubeDoer) Do(ctx context.Context) error { return k.action(ctx) } +func retryGetJoinConfig(ctx context.Context, kubectl kubectlInterface, retryInterval time.Duration, log debugLog) (*corev1.ConfigMap, error) { + var retries int + retrieable := func(err error) bool { + if k8serrors.IsNotFound(err) { + return false + } + retries++ + log.Debug(fmt.Sprintf("Getting join-config ConfigMap failed (attempt %d/%d): %s", retries, maxRetryAttempts, err)) + return retries < maxRetryAttempts + } + + var joinConfig *corev1.ConfigMap + var err error + doer := &kubeDoer{ + action: func(ctx context.Context) error { + joinConfig, err = kubectl.GetConfigMap(ctx, constants.ConstellationNamespace, constants.JoinConfigMap) + return err + }, + } + retrier := conretry.NewIntervalRetrier(doer, retryInterval, 
retrieable) + + err = retrier.Do(ctx) + return joinConfig, err +} + +func retryAction(ctx context.Context, retryInterval time.Duration, maxRetries int, action func(ctx context.Context) error, log debugLog) error { + ctr := 0 + retrier := conretry.NewIntervalRetrier(&kubeDoer{action: action}, retryInterval, func(err error) bool { + ctr++ + log.Debug(fmt.Sprintf("Action failed (attempt %d/%d): %s", ctr, maxRetries, err)) + return ctr < maxRetries + }) + return retrier.Do(ctx) +} + // kubectlInterface provides access to the Kubernetes API. type kubectlInterface interface { GetNodes(ctx context.Context) ([]corev1.Node, error) diff --git a/internal/constellation/kubecmd/kubecmd_test.go b/internal/constellation/kubecmd/kubecmd_test.go index 3e861afb7..cdaf99921 100644 --- a/internal/constellation/kubecmd/kubecmd_test.go +++ b/internal/constellation/kubecmd/kubecmd_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package kubecmd @@ -174,13 +174,11 @@ func TestUpgradeNodeImage(t *testing.T) { } upgrader := KubeCmd{ - kubectl: kubectl, - retryInterval: time.Millisecond, - maxAttempts: 5, - log: logger.NewTest(t), + kubectl: kubectl, + log: logger.NewTest(t), } - err = upgrader.UpgradeNodeImage(t.Context(), tc.newImageVersion, fmt.Sprintf("/path/to/image:%s", tc.newImageVersion.String()), tc.force) + err = upgrader.UpgradeNodeImage(context.Background(), tc.newImageVersion, fmt.Sprintf("/path/to/image:%s", tc.newImageVersion.String()), tc.force) // Check upgrades first because if we checked err first, UpgradeImage may error due to other reasons and still trigger an upgrade. if tc.wantUpdate { assert.NotNil(unstructuredClient.updatedObject) @@ -281,22 +279,17 @@ func TestUpgradeKubernetesVersion(t *testing.T) { } kubectl := &stubKubectl{ unstructuredInterface: unstructuredClient, - configMaps: map[string]*corev1.ConfigMap{ - constants.KubeadmConfigMap: {Data: map[string]string{"ClusterConfiguration": kubeadmClusterConfigurationV1Beta4}}, - }, } if tc.customClientFn != nil { kubectl.unstructuredInterface = tc.customClientFn(nodeVersion) } upgrader := KubeCmd{ - kubectl: kubectl, - retryInterval: time.Millisecond, - maxAttempts: 5, - log: logger.NewTest(t), + kubectl: kubectl, + log: logger.NewTest(t), } - err = upgrader.UpgradeKubernetesVersion(t.Context(), tc.newKubernetesVersion, tc.force) + err = upgrader.UpgradeKubernetesVersion(context.Background(), tc.newKubernetesVersion, tc.force) // Check upgrades first because if we checked err first, UpgradeImage may error due to other reasons and still trigger an upgrade. 
if tc.wantUpdate { assert.NotNil(unstructuredClient.updatedObject) @@ -348,9 +341,7 @@ func TestIsValidImageUpgrade(t *testing.T) { assert := assert.New(t) upgrader := &KubeCmd{ - retryInterval: time.Millisecond, - maxAttempts: 5, - log: logger.NewTest(t), + log: logger.NewTest(t), } nodeVersion := updatev1alpha1.NodeVersion{ @@ -401,9 +392,7 @@ func TestUpdateK8s(t *testing.T) { assert := assert.New(t) upgrader := &KubeCmd{ - retryInterval: time.Millisecond, - maxAttempts: 5, - log: logger.NewTest(t), + log: logger.NewTest(t), } nodeVersion := updatev1alpha1.NodeVersion{ @@ -600,10 +589,9 @@ func TestApplyJoinConfig(t *testing.T) { kubectl: tc.kubectl, log: logger.NewTest(t), retryInterval: time.Millisecond, - maxAttempts: 5, } - err := cmd.ApplyJoinConfig(t.Context(), tc.newAttestationCfg, []byte{0x11}) + err := cmd.ApplyJoinConfig(context.Background(), tc.newAttestationCfg, []byte{0x11}) if tc.wantErr { assert.Error(err) return @@ -623,106 +611,6 @@ func TestApplyJoinConfig(t *testing.T) { } } -func TestRetryAction(t *testing.T) { - maxAttempts := 3 - - testCases := map[string]struct { - failures int - wantErr bool - }{ - "no failures": { - failures: 0, - }, - "fail once": { - failures: 1, - }, - "fail equal to maxAttempts": { - failures: maxAttempts, - wantErr: true, - }, - "fail more than maxAttempts": { - failures: maxAttempts + 5, - wantErr: true, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - k := &KubeCmd{ - retryInterval: time.Millisecond, - maxAttempts: maxAttempts, - log: logger.NewTest(t), - } - - errs := map[int]error{} - for idx := range tc.failures { - errs[idx] = assert.AnError - } - - assert := assert.New(t) - - failureCtr := 0 - action := func(context.Context) error { - defer func() { failureCtr++ }() - return errs[failureCtr] - } - - err := k.retryAction(t.Context(), action) - if tc.wantErr { - assert.Error(err) - assert.Equal(min(tc.failures, maxAttempts), failureCtr) - return - } - assert.NoError(err) - assert.Equal(tc.failures, failureCtr-1) - }) - } -} - -func TestExtendClusterConfigCertSANs(t *testing.T) { - ctx := t.Context() - - testCases := map[string]struct { - clusterConfig string - }{ - "kubeadmv1beta3.ClusterConfiguration": { - clusterConfig: kubeadmClusterConfigurationV1Beta3, - }, - "kubeadmv1beta4.ClusterConfiguration": { - clusterConfig: kubeadmClusterConfigurationV1Beta4, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - require := require.New(t) - assert := assert.New(t) - kubectl := &fakeConfigMapClient{ - configMaps: map[string]*corev1.ConfigMap{ - constants.KubeadmConfigMap: {Data: map[string]string{"ClusterConfiguration": tc.clusterConfig}}, - }, - } - cmd := &KubeCmd{ - kubectl: kubectl, - log: logger.NewTest(t), - retryInterval: time.Millisecond, - } - - err := cmd.ExtendClusterConfigCertSANs(ctx, []string{"example.com"}) - require.NoError(err) - - cm := kubectl.configMaps["kubeadm-config"] - require.NotNil(cm) - cc := cm.Data["ClusterConfiguration"] - require.NotNil(cc) - // Verify that SAN was added. - assert.Contains(cc, "example.com") - // Verify that config was written in v1beta4, regardless of the version read. 
- assert.Contains(cc, "kubeadm.k8s.io/v1beta4") - }) - } -} - type fakeUnstructuredClient struct { mock.Mock } @@ -882,83 +770,3 @@ func supportedValidK8sVersions() (res []versions.ValidK8sVersion) { } return } - -var kubeadmClusterConfigurationV1Beta3 = ` -apiVersion: kubeadm.k8s.io/v1beta3 -kind: ClusterConfiguration -apiServer: - certSANs: - - 127.0.0.1 - extraArgs: - kubelet-certificate-authority: /etc/kubernetes/pki/ca.crt - profiling: "false" - extraVolumes: - - hostPath: /var/log/kubernetes/audit/ - mountPath: /var/log/kubernetes/audit/ - name: audit-log - pathType: DirectoryOrCreate -certificatesDir: /etc/kubernetes/pki -clusterName: test-55bbf58d -controlPlaneEndpoint: 34.149.125.227:6443 -controllerManager: - extraArgs: - cloud-provider: external -dns: - disabled: true -encryptionAlgorithm: RSA-2048 -etcd: - local: - dataDir: /var/lib/etcd -imageRepository: registry.k8s.io -kubernetesVersion: v1.31.1 -networking: - dnsDomain: cluster.local - serviceSubnet: 10.96.0.0/12 -proxy: - disabled: true -scheduler: - extraArgs: - profiling: "false" -` - -var kubeadmClusterConfigurationV1Beta4 = ` -apiVersion: kubeadm.k8s.io/v1beta4 -kind: ClusterConfiguration -apiServer: - certSANs: - - 127.0.0.1 - extraArgs: - - name: kubelet-certificate-authority - value: /etc/kubernetes/pki/ca.crt - - name: profiling - value: "false" - extraVolumes: - - hostPath: /var/log/kubernetes/audit/ - mountPath: /var/log/kubernetes/audit/ - name: audit-log - pathType: DirectoryOrCreate -certificatesDir: /etc/kubernetes/pki -clusterName: test-55bbf58d -controlPlaneEndpoint: 34.149.125.227:6443 -controllerManager: - extraArgs: - - name: cloud-provider - value: external -dns: - disabled: true -encryptionAlgorithm: RSA-2048 -etcd: - local: - dataDir: /var/lib/etcd -imageRepository: registry.k8s.io -kubernetesVersion: v1.31.1 -networking: - dnsDomain: cluster.local - serviceSubnet: 10.96.0.0/12 -proxy: - disabled: true -scheduler: - extraArgs: - - name: profiling - value: "false" -` diff --git a/internal/constellation/kubecmd/status.go b/internal/constellation/kubecmd/status.go index 328ed38ba..4b7838246 100644 --- a/internal/constellation/kubecmd/status.go +++ b/internal/constellation/kubecmd/status.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package kubecmd diff --git a/internal/constellation/kubernetes.go b/internal/constellation/kubernetes.go index 30b553816..af038adce 100644 --- a/internal/constellation/kubernetes.go +++ b/internal/constellation/kubernetes.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package constellation diff --git a/internal/constellation/serviceaccount.go b/internal/constellation/serviceaccount.go index 9c38c94e6..c88d92a19 100644 --- a/internal/constellation/serviceaccount.go +++ b/internal/constellation/serviceaccount.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package constellation diff --git a/internal/constellation/state/state.go b/internal/constellation/state/state.go index af902900c..bee5f8b2b 100644 --- a/internal/constellation/state/state.go +++ b/internal/constellation/state/state.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // This binary can be build from siderolabs/talos projects. 
Located at: @@ -383,7 +383,7 @@ func (s *State) preInitConstraints(attestation variant.Variant) func() []*valida ), ) } - case variant.GCPSEVES{}, variant.GCPSEVSNP{}: + case variant.GCPSEVES{}: // GCP values need to be valid after infrastructure creation. constraints = append(constraints, // Azure values need to be nil or empty. @@ -514,7 +514,7 @@ func (s *State) postInitConstraints(attestation variant.Variant) func() []*valid ), ) } - case variant.GCPSEVES{}, variant.GCPSEVSNP{}: + case variant.GCPSEVES{}: constraints = append(constraints, // Azure values need to be nil or empty. validation.Or( diff --git a/internal/constellation/state/state_test.go b/internal/constellation/state/state_test.go index 402f49681..bf23e78bc 100644 --- a/internal/constellation/state/state_test.go +++ b/internal/constellation/state/state_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package state diff --git a/internal/constellation/state/validation_test.go b/internal/constellation/state/validation_test.go index 5c5b458fa..f3753e132 100644 --- a/internal/constellation/state/validation_test.go +++ b/internal/constellation/state/validation_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package state diff --git a/internal/containerimage/containerimage.go b/internal/containerimage/containerimage.go index 6ed7d20a5..f5b5fd433 100644 --- a/internal/containerimage/containerimage.go +++ b/internal/containerimage/containerimage.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* diff --git a/internal/crypto/BUILD.bazel b/internal/crypto/BUILD.bazel index 0b3e402d9..28131c022 100644 --- a/internal/crypto/BUILD.bazel +++ b/internal/crypto/BUILD.bazel @@ -6,10 +6,7 @@ go_library( srcs = ["crypto.go"], importpath = "github.com/edgelesssys/constellation/v2/internal/crypto", visibility = ["//:__subpackages__"], - deps = [ - "@org_golang_x_crypto//hkdf", - "@org_golang_x_crypto//ssh", - ], + deps = ["@org_golang_x_crypto//hkdf"], ) go_test( diff --git a/internal/crypto/crypto.go b/internal/crypto/crypto.go index 788f4ec89..081e25d71 100644 --- a/internal/crypto/crypto.go +++ b/internal/crypto/crypto.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package crypto provides functions to for cryptography and random numbers. @@ -9,7 +9,6 @@ package crypto import ( "bytes" - "crypto/ed25519" "crypto/rand" "crypto/sha256" "crypto/x509" @@ -17,10 +16,8 @@ import ( "fmt" "io" "math/big" - "time" "golang.org/x/crypto/hkdf" - "golang.org/x/crypto/ssh" ) const ( @@ -65,41 +62,6 @@ func GenerateRandomBytes(length int) ([]byte, error) { return nonce, nil } -// GenerateEmergencySSHCAKey creates a CA that is used to sign keys for emergency ssh access. -func GenerateEmergencySSHCAKey(seed []byte) (ssh.Signer, error) { - _, priv, err := ed25519.GenerateKey(bytes.NewReader(seed)) - if err != nil { - return nil, err - } - ca, err := ssh.NewSignerFromSigner(priv) - if err != nil { - return nil, err - } - return ca, nil -} - -// GenerateSSHHostCertificate takes a given public key and CA to generate a host certificate. 
-func GenerateSSHHostCertificate(principals []string, publicKey ssh.PublicKey, ca ssh.Signer) (*ssh.Certificate, error) { - certificate := ssh.Certificate{ - CertType: ssh.HostCert, - ValidPrincipals: principals, - ValidAfter: uint64(time.Now().Unix()), - ValidBefore: ssh.CertTimeInfinity, - Reserved: []byte{}, - Key: publicKey, - KeyId: principals[0], - Permissions: ssh.Permissions{ - CriticalOptions: map[string]string{}, - Extensions: map[string]string{}, - }, - } - if err := certificate.SignCert(rand.Reader, ca); err != nil { - return nil, err - } - - return &certificate, nil -} - // PemToX509Cert takes a list of PEM-encoded certificates, parses the first one and returns it // as an x.509 certificate. func PemToX509Cert(raw []byte) (*x509.Certificate, error) { diff --git a/internal/crypto/crypto_test.go b/internal/crypto/crypto_test.go index a99e62dd4..674ec4c84 100644 --- a/internal/crypto/crypto_test.go +++ b/internal/crypto/crypto_test.go @@ -1,13 +1,12 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package crypto import ( - "crypto/ed25519" "crypto/x509" "testing" @@ -122,47 +121,6 @@ func TestGenerateRandomBytes(t *testing.T) { assert.Len(n3, 16) } -func TestGenerateEmergencySSHCAKey(t *testing.T) { - nullKey := make([]byte, ed25519.SeedSize) - - testCases := map[string]struct { - key []byte - wantErr bool - }{ - "key length = 0": { - key: make([]byte, 0), - wantErr: true, - }, - "valid key": { - key: nullKey, - }, - "nil input": { - key: nil, - wantErr: true, - }, - "long key": { - key: make([]byte, 256), - }, - "key too short": { - key: make([]byte, ed25519.SeedSize-1), - wantErr: true, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - assert := assert.New(t) - - _, err := GenerateEmergencySSHCAKey(tc.key) - if tc.wantErr { - assert.Error(err) - } else { - assert.NoError(err) - } - }) - } -} - func TestPemToX509Cert(t *testing.T) { testCases := map[string]struct { pemCert []byte diff --git a/internal/crypto/testvector/testvector.go b/internal/crypto/testvector/testvector.go index 1e02e13c0..38eac83be 100644 --- a/internal/crypto/testvector/testvector.go +++ b/internal/crypto/testvector/testvector.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package testvector provides test vectors for key derivation and crypto functions. 
diff --git a/internal/cryptsetup/cryptsetup.go b/internal/cryptsetup/cryptsetup.go index 67e31825a..a61fb83e8 100644 --- a/internal/cryptsetup/cryptsetup.go +++ b/internal/cryptsetup/cryptsetup.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* diff --git a/internal/cryptsetup/cryptsetup_cgo.go b/internal/cryptsetup/cryptsetup_cgo.go index e8ac2e31a..555e07dfe 100644 --- a/internal/cryptsetup/cryptsetup_cgo.go +++ b/internal/cryptsetup/cryptsetup_cgo.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cryptsetup diff --git a/internal/cryptsetup/cryptsetup_cross.go b/internal/cryptsetup/cryptsetup_cross.go index 325a86be4..df1a30790 100644 --- a/internal/cryptsetup/cryptsetup_cross.go +++ b/internal/cryptsetup/cryptsetup_cross.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cryptsetup diff --git a/internal/encoding/encoding.go b/internal/encoding/encoding.go index c1fed1815..fba7f33da 100644 --- a/internal/encoding/encoding.go +++ b/internal/encoding/encoding.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package encoding provides data types and functions for JSON or YAML encoding/decoding. diff --git a/internal/encoding/encoding_test.go b/internal/encoding/encoding_test.go index 54600e88f..0c6e4a130 100644 --- a/internal/encoding/encoding_test.go +++ b/internal/encoding/encoding_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package encoding diff --git a/internal/file/file.go b/internal/file/file.go index 8bfb9ecbe..84e0104b2 100644 --- a/internal/file/file.go +++ b/internal/file/file.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* @@ -233,8 +233,8 @@ func (h *Handler) CopyFile(src, dst string, opts ...Option) error { } // RenameFile renames a file, overwriting any existing file at the destination. -func (h *Handler) RenameFile(a, b string) error { - return h.fs.Rename(a, b) +func (h *Handler) RenameFile(old, new string) error { + return h.fs.Rename(old, new) } // IsEmpty returns true if the given directory is empty. 
diff --git a/internal/file/file_test.go b/internal/file/file_test.go index e18341a18..35c6b3bfc 100644 --- a/internal/file/file_test.go +++ b/internal/file/file_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package file diff --git a/internal/grpc/atlscredentials/BUILD.bazel b/internal/grpc/atlscredentials/BUILD.bazel index c69ef3bda..8f16bd02f 100644 --- a/internal/grpc/atlscredentials/BUILD.bazel +++ b/internal/grpc/atlscredentials/BUILD.bazel @@ -21,7 +21,7 @@ go_test( "//internal/atls", "@com_github_stretchr_testify//assert", "@com_github_stretchr_testify//require", - "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//:go_default_library", "@org_golang_google_grpc//test/bufconn", "@org_uber_go_goleak//:goleak", ], diff --git a/internal/grpc/atlscredentials/atlscredentials.go b/internal/grpc/atlscredentials/atlscredentials.go index cb1c1dca5..949f9af41 100644 --- a/internal/grpc/atlscredentials/atlscredentials.go +++ b/internal/grpc/atlscredentials/atlscredentials.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package atlscredentials handles creation of TLS credentials for attested TLS (ATLS). diff --git a/internal/grpc/atlscredentials/atlscredentials_test.go b/internal/grpc/atlscredentials/atlscredentials_test.go index d03a03e94..a9ddaa6ac 100644 --- a/internal/grpc/atlscredentials/atlscredentials_test.go +++ b/internal/grpc/atlscredentials/atlscredentials_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package atlscredentials @@ -66,14 +66,14 @@ func TestATLSCredentials(t *testing.T) { go func() { var err error defer func() { errChan <- err }() - conn, err := grpc.NewClient("192.0.2.1", grpc.WithContextDialer(func(_ context.Context, _ string) (net.Conn, error) { + conn, err := grpc.DialContext(context.Background(), "", grpc.WithContextDialer(func(_ context.Context, _ string) (net.Conn, error) { return lis.Dial() }), grpc.WithTransportCredentials(clientCreds)) require.NoError(err) defer conn.Close() client := initproto.NewAPIClient(conn) - _, err = client.Init(t.Context(), &initproto.InitRequest{}) + _, err = client.Init(context.Background(), &initproto.InitRequest{}) }() } diff --git a/internal/grpc/dialer/BUILD.bazel b/internal/grpc/dialer/BUILD.bazel index 9dc1aaf8f..b8428ba2d 100644 --- a/internal/grpc/dialer/BUILD.bazel +++ b/internal/grpc/dialer/BUILD.bazel @@ -9,7 +9,7 @@ go_library( deps = [ "//internal/atls", "//internal/grpc/atlscredentials", - "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//:go_default_library", "@org_golang_google_grpc//credentials/insecure", ], ) @@ -25,7 +25,7 @@ go_test( "//internal/grpc/testdialer", "@com_github_stretchr_testify//assert", "@com_github_stretchr_testify//require", - "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//:go_default_library", "@org_golang_google_grpc//interop/grpc_testing", "@org_uber_go_goleak//:goleak", ], diff --git a/internal/grpc/dialer/dialer.go b/internal/grpc/dialer/dialer.go index b81ad1fe4..8c42f4041 100644 --- a/internal/grpc/dialer/dialer.go +++ b/internal/grpc/dialer/dialer.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package dialer provides a grpc dialer that can be used to create grpc client connections with 
different levels of ATLS encryption / verification. @@ -34,14 +34,14 @@ func New(issuer atls.Issuer, validator atls.Validator, netDialer NetDialer) *Dia } // Dial creates a new grpc client connection to the given target using the atls validator. -func (d *Dialer) Dial(target string) (*grpc.ClientConn, error) { +func (d *Dialer) Dial(ctx context.Context, target string) (*grpc.ClientConn, error) { var validators []atls.Validator if d.validator != nil { validators = append(validators, d.validator) } credentials := atlscredentials.New(d.issuer, validators) - return grpc.NewClient(target, + return grpc.DialContext(ctx, target, d.grpcWithDialer(), grpc.WithTransportCredentials(credentials), ) @@ -49,27 +49,24 @@ func (d *Dialer) Dial(target string) (*grpc.ClientConn, error) { // DialInsecure creates a new grpc client connection to the given target without using encryption or verification. // Only use this method when using another kind of encryption / verification (VPN, etc). -func (d *Dialer) DialInsecure(target string) (*grpc.ClientConn, error) { - return grpc.NewClient(target, +func (d *Dialer) DialInsecure(ctx context.Context, target string) (*grpc.ClientConn, error) { + return grpc.DialContext(ctx, target, d.grpcWithDialer(), grpc.WithTransportCredentials(insecure.NewCredentials()), ) } // DialNoVerify creates a new grpc client connection to the given target without verifying the server's attestation. -func (d *Dialer) DialNoVerify(target string) (*grpc.ClientConn, error) { +func (d *Dialer) DialNoVerify(ctx context.Context, target string) (*grpc.ClientConn, error) { credentials := atlscredentials.New(nil, nil) - return grpc.NewClient(target, + return grpc.DialContext(ctx, target, d.grpcWithDialer(), grpc.WithTransportCredentials(credentials), ) } func (d *Dialer) grpcWithDialer() grpc.DialOption { - if d.netDialer == nil { - return grpc.EmptyDialOption{} - } return grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) { return d.netDialer.DialContext(ctx, "tcp", addr) }) diff --git a/internal/grpc/dialer/dialer_test.go b/internal/grpc/dialer/dialer_test.go index 6520cf08d..c137ee727 100644 --- a/internal/grpc/dialer/dialer_test.go +++ b/internal/grpc/dialer/dialer_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package dialer @@ -28,42 +28,42 @@ func TestMain(m *testing.M) { func TestDial(t *testing.T) { testCases := map[string]struct { tls bool - dialFn func(dialer *Dialer, target string) (*grpc.ClientConn, error) + dialFn func(dialer *Dialer, ctx context.Context, target string) (*grpc.ClientConn, error) wantErr bool }{ "Dial with tls on server works": { tls: true, - dialFn: func(dialer *Dialer, target string) (*grpc.ClientConn, error) { - return dialer.Dial(target) + dialFn: func(dialer *Dialer, ctx context.Context, target string) (*grpc.ClientConn, error) { + return dialer.Dial(ctx, target) }, }, "Dial without tls on server fails": { - dialFn: func(dialer *Dialer, target string) (*grpc.ClientConn, error) { - return dialer.Dial(target) + dialFn: func(dialer *Dialer, ctx context.Context, target string) (*grpc.ClientConn, error) { + return dialer.Dial(ctx, target) }, wantErr: true, }, "DialNoVerify with tls on server works": { tls: true, - dialFn: func(dialer *Dialer, target string) (*grpc.ClientConn, error) { - return dialer.DialNoVerify(target) + dialFn: func(dialer *Dialer, ctx context.Context, target string) (*grpc.ClientConn, error) { + return dialer.DialNoVerify(ctx, 
target) }, }, "DialNoVerify without tls on server fails": { - dialFn: func(dialer *Dialer, target string) (*grpc.ClientConn, error) { - return dialer.DialNoVerify(target) + dialFn: func(dialer *Dialer, ctx context.Context, target string) (*grpc.ClientConn, error) { + return dialer.DialNoVerify(ctx, target) }, wantErr: true, }, "DialInsecure without tls on server works": { - dialFn: func(dialer *Dialer, target string) (*grpc.ClientConn, error) { - return dialer.DialInsecure(target) + dialFn: func(dialer *Dialer, ctx context.Context, target string) (*grpc.ClientConn, error) { + return dialer.DialInsecure(ctx, target) }, }, "DialInsecure with tls on server fails": { tls: true, - dialFn: func(dialer *Dialer, target string) (*grpc.ClientConn, error) { - return dialer.DialInsecure(target) + dialFn: func(dialer *Dialer, ctx context.Context, target string) (*grpc.ClientConn, error) { + return dialer.DialInsecure(ctx, target) }, wantErr: true, }, @@ -81,12 +81,12 @@ func TestDial(t *testing.T) { grpc_testing.RegisterTestServiceServer(server, api) go server.Serve(netDialer.GetListener("192.0.2.1:1234")) defer server.Stop() - conn, err := tc.dialFn(dialer, "192.0.2.1:1234") + conn, err := tc.dialFn(dialer, context.Background(), "192.0.2.1:1234") require.NoError(err) defer conn.Close() client := grpc_testing.NewTestServiceClient(conn) - _, err = client.EmptyCall(t.Context(), &grpc_testing.Empty{}) + _, err = client.EmptyCall(context.Background(), &grpc_testing.Empty{}) if tc.wantErr { assert.Error(err) diff --git a/internal/grpc/grpclog/grpclog.go b/internal/grpc/grpclog/grpclog.go index c92a4f7f4..e29c990b6 100644 --- a/internal/grpc/grpclog/grpclog.go +++ b/internal/grpc/grpclog/grpclog.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // grpclog provides a logging utilities for gRPC. @@ -31,15 +31,15 @@ func LogStateChangesUntilReady(ctx context.Context, conn getStater, log debugLog go func() { defer wg.Done() state := conn.GetState() - log.Debug(fmt.Sprintf("Connection state started as %q", state)) + log.Debug(fmt.Sprintf("Connection state started as %s", state)) for ; state != connectivity.Ready && conn.WaitForStateChange(ctx, state); state = conn.GetState() { - log.Debug(fmt.Sprintf("Connection state changed to %q", state)) + log.Debug(fmt.Sprintf("Connection state changed to %s", state)) } if state == connectivity.Ready { log.Debug("Connection ready") isReadyCallback() } else { - log.Debug(fmt.Sprintf("Connection state ended with %q", state)) + log.Debug(fmt.Sprintf("Connection state ended with %s", state)) } }() } diff --git a/internal/grpc/grpclog/grpclog_test.go b/internal/grpc/grpclog/grpclog_test.go index caebc0770..704f1a923 100644 --- a/internal/grpc/grpclog/grpclog_test.go +++ b/internal/grpc/grpclog/grpclog_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // grpclog provides a logging utilities for gRPC. 
@@ -33,8 +33,8 @@ func TestLogStateChanges(t *testing.T) { }, assert: func(t *testing.T, lg *spyLog, isReadyCallbackCalled bool) { require.Len(t, lg.msgs, 3) - assert.Equal(t, "Connection state started as \"CONNECTING\"", lg.msgs[0]) - assert.Equal(t, "Connection state changed to \"CONNECTING\"", lg.msgs[1]) + assert.Equal(t, "Connection state started as CONNECTING", lg.msgs[0]) + assert.Equal(t, "Connection state changed to CONNECTING", lg.msgs[1]) assert.Equal(t, "Connection ready", lg.msgs[2]) assert.True(t, isReadyCallbackCalled) }, @@ -49,7 +49,7 @@ func TestLogStateChanges(t *testing.T) { }, assert: func(t *testing.T, lg *spyLog, isReadyCallbackCalled bool) { require.Len(t, lg.msgs, 2) - assert.Equal(t, "Connection state started as \"READY\"", lg.msgs[0]) + assert.Equal(t, "Connection state started as READY", lg.msgs[0]) assert.Equal(t, "Connection ready", lg.msgs[1]) assert.True(t, isReadyCallbackCalled) }, @@ -64,8 +64,8 @@ func TestLogStateChanges(t *testing.T) { }, assert: func(t *testing.T, lg *spyLog, isReadyCallbackCalled bool) { require.Len(t, lg.msgs, 2) - assert.Equal(t, "Connection state started as \"CONNECTING\"", lg.msgs[0]) - assert.Equal(t, "Connection state ended with \"CONNECTING\"", lg.msgs[1]) + assert.Equal(t, "Connection state started as CONNECTING", lg.msgs[0]) + assert.Equal(t, "Connection state ended with CONNECTING", lg.msgs[1]) assert.False(t, isReadyCallbackCalled) }, }, @@ -76,7 +76,7 @@ func TestLogStateChanges(t *testing.T) { var wg sync.WaitGroup isReadyCallbackCalled := false - LogStateChangesUntilReady(t.Context(), tc.conn, logger, &wg, func() { isReadyCallbackCalled = true }) + LogStateChangesUntilReady(context.Background(), tc.conn, logger, &wg, func() { isReadyCallbackCalled = true }) wg.Wait() tc.assert(t, logger, isReadyCallbackCalled) }) diff --git a/internal/grpc/retry/retry.go b/internal/grpc/retry/retry.go index 3a0f1724b..b7457fc1f 100644 --- a/internal/grpc/retry/retry.go +++ b/internal/grpc/retry/retry.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package retry provides functions to check if a gRPC error is retryable. diff --git a/internal/grpc/retry/retry_test.go b/internal/grpc/retry/retry_test.go index b6ad075ed..5e51e4bb0 100644 --- a/internal/grpc/retry/retry_test.go +++ b/internal/grpc/retry/retry_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package retry diff --git a/internal/grpc/testdialer/testdialer.go b/internal/grpc/testdialer/testdialer.go index d95c2be69..e6771903f 100644 --- a/internal/grpc/testdialer/testdialer.go +++ b/internal/grpc/testdialer/testdialer.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package testdialer provides a fake dialer for testing. 
diff --git a/internal/imagefetcher/imagefetcher.go b/internal/imagefetcher/imagefetcher.go index 827adfc89..ebbf74e41 100644 --- a/internal/imagefetcher/imagefetcher.go +++ b/internal/imagefetcher/imagefetcher.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* diff --git a/internal/imagefetcher/imagefetcher_test.go b/internal/imagefetcher/imagefetcher_test.go index e60443ccc..1397c4fb4 100644 --- a/internal/imagefetcher/imagefetcher_test.go +++ b/internal/imagefetcher/imagefetcher_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package imagefetcher @@ -256,7 +256,7 @@ func TestFetchReference(t *testing.T) { fs: af, } - reference, err := fetcher.FetchReference(t.Context(), tc.provider, variant.Dummy{}, + reference, err := fetcher.FetchReference(context.Background(), tc.provider, variant.Dummy{}, tc.image, "someRegion", false) if tc.wantErr { diff --git a/internal/imagefetcher/raw.go b/internal/imagefetcher/raw.go index 1375fdca7..593b0d9e7 100644 --- a/internal/imagefetcher/raw.go +++ b/internal/imagefetcher/raw.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package imagefetcher diff --git a/internal/imagefetcher/raw_test.go b/internal/imagefetcher/raw_test.go index 86a44ab88..e2bbd8b9d 100644 --- a/internal/imagefetcher/raw_test.go +++ b/internal/imagefetcher/raw_test.go @@ -1,13 +1,14 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package imagefetcher import ( "bytes" + "context" "io" "net/http" "os" @@ -90,7 +91,7 @@ func TestDownloadWithProgress(t *testing.T) { fs: fs, } var outBuffer bytes.Buffer - err := downloader.downloadWithProgress(t.Context(), &outBuffer, false, tc.source, "someVersion.raw") + err := downloader.downloadWithProgress(context.Background(), &outBuffer, false, tc.source, "someVersion.raw") if tc.wantErr { assert.Error(err) return @@ -166,7 +167,7 @@ func TestDownload(t *testing.T) { fs: fs, } var outBuffer bytes.Buffer - gotDestination, err := downloader.Download(t.Context(), &outBuffer, false, tc.source, "someVersion") + gotDestination, err := downloader.Download(context.Background(), &outBuffer, false, tc.source, "someVersion") if tc.wantErr { assert.Error(err) return diff --git a/internal/installer/installer.go b/internal/installer/installer.go index 324815b74..dd26ea12e 100644 --- a/internal/installer/installer.go +++ b/internal/installer/installer.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package installer provides functionality to install binary components of supported kubernetes versions. 
diff --git a/internal/installer/installer_test.go b/internal/installer/installer_test.go index 517a070de..1e346f434 100644 --- a/internal/installer/installer_test.go +++ b/internal/installer/installer_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package installer @@ -132,7 +132,7 @@ func TestInstall(t *testing.T) { retriable: func(_ error) bool { return false }, } - err := inst.Install(t.Context(), tc.component) + err := inst.Install(context.Background(), tc.component) if tc.wantErr { assert.Error(err) return @@ -340,7 +340,7 @@ func TestRetryDownloadToTempDir(t *testing.T) { } // abort retryDownloadToTempDir in some test cases by using the context - ctx, cancel := context.WithCancel(t.Context()) + ctx, cancel := context.WithCancel(context.Background()) defer cancel() wg := sync.WaitGroup{} @@ -429,7 +429,7 @@ func TestDownloadToTempDir(t *testing.T) { fs: &afero.Afero{Fs: afs}, hClient: &hClient, } - path, err := inst.downloadToTempDir(t.Context(), "http://server/path") + path, err := inst.downloadToTempDir(context.Background(), "http://server/path") if tc.wantErr { assert.Error(err) return diff --git a/internal/kms/config/config.go b/internal/kms/config/config.go index 92f54979e..5af6d3e39 100644 --- a/internal/kms/config/config.go +++ b/internal/kms/config/config.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package config provides configuration constants for the KeyService. diff --git a/internal/kms/kms/aws/aws.go b/internal/kms/kms/aws/aws.go index 9efe03a75..e47cbb9da 100644 --- a/internal/kms/kms/aws/aws.go +++ b/internal/kms/kms/aws/aws.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package aws implements a KMS backend for AWS KMS. diff --git a/internal/kms/kms/azure/azure.go b/internal/kms/kms/azure/azure.go index abbf34ed2..64deec26e 100644 --- a/internal/kms/kms/azure/azure.go +++ b/internal/kms/kms/azure/azure.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package azure implements KMS backends for Azure Key Vault and Azure managed HSM. 
diff --git a/internal/kms/kms/cluster/cluster.go b/internal/kms/kms/cluster/cluster.go index a9bc0bab2..6ade22f40 100644 --- a/internal/kms/kms/cluster/cluster.go +++ b/internal/kms/kms/cluster/cluster.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* diff --git a/internal/kms/kms/cluster/cluster_test.go b/internal/kms/kms/cluster/cluster_test.go index f276f096e..d9ec6d7a1 100644 --- a/internal/kms/kms/cluster/cluster_test.go +++ b/internal/kms/kms/cluster/cluster_test.go @@ -1,12 +1,13 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package cluster import ( + "context" "strings" "testing" @@ -28,7 +29,7 @@ func TestClusterKMS(t *testing.T) { require.NoError(err) keyLower, err := kms.GetDEK( - t.Context(), + context.Background(), strings.ToLower(testVector.InfoPrefix+testVector.Info), int(testVector.Length), ) @@ -37,7 +38,7 @@ func TestClusterKMS(t *testing.T) { // output of the KMS should be case sensitive keyUpper, err := kms.GetDEK( - t.Context(), + context.Background(), strings.ToUpper(testVector.InfoPrefix+testVector.Info), int(testVector.Length), ) @@ -104,7 +105,7 @@ func TestVectorsHKDF(t *testing.T) { } require.NoError(err) - out, err := kms.GetDEK(t.Context(), tc.dekID, int(tc.dekSize)) + out, err := kms.GetDEK(context.Background(), tc.dekID, int(tc.dekSize)) require.NoError(err) assert.Equal(tc.wantKey, out) }) diff --git a/internal/kms/kms/gcp/gcp.go b/internal/kms/kms/gcp/gcp.go index 30a02449d..dfbdef9af 100644 --- a/internal/kms/kms/gcp/gcp.go +++ b/internal/kms/kms/gcp/gcp.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* diff --git a/internal/kms/kms/internal/internal.go b/internal/kms/kms/internal/internal.go index b6af19ef8..914295a43 100644 --- a/internal/kms/kms/internal/internal.go +++ b/internal/kms/kms/internal/internal.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* diff --git a/internal/kms/kms/internal/internal_test.go b/internal/kms/kms/internal/internal_test.go index 3058b4d7c..a7fc25ca6 100644 --- a/internal/kms/kms/internal/internal_test.go +++ b/internal/kms/kms/internal/internal_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package internal @@ -135,7 +135,7 @@ func TestGetDEK(t *testing.T) { Storage: tc.storage, } - dek, err := client.GetDEK(t.Context(), "volume-01", 32) + dek, err := client.GetDEK(context.Background(), "volume-01", 32) if tc.wantErr { assert.Error(err) } else { diff --git a/internal/kms/kms/kms.go b/internal/kms/kms/kms.go index fe63957f2..d14eb435e 100644 --- a/internal/kms/kms/kms.go +++ b/internal/kms/kms/kms.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package kms provides an abstract interface for Key Management Services. 
diff --git a/internal/kms/setup/setup.go b/internal/kms/setup/setup.go index 99f4bcf6c..eee089e77 100644 --- a/internal/kms/setup/setup.go +++ b/internal/kms/setup/setup.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* diff --git a/internal/kms/setup/setup_test.go b/internal/kms/setup/setup_test.go index 1c8ee75e4..73bb29565 100644 --- a/internal/kms/setup/setup_test.go +++ b/internal/kms/setup/setup_test.go @@ -1,12 +1,13 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package setup import ( + "context" "testing" "github.com/edgelesssys/constellation/v2/internal/kms/uri" @@ -25,12 +26,12 @@ func TestMain(m *testing.M) { func TestSetUpKMS(t *testing.T) { assert := assert.New(t) - kms, err := KMS(t.Context(), "storage://unknown", "kms://unknown") + kms, err := KMS(context.Background(), "storage://unknown", "kms://unknown") assert.Error(err) assert.Nil(kms) masterSecret := uri.MasterSecret{Key: []byte("key"), Salt: []byte("salt")} - kms, err = KMS(t.Context(), "storage://no-store", masterSecret.EncodeToURI()) + kms, err = KMS(context.Background(), "storage://no-store", masterSecret.EncodeToURI()) assert.NoError(err) assert.NotNil(kms) } diff --git a/internal/kms/storage/awss3/awss3.go b/internal/kms/storage/awss3/awss3.go index c3d59c503..535ab944e 100644 --- a/internal/kms/storage/awss3/awss3.go +++ b/internal/kms/storage/awss3/awss3.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package awss3 implements a storage backend for the KMS using AWS S3: https://aws.amazon.com/s3/ diff --git a/internal/kms/storage/awss3/awss3_test.go b/internal/kms/storage/awss3/awss3_test.go index 153bbd209..4e07ab84d 100644 --- a/internal/kms/storage/awss3/awss3_test.go +++ b/internal/kms/storage/awss3/awss3_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package awss3 @@ -80,7 +80,7 @@ func TestAWSS3Get(t *testing.T) { client: tc.client, } - out, err := store.Get(t.Context(), "test-key") + out, err := store.Get(context.Background(), "test-key") if tc.wantErr { assert.Error(err) @@ -122,7 +122,7 @@ func TestAWSS3Put(t *testing.T) { testData := []byte{0x1, 0x2, 0x3} - err := store.Put(t.Context(), "test-key", testData) + err := store.Put(context.Background(), "test-key", testData) if tc.wantErr { assert.Error(err) } else { @@ -163,7 +163,7 @@ func TestAWSS3CreateBucket(t *testing.T) { client: tc.client, } - err := store.createBucket(t.Context(), "test-bucket", "test-region") + err := store.createBucket(context.Background(), "test-bucket", "test-region") if tc.wantErr { assert.Error(err) } else { diff --git a/internal/kms/storage/azureblob/azureblob.go b/internal/kms/storage/azureblob/azureblob.go index 36483a684..e7e41424e 100644 --- a/internal/kms/storage/azureblob/azureblob.go +++ b/internal/kms/storage/azureblob/azureblob.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package azureblob implements a storage backend for the KMS using Azure Blob Storage. 
diff --git a/internal/kms/storage/azureblob/azureblob_test.go b/internal/kms/storage/azureblob/azureblob_test.go index 19c590be4..93a5f2987 100644 --- a/internal/kms/storage/azureblob/azureblob_test.go +++ b/internal/kms/storage/azureblob/azureblob_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package azureblob @@ -51,7 +51,7 @@ func TestAzureGet(t *testing.T) { container: "test", } - out, err := client.Get(t.Context(), "test-key") + out, err := client.Get(context.Background(), "test-key") if tc.wantErr { assert.Error(err) @@ -93,7 +93,7 @@ func TestAzurePut(t *testing.T) { container: "test", } - err := client.Put(t.Context(), "test-key", testData) + err := client.Put(context.Background(), "test-key", testData) if tc.wantErr { assert.Error(err) return @@ -130,7 +130,7 @@ func TestCreateContainerOrContinue(t *testing.T) { container: "test", } - err := client.createContainerOrContinue(t.Context()) + err := client.createContainerOrContinue(context.Background()) if tc.wantErr { assert.Error(err) } else { diff --git a/internal/kms/storage/gcs/gcs.go b/internal/kms/storage/gcs/gcs.go index f3c19ef2b..ca53bf55f 100644 --- a/internal/kms/storage/gcs/gcs.go +++ b/internal/kms/storage/gcs/gcs.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package gcs implements a storage backend for the KMS using Google Cloud Storage (GCS). diff --git a/internal/kms/storage/gcs/gcs_test.go b/internal/kms/storage/gcs/gcs_test.go index 7d3d8dd27..5678afee5 100644 --- a/internal/kms/storage/gcs/gcs_test.go +++ b/internal/kms/storage/gcs/gcs_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package gcs @@ -103,7 +103,7 @@ func TestGCPGet(t *testing.T) { bucketName: "test", } - out, err := client.Get(t.Context(), "test-key") + out, err := client.Get(context.Background(), "test-key") if tc.wantErr { assert.Error(err) @@ -160,7 +160,7 @@ func TestGCPPut(t *testing.T) { } testData := []byte{0x1, 0x2, 0x3} - err := client.Put(t.Context(), "test-key", testData) + err := client.Put(context.Background(), "test-key", testData) if tc.wantErr { assert.Error(err) } else { @@ -211,7 +211,7 @@ func TestGCPCreateContainerOrContinue(t *testing.T) { bucketName: "test", } - err := client.createContainerOrContinue(t.Context(), "project") + err := client.createContainerOrContinue(context.Background(), "project") if tc.wantErr { assert.Error(err) } else { diff --git a/internal/kms/storage/memfs/memfs.go b/internal/kms/storage/memfs/memfs.go index 98f2d65af..3acb4ca53 100644 --- a/internal/kms/storage/memfs/memfs.go +++ b/internal/kms/storage/memfs/memfs.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package memfs implements a storage backend for the KMS that stores keys in memory only. 
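The memfs backend noted above, and its tests that follow, use a simple Get/Put storage contract keyed by string. A minimal in-memory sketch with that shape; the error value and field names here are illustrative, and the real package may signal an unset key differently:

```go
package memsketch

import (
	"context"
	"errors"
	"sync"
)

var errNotFound = errors.New("key not stored")

// Storage is a concurrency-safe in-memory key-value store.
type Storage struct {
	mux  sync.RWMutex
	data map[string][]byte
}

func New() *Storage {
	return &Storage{data: make(map[string][]byte)}
}

// Get returns the value stored under key, or an error if it was never put.
func (s *Storage) Get(_ context.Context, key string) ([]byte, error) {
	s.mux.RLock()
	defer s.mux.RUnlock()
	val, ok := s.data[key]
	if !ok {
		return nil, errNotFound
	}
	return append([]byte(nil), val...), nil
}

// Put stores data under key, overwriting any previous value.
func (s *Storage) Put(_ context.Context, key string, data []byte) error {
	s.mux.Lock()
	defer s.mux.Unlock()
	s.data[key] = append([]byte(nil), data...)
	return nil
}
```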
diff --git a/internal/kms/storage/memfs/memfs_test.go b/internal/kms/storage/memfs/memfs_test.go index cad508632..98d246d1b 100644 --- a/internal/kms/storage/memfs/memfs_test.go +++ b/internal/kms/storage/memfs/memfs_test.go @@ -1,12 +1,13 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package memfs import ( + "context" "testing" "github.com/edgelesssys/constellation/v2/internal/kms/storage" @@ -29,7 +30,7 @@ func TestMemMapStorage(t *testing.T) { testDEK1 := []byte("test DEK") testDEK2 := []byte("more test DEK") - ctx := t.Context() + ctx := context.Background() // request unset value _, err := store.Get(ctx, "test:input") diff --git a/internal/kms/storage/storage.go b/internal/kms/storage/storage.go index 21cc04146..d8ec42c1e 100644 --- a/internal/kms/storage/storage.go +++ b/internal/kms/storage/storage.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* diff --git a/internal/kms/test/aws_test.go b/internal/kms/test/aws_test.go index bc1084c5b..073a80946 100644 --- a/internal/kms/test/aws_test.go +++ b/internal/kms/test/aws_test.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package test @@ -34,7 +34,7 @@ func TestAwsStorage(t *testing.T) { } require := require.New(t) - ctx, cancel := context.WithTimeout(t.Context(), time.Second*30) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() // create bucket @@ -105,7 +105,7 @@ func TestAwsKms(t *testing.T) { require := require.New(t) store := memfs.New() - ctx, cancel := context.WithTimeout(t.Context(), time.Second*30) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() cfg := uri.AWSConfig{ diff --git a/internal/kms/test/azure_test.go b/internal/kms/test/azure_test.go index d5633b70a..855b4dd54 100644 --- a/internal/kms/test/azure_test.go +++ b/internal/kms/test/azure_test.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package test @@ -31,7 +31,7 @@ func TestAzureStorage(t *testing.T) { } require := require.New(t) - ctx, cancel := context.WithTimeout(t.Context(), time.Second*30) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() cfg := uri.AzureBlobConfig{ @@ -59,7 +59,7 @@ func TestAzureKeyKMS(t *testing.T) { require := require.New(t) store := memfs.New() - ctx, cancel := context.WithTimeout(t.Context(), time.Second*30) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() cfg := uri.AzureConfig{ @@ -88,7 +88,7 @@ func TestAzureKeyHSM(t *testing.T) { require := require.New(t) store := memfs.New() - ctx, cancel := context.WithTimeout(t.Context(), time.Second*30) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() cfg := uri.AzureConfig{ diff --git a/internal/kms/test/gcp_test.go b/internal/kms/test/gcp_test.go index 598db9c13..35162e0f1 100644 --- a/internal/kms/test/gcp_test.go +++ b/internal/kms/test/gcp_test.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package test @@ -32,7 +32,7 @@ func TestGCPKMS(t *testing.T) { require := require.New(t) store := memfs.New() - ctx, cancel := context.WithTimeout(t.Context(), time.Second*30) + ctx, 
cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() cfg := uri.GCPConfig{ @@ -59,7 +59,7 @@ func TestGcpStorage(t *testing.T) { } require := require.New(t) - ctx, cancel := context.WithTimeout(t.Context(), time.Second*30) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() cfg := uri.GoogleCloudStorageConfig{ diff --git a/internal/kms/test/integration_test.go b/internal/kms/test/integration_test.go index d63834f44..bd6dccd80 100644 --- a/internal/kms/test/integration_test.go +++ b/internal/kms/test/integration_test.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package test provides integration tests for KMS and storage backends. @@ -64,7 +64,7 @@ func runKMSTest(t *testing.T, kms kms.CloudKMS) { dekName := "test-dek" - ctx, cancel := context.WithTimeout(t.Context(), time.Second*30) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() res, err := kms.GetDEK(ctx, dekName, config.SymmetricKeyLength) @@ -90,7 +90,7 @@ func runStorageTest(t *testing.T, store kms.Storage) { testData := []byte("Constellation test data") testName := "constellation-test" - ctx, cancel := context.WithTimeout(t.Context(), time.Second*30) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() err := store.Put(ctx, testName, testData) diff --git a/internal/kms/uri/uri.go b/internal/kms/uri/uri.go index bcc3a5d5e..6a3de8887 100644 --- a/internal/kms/uri/uri.go +++ b/internal/kms/uri/uri.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* diff --git a/internal/kms/uri/uri_test.go b/internal/kms/uri/uri_test.go index 5532dc2c3..b7e2ea388 100644 --- a/internal/kms/uri/uri_test.go +++ b/internal/kms/uri/uri_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package uri diff --git a/internal/kubernetes/configmaps.go b/internal/kubernetes/configmaps.go index 3ad5f90af..0aed90a05 100644 --- a/internal/kubernetes/configmaps.go +++ b/internal/kubernetes/configmaps.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package kubernetes diff --git a/internal/kubernetes/configmaps_test.go b/internal/kubernetes/configmaps_test.go index 702ab4d5e..96c3f475d 100644 --- a/internal/kubernetes/configmaps_test.go +++ b/internal/kubernetes/configmaps_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package kubernetes diff --git a/internal/kubernetes/kubectl/kubectl.go b/internal/kubernetes/kubectl/kubectl.go index 2e8ddd7f5..f61488082 100644 --- a/internal/kubernetes/kubectl/kubectl.go +++ b/internal/kubernetes/kubectl/kubectl.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* @@ -188,6 +188,65 @@ func (k *Kubectl) PatchFirstNodePodCIDR(ctx context.Context, firstNodePodCIDR st return err } +// EnforceCoreDNSSpread adds a pod anti-affinity to the CoreDNS deployment to ensure that +// CoreDNS pods are spread across nodes. 
+func (k *Kubectl) EnforceCoreDNSSpread(ctx context.Context) error { + // allow CoreDNS Pods to run on uninitialized nodes, which is required by cloud-controller-manager + tolerationSeconds := int64(10) + tolerations := []corev1.Toleration{ + { + Key: "node.cloudprovider.kubernetes.io/uninitialized", + Value: "true", + Effect: corev1.TaintEffectNoSchedule, + }, + { + Key: "node.kubernetes.io/unreachable", + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoExecute, + TolerationSeconds: &tolerationSeconds, + }, + } + + deployments := k.AppsV1().Deployments("kube-system") + // retry resource update if an error occurs + return retry.RetryOnConflict(retry.DefaultRetry, func() error { + result, err := deployments.Get(ctx, "coredns", metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get Deployment to add toleration: %w", err) + } + + result.Spec.Template.Spec.Tolerations = append(result.Spec.Template.Spec.Tolerations, tolerations...) + + if result.Spec.Template.Spec.Affinity == nil { + result.Spec.Template.Spec.Affinity = &corev1.Affinity{} + } + if result.Spec.Template.Spec.Affinity.PodAntiAffinity == nil { + result.Spec.Template.Spec.Affinity.PodAntiAffinity = &corev1.PodAntiAffinity{} + } + result.Spec.Template.Spec.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution = []corev1.WeightedPodAffinityTerm{} + if result.Spec.Template.Spec.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil { + result.Spec.Template.Spec.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = []corev1.PodAffinityTerm{} + } + + result.Spec.Template.Spec.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append(result.Spec.Template.Spec.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, + corev1.PodAffinityTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "k8s-app", + Operator: metav1.LabelSelectorOpIn, + Values: []string{"kube-dns"}, + }, + }, + }, + TopologyKey: "kubernetes.io/hostname", + }) + + _, err = deployments.Update(ctx, result, metav1.UpdateOptions{}) + return err + }) +} + // AddNodeSelectorsToDeployment adds [K8s selectors] to the deployment, identified // by name and namespace. 
// diff --git a/internal/kubernetes/kubectl/kubectl_test.go b/internal/kubernetes/kubectl/kubectl_test.go index 5bdee84f7..3ca00e51d 100644 --- a/internal/kubernetes/kubectl/kubectl_test.go +++ b/internal/kubernetes/kubectl/kubectl_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package kubectl diff --git a/internal/kubernetes/kubernetes.go b/internal/kubernetes/kubernetes.go index 6d43c1b51..cf8c478da 100644 --- a/internal/kubernetes/kubernetes.go +++ b/internal/kubernetes/kubernetes.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* diff --git a/internal/kubernetes/marshal.go b/internal/kubernetes/marshal.go index 958cbf956..d402ce824 100644 --- a/internal/kubernetes/marshal.go +++ b/internal/kubernetes/marshal.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package kubernetes diff --git a/internal/kubernetes/marshal_test.go b/internal/kubernetes/marshal_test.go index 2dff4d4fd..9da401000 100644 --- a/internal/kubernetes/marshal_test.go +++ b/internal/kubernetes/marshal_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package kubernetes diff --git a/internal/kubernetes/secrets.go b/internal/kubernetes/secrets.go index 7cdfc848b..4c8847c61 100644 --- a/internal/kubernetes/secrets.go +++ b/internal/kubernetes/secrets.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package kubernetes diff --git a/internal/kubernetes/secrets_test.go b/internal/kubernetes/secrets_test.go index 972d539bc..bc91da831 100644 --- a/internal/kubernetes/secrets_test.go +++ b/internal/kubernetes/secrets_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package kubernetes diff --git a/internal/license/checker_enterprise.go b/internal/license/checker_enterprise.go index 9807c992c..f98fe7e98 100644 --- a/internal/license/checker_enterprise.go +++ b/internal/license/checker_enterprise.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package license diff --git a/internal/license/checker_enterprise_test.go b/internal/license/checker_enterprise_test.go index fd35b786c..1443ef2f1 100644 --- a/internal/license/checker_enterprise_test.go +++ b/internal/license/checker_enterprise_test.go @@ -3,13 +3,14 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package license import ( "bytes" + "context" "io" "net/http" "testing" @@ -82,7 +83,7 @@ func TestQuotaCheck(t *testing.T) { }), } - quota, err := client.CheckLicense(t.Context(), cloudprovider.Unknown, Init, tc.license) + quota, err := client.CheckLicense(context.Background(), cloudprovider.Unknown, Init, tc.license) if tc.wantError { assert.Error(err) diff --git a/internal/license/checker_oss.go b/internal/license/checker_oss.go index 3ada97f0a..58253817e 100644 --- a/internal/license/checker_oss.go +++ b/internal/license/checker_oss.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package license diff --git 
a/internal/license/file.go b/internal/license/file.go index 9df7d6ab0..01f5afdff 100644 --- a/internal/license/file.go +++ b/internal/license/file.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package license diff --git a/internal/license/file_test.go b/internal/license/file_test.go index 3114a5f27..84101dd72 100644 --- a/internal/license/file_test.go +++ b/internal/license/file_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package license diff --git a/internal/license/integration/license_integration_test.go b/internal/license/integration/license_integration_test.go index 64ba47011..f4b67f00d 100644 --- a/internal/license/integration/license_integration_test.go +++ b/internal/license/integration/license_integration_test.go @@ -3,12 +3,13 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package integration import ( + "context" "testing" "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider" @@ -38,7 +39,7 @@ func TestQuotaCheckIntegration(t *testing.T) { client := license.NewChecker() - quota, err := client.CheckLicense(t.Context(), cloudprovider.Unknown, "test", tc.license) + quota, err := client.CheckLicense(context.Background(), cloudprovider.Unknown, "test", tc.license) if tc.wantError { assert.Error(err) diff --git a/internal/license/license.go b/internal/license/license.go index 5fcd91f98..0010bd2d0 100644 --- a/internal/license/license.go +++ b/internal/license/license.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package license provides functions to check a user's Constellation license. 
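The enterprise license checker test above stubs the HTTP client so CheckLicense never touches the network. The usual Go pattern for that is a RoundTripper function adapter; the sketch below shows the generic technique with a canned JSON body, and roundTripperFunc is an assumption, not necessarily the helper the project defines:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

// roundTripperFunc adapts a plain function to the http.RoundTripper interface.
type roundTripperFunc func(*http.Request) (*http.Response, error)

func (f roundTripperFunc) RoundTrip(req *http.Request) (*http.Response, error) {
	return f(req)
}

func main() {
	client := &http.Client{
		Transport: roundTripperFunc(func(_ *http.Request) (*http.Response, error) {
			// Serve a canned body without any network I/O.
			return &http.Response{
				StatusCode: http.StatusOK,
				Body:       io.NopCloser(bytes.NewBufferString(`{"quota":256}`)),
				Header:     make(http.Header),
			}, nil
		}),
	}

	resp, err := client.Get("https://license.example.invalid/check")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(body)) // 200 {"quota":256}
}
```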
diff --git a/internal/logger/BUILD.bazel b/internal/logger/BUILD.bazel index 10f753f44..4b8daad96 100644 --- a/internal/logger/BUILD.bazel +++ b/internal/logger/BUILD.bazel @@ -12,7 +12,7 @@ go_library( visibility = ["//:__subpackages__"], deps = [ "@com_github_grpc_ecosystem_go_grpc_middleware_v2//interceptors/logging", - "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//:go_default_library", "@org_golang_google_grpc//grpclog", ], ) diff --git a/internal/logger/cmdline.go b/internal/logger/cmdline.go index 9bdfa95cf..f03b773b0 100644 --- a/internal/logger/cmdline.go +++ b/internal/logger/cmdline.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package logger diff --git a/internal/logger/grpclogger.go b/internal/logger/grpclogger.go index fead5cf8a..716b0e495 100644 --- a/internal/logger/grpclogger.go +++ b/internal/logger/grpclogger.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package logger @@ -19,28 +19,24 @@ import ( func replaceGRPCLogger(log *slog.Logger) { gl := &grpcLogger{ - logger: log, + logger: log.With(slog.String("system", "grpc"), slog.Bool("grpc_log", true)), verbosity: 0, } grpclog.SetLoggerV2(gl) } func (l *grpcLogger) log(level slog.Level, args ...interface{}) { - if l.logger.Enabled(context.Background(), level) { - var pcs [1]uintptr - runtime.Callers(3, pcs[:]) - r := slog.NewRecord(time.Now(), level, fmt.Sprint(args...), pcs[0]) - _ = l.logger.Handler().Handle(context.Background(), r) - } + var pcs [1]uintptr + runtime.Callers(3, pcs[:]) + r := slog.NewRecord(time.Now(), level, fmt.Sprint(args...), pcs[0]) + _ = l.logger.Handler().Handle(context.Background(), r) } func (l *grpcLogger) logf(level slog.Level, format string, args ...interface{}) { - if l.logger.Enabled(context.Background(), level) { - var pcs [1]uintptr - runtime.Callers(3, pcs[:]) - r := slog.NewRecord(time.Now(), level, fmt.Sprintf(format, args...), pcs[0]) - _ = l.logger.Handler().Handle(context.Background(), r) - } + var pcs [1]uintptr + runtime.Callers(3, pcs[:]) + r := slog.NewRecord(time.Now(), level, fmt.Sprintf(format, args...), pcs[0]) + _ = l.logger.Handler().Handle(context.Background(), r) } type grpcLogger struct { diff --git a/internal/logger/levelhandler.go b/internal/logger/levelhandler.go index d9b4cec29..f0e4e1544 100644 --- a/internal/logger/levelhandler.go +++ b/internal/logger/levelhandler.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package logger @@ -13,45 +13,45 @@ import ( // LevelHandler copied from the official LevelHandler example in the slog package documentation. -// levelHandler wraps a Handler with an Enabled method +// LevelHandler wraps a Handler with an Enabled method // that returns false for levels below a minimum. -type levelHandler struct { +type LevelHandler struct { level slog.Leveler handler slog.Handler } -// newLevelHandler returns a LevelHandler with the given level. +// NewLevelHandler returns a LevelHandler with the given level. // All methods except Enabled delegate to h. -func newLevelHandler(level slog.Leveler, h slog.Handler) *levelHandler { +func NewLevelHandler(level slog.Leveler, h slog.Handler) *LevelHandler { // Optimization: avoid chains of LevelHandlers. 
- if lh, ok := h.(*levelHandler); ok { + if lh, ok := h.(*LevelHandler); ok { h = lh.Handler() } - return &levelHandler{level, h} + return &LevelHandler{level, h} } // Enabled implements Handler.Enabled by reporting whether // level is at least as large as h's level. -func (h *levelHandler) Enabled(_ context.Context, level slog.Level) bool { +func (h *LevelHandler) Enabled(_ context.Context, level slog.Level) bool { return level >= h.level.Level() } // Handle implements Handler.Handle. -func (h *levelHandler) Handle(ctx context.Context, r slog.Record) error { +func (h *LevelHandler) Handle(ctx context.Context, r slog.Record) error { return h.handler.Handle(ctx, r) } // WithAttrs implements Handler.WithAttrs. -func (h *levelHandler) WithAttrs(attrs []slog.Attr) slog.Handler { - return newLevelHandler(h.level, h.handler.WithAttrs(attrs)) +func (h *LevelHandler) WithAttrs(attrs []slog.Attr) slog.Handler { + return NewLevelHandler(h.level, h.handler.WithAttrs(attrs)) } // WithGroup implements Handler.WithGroup. -func (h *levelHandler) WithGroup(name string) slog.Handler { - return newLevelHandler(h.level, h.handler.WithGroup(name)) +func (h *LevelHandler) WithGroup(name string) slog.Handler { + return NewLevelHandler(h.level, h.handler.WithGroup(name)) } // Handler returns the Handler wrapped by h. -func (h *levelHandler) Handler() slog.Handler { +func (h *LevelHandler) Handler() slog.Handler { return h.handler } diff --git a/internal/logger/log.go b/internal/logger/log.go index 0f1b23789..d8d62b13a 100644 --- a/internal/logger/log.go +++ b/internal/logger/log.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* @@ -35,11 +35,6 @@ import ( "google.golang.org/grpc" ) -// GRPCLogger returns a logger at warn level for gRPC logging. -func GRPCLogger(l *slog.Logger) *slog.Logger { - return slog.New(newLevelHandler(slog.LevelWarn, l.Handler())).WithGroup("gRPC") -} - // ReplaceGRPCLogger replaces grpc's internal logger with the given logger. 
func ReplaceGRPCLogger(l *slog.Logger) { replaceGRPCLogger(l) diff --git a/internal/maa/maa.go b/internal/maa/maa.go index cd1012cd7..fcbea6db7 100644 --- a/internal/maa/maa.go +++ b/internal/maa/maa.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package maa provides an interface for interacting with an MAA service diff --git a/internal/maa/patch.go b/internal/maa/patch.go index 28b496658..5dfed9435 100644 --- a/internal/maa/patch.go +++ b/internal/maa/patch.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package maa @@ -9,7 +9,6 @@ import ( "context" "encoding/base64" "fmt" - "io" "net/http" "github.com/Azure/azure-sdk-for-go/profiles/latest/attestation/attestation" @@ -56,11 +55,10 @@ func (p AzurePolicyPatcher) Patch(ctx context.Context, attestationURL string) er if err != nil { return fmt.Errorf("sending request: %w", err) } - defer resp.Body.Close() + resp.Body.Close() if resp.StatusCode != http.StatusOK { - body, _ := io.ReadAll(resp.Body) - return fmt.Errorf("updating attestation policy: unexpected status code: %s: %s", resp.Status, string(body)) + return fmt.Errorf("updating attestation policy: unexpected status code: %s", resp.Status) } return nil diff --git a/internal/maa/patch_test.go b/internal/maa/patch_test.go index af87a8432..f00c30c7c 100644 --- a/internal/maa/patch_test.go +++ b/internal/maa/patch_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package maa diff --git a/internal/mpimage/mpimage.go b/internal/mpimage/mpimage.go index b25526d20..89b6d1fa9 100644 --- a/internal/mpimage/mpimage.go +++ b/internal/mpimage/mpimage.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // The mpimage package provides utilities for handling CSP marketplace OS images. diff --git a/internal/mpimage/uri.go b/internal/mpimage/uri.go index 9a41fafac..36c13afb2 100644 --- a/internal/mpimage/uri.go +++ b/internal/mpimage/uri.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package mpimage diff --git a/internal/mpimage/uri_test.go b/internal/mpimage/uri_test.go index cf7eac912..f7dfd3fe1 100644 --- a/internal/mpimage/uri_test.go +++ b/internal/mpimage/uri_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package mpimage diff --git a/internal/nodestate/nodestate.go b/internal/nodestate/nodestate.go index e31dee7e8..40e8113c7 100644 --- a/internal/nodestate/nodestate.go +++ b/internal/nodestate/nodestate.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package nodestate is used to persist the state of a Constellation node to disk. 
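The logger changes above export LevelHandler/NewLevelHandler and drop the GRPCLogger convenience wrapper, so callers wire up the warn-level gRPC logger themselves. A usage sketch under that assumption (it mirrors the removed helper and only compiles inside the repo, since internal/logger is an internal package):

```go
package main

import (
	"log/slog"
	"os"

	"github.com/edgelesssys/constellation/v2/internal/logger"
)

func main() {
	base := slog.New(slog.NewJSONHandler(os.Stderr, nil))

	// Wrap the base handler so only warnings and errors from gRPC reach the output.
	grpcLog := slog.New(logger.NewLevelHandler(slog.LevelWarn, base.Handler())).WithGroup("gRPC")
	logger.ReplaceGRPCLogger(grpcLog)

	grpcLog.Info("dropped")            // below LevelWarn, filtered by the wrapper
	grpcLog.Warn("connection retried") // passes through to the JSON handler
}
```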
diff --git a/internal/nodestate/nodestate_test.go b/internal/nodestate/nodestate_test.go index 576242a48..41081f87f 100644 --- a/internal/nodestate/nodestate_test.go +++ b/internal/nodestate/nodestate_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package nodestate diff --git a/internal/osimage/archive/archive.go b/internal/osimage/archive/archive.go index e4c9f6e2d..f42b48e4c 100644 --- a/internal/osimage/archive/archive.go +++ b/internal/osimage/archive/archive.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // package archive is used to archive OS images in S3. @@ -74,7 +74,7 @@ func (a *Archivist) Archive(ctx context.Context, version versionsapi.Version, cs if err != nil { return "", err } - a.log.Debug(fmt.Sprintf("Archiving OS image %q to s3://%s/%s", fmt.Sprintf("%s %s %v", csp, attestationVariant, version.ShortPath()), a.bucket, key)) + a.log.Debug(fmt.Sprintf("Archiving OS image %s %s %v to s3://%v/%v", csp, attestationVariant, version.ShortPath(), a.bucket, key)) _, err = a.uploadClient.Upload(ctx, &s3.PutObjectInput{ Bucket: &a.bucket, Key: &key, diff --git a/internal/osimage/imageinfo/imageinfo.go b/internal/osimage/imageinfo/imageinfo.go index 95a30595c..a26ab24a5 100644 --- a/internal/osimage/imageinfo/imageinfo.go +++ b/internal/osimage/imageinfo/imageinfo.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // package imageinfo is used to upload image info JSON files to S3. @@ -78,7 +78,7 @@ func (a *Uploader) Upload(ctx context.Context, imageInfo versionsapi.ImageInfo) if err != nil { return "", err } - a.log.Debug(fmt.Sprintf("Archiving image info to s3://%s/%s", a.bucket, key)) + a.log.Debug(fmt.Sprintf("Archiving image info to s3://%v/%v", a.bucket, key)) buf := &bytes.Buffer{} if err := json.NewEncoder(buf).Encode(imageInfo); err != nil { return "", err diff --git a/internal/osimage/measurementsuploader/measurementsuploader.go b/internal/osimage/measurementsuploader/measurementsuploader.go index 928089e10..1e6c9ffa0 100644 --- a/internal/osimage/measurementsuploader/measurementsuploader.go +++ b/internal/osimage/measurementsuploader/measurementsuploader.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // package measurementsuploader is used to upload measurements (v2) JSON files (and signatures) to S3. @@ -92,7 +92,7 @@ func (a *Uploader) Upload(ctx context.Context, rawMeasurement, signature io.Read if err != nil { return "", "", err } - a.log.Debug(fmt.Sprintf("Archiving image measurements to s3://%s/%s and s3://%s/%s", a.bucket, key, a.bucket, sigKey)) + a.log.Debug(fmt.Sprintf("Archiving image measurements to s3://%v/%v and s3://%v/%v", a.bucket, key, a.bucket, sigKey)) if _, err = a.uploadClient.Upload(ctx, &s3.PutObjectInput{ Bucket: &a.bucket, Key: &key, diff --git a/internal/osimage/nop/nop.go b/internal/osimage/nop/nop.go index 43c57000b..5618acf03 100644 --- a/internal/osimage/nop/nop.go +++ b/internal/osimage/nop/nop.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // package nop implements a no-op for CSPs that don't require custom image upload functionality. 
@@ -28,6 +28,6 @@ func New(log *slog.Logger) *Uploader { // Upload pretends to upload images to a csp. func (u *Uploader) Upload(_ context.Context, req *osimage.UploadRequest) ([]versionsapi.ImageInfoEntry, error) { - u.log.Debug(fmt.Sprintf("Skipping image upload of %q since this CSP does not require images to be uploaded in advance.", req.Version.ShortPath())) + u.log.Debug(fmt.Sprintf("Skipping image upload of %s since this CSP does not require images to be uploaded in advance.", req.Version.ShortPath())) return nil, nil } diff --git a/internal/osimage/osimage.go b/internal/osimage/osimage.go index 035c0d966..9e0cfdc1a 100644 --- a/internal/osimage/osimage.go +++ b/internal/osimage/osimage.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // package osimage is used to handle osimages in the CI (uploading and maintenance). diff --git a/internal/osimage/secureboot/secureboot.go b/internal/osimage/secureboot/secureboot.go index 470982342..363355efe 100644 --- a/internal/osimage/secureboot/secureboot.go +++ b/internal/osimage/secureboot/secureboot.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // package secureboot holds secure boot configuration for image uploads. diff --git a/internal/osimage/secureboot/secureboot_test.go b/internal/osimage/secureboot/secureboot_test.go index d9ac7e98c..679888190 100644 --- a/internal/osimage/secureboot/secureboot_test.go +++ b/internal/osimage/secureboot/secureboot_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package secureboot diff --git a/internal/osimage/secureboot/zlibdict.go b/internal/osimage/secureboot/zlibdict.go index 61511653b..34e81369b 100644 --- a/internal/osimage/secureboot/zlibdict.go +++ b/internal/osimage/secureboot/zlibdict.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package secureboot diff --git a/internal/osimage/uplosi/uplosi.conf.in b/internal/osimage/uplosi/uplosi.conf.in index 2978d31e0..74ec45434 100644 --- a/internal/osimage/uplosi/uplosi.conf.in +++ b/internal/osimage/uplosi/uplosi.conf.in @@ -12,7 +12,6 @@ subscriptionID = "0d202bbb-4fa7-4af8-8125-58c269a05435" location = "northeurope" resourceGroup = "constellation-images" sharingNamePrefix = "constellation" -sharingProfile = "community" sku = "constellation" publisher = "edgelesssys" diff --git a/internal/osimage/uplosi/uplosiupload.go b/internal/osimage/uplosi/uplosiupload.go index bf2d0f1e3..2b54b2c33 100644 --- a/internal/osimage/uplosi/uplosiupload.go +++ b/internal/osimage/uplosi/uplosiupload.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // package uplosi implements uploading os images using uplosi. 
@@ -255,7 +255,7 @@ func azureReplicationRegions(attestationVariant string) []string { case "azure-tdx": return []string{"northeurope", "westeurope", "centralus", "eastus2"} case "azure-sev-snp": - return []string{"northeurope", "westeurope", "germanywestcentral", "eastus", "eastus2", "westus", "southeastasia"} + return []string{"northeurope", "westeurope", "germanywestcentral", "eastus", "westus", "southeastasia"} } return nil } diff --git a/internal/retry/retry.go b/internal/retry/retry.go index 7ff799660..ab8bbdfdc 100644 --- a/internal/retry/retry.go +++ b/internal/retry/retry.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package retry provides a simple interface for retrying operations. diff --git a/internal/retry/retry_test.go b/internal/retry/retry_test.go index c4dc68227..8885ac715 100644 --- a/internal/retry/retry_test.go +++ b/internal/retry/retry_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package retry @@ -71,7 +71,7 @@ func TestDo(t *testing.T) { retriable: isRetriable, } retrierResult := make(chan error, 1) - ctx, cancel := context.WithCancel(t.Context()) + ctx, cancel := context.WithCancel(context.Background()) defer cancel() go func() { retrierResult <- retrier.Do(ctx) }() diff --git a/internal/role/role.go b/internal/role/role.go index 6259764d6..4288bdae8 100644 --- a/internal/role/role.go +++ b/internal/role/role.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package role diff --git a/internal/role/role_test.go b/internal/role/role_test.go index 1c7132349..4ad65ea4b 100644 --- a/internal/role/role_test.go +++ b/internal/role/role_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package role diff --git a/internal/semver/semver.go b/internal/semver/semver.go index 4feb735be..72dc19f51 100644 --- a/internal/semver/semver.go +++ b/internal/semver/semver.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* diff --git a/internal/semver/semver_test.go b/internal/semver/semver_test.go index 4f9ad658e..30b798514 100644 --- a/internal/semver/semver_test.go +++ b/internal/semver/semver_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package semver diff --git a/internal/sigstore/keyselect/keyselect.go b/internal/sigstore/keyselect/keyselect.go index ecfb10bf5..7e08e09a5 100644 --- a/internal/sigstore/keyselect/keyselect.go +++ b/internal/sigstore/keyselect/keyselect.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package keyselect is used to select the correct public key for signature verification. 
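The retry package tests above configure a retrier with a `retriable` predicate and abort it through a cancellable context. The sketch below is a generic approximation of that pattern, not the package's actual implementation: keep invoking the operation while its error is classified as retriable and the context is still alive.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// do retries op until it succeeds, returns a non-retriable error, or ctx is done.
func do(ctx context.Context, op func(context.Context) error, retriable func(error) bool, interval time.Duration) error {
	for {
		err := op(ctx)
		if err == nil {
			return nil
		}
		if !retriable(err) {
			return err
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(interval):
		}
	}
}

func main() {
	attempts := 0
	err := do(context.Background(),
		func(context.Context) error {
			attempts++
			if attempts < 3 {
				return errors.New("transient")
			}
			return nil
		},
		func(err error) bool { return err.Error() == "transient" },
		10*time.Millisecond,
	)
	fmt.Println(attempts, err) // 3 <nil>
}
```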
diff --git a/internal/sigstore/rekor.go b/internal/sigstore/rekor.go index a116bc011..a45377bd3 100644 --- a/internal/sigstore/rekor.go +++ b/internal/sigstore/rekor.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package sigstore diff --git a/internal/sigstore/rekor_integration_test.go b/internal/sigstore/rekor_integration_test.go index 8f2a042ce..4870109fb 100644 --- a/internal/sigstore/rekor_integration_test.go +++ b/internal/sigstore/rekor_integration_test.go @@ -3,12 +3,13 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package sigstore import ( + "context" "testing" "github.com/stretchr/testify/assert" @@ -42,7 +43,7 @@ func TestRekorSearchByHash(t *testing.T) { rekor, err := NewRekor() require.NoError(err) - uuids, err := rekor.SearchByHash(t.Context(), tc.hash) + uuids, err := rekor.SearchByHash(context.Background(), tc.hash) assert.NoError(err) if tc.wantEmpty { @@ -84,7 +85,7 @@ func TestVerifyEntry(t *testing.T) { rekor, err := NewRekor() require.NoError(err) - err = rekor.VerifyEntry(t.Context(), tc.uuid, tc.pubKey) + err = rekor.VerifyEntry(context.Background(), tc.uuid, tc.pubKey) if tc.wantError { assert.Error(err) return diff --git a/internal/sigstore/rekor_test.go b/internal/sigstore/rekor_test.go index 6d2db56c0..cbbeb3ddd 100644 --- a/internal/sigstore/rekor_test.go +++ b/internal/sigstore/rekor_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package sigstore diff --git a/internal/sigstore/sign.go b/internal/sigstore/sign.go index 84fb58652..a98e6be9d 100644 --- a/internal/sigstore/sign.go +++ b/internal/sigstore/sign.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package sigstore diff --git a/internal/sigstore/sign_test.go b/internal/sigstore/sign_test.go index 16659c04e..fefbdabd3 100644 --- a/internal/sigstore/sign_test.go +++ b/internal/sigstore/sign_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package sigstore diff --git a/internal/sigstore/sigstore.go b/internal/sigstore/sigstore.go index dee491aed..0c90bd898 100644 --- a/internal/sigstore/sigstore.go +++ b/internal/sigstore/sigstore.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package sigstore is used to verify Constellation components using sigstore, cosign and rekor. 
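The Rekor integration test above searches the transparency log by artifact hash and then verifies an entry against a public key. A usage sketch of that flow, assuming string parameters as the test table suggests; the hash and PEM key below are placeholders, and the exact signatures are an assumption:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/edgelesssys/constellation/v2/internal/sigstore"
)

func main() {
	ctx := context.Background()

	rekor, err := sigstore.NewRekor()
	if err != nil {
		log.Fatal(err)
	}

	// Look up log entries for the artifact's SHA-256 digest (placeholder value).
	uuids, err := rekor.SearchByHash(ctx, "sha256-digest-of-artifact")
	if err != nil || len(uuids) == 0 {
		log.Fatalf("no transparency log entries found: %v", err)
	}

	// Verify the first matching entry against the signer's public key (PEM, placeholder).
	if err := rekor.VerifyEntry(ctx, uuids[0], "-----BEGIN PUBLIC KEY-----\n...\n-----END PUBLIC KEY-----"); err != nil {
		log.Fatal(err)
	}
	fmt.Println("rekor entry verified")
}
```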
diff --git a/internal/sigstore/verify.go b/internal/sigstore/verify.go index f24194c56..a13a3285e 100644 --- a/internal/sigstore/verify.go +++ b/internal/sigstore/verify.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package sigstore diff --git a/internal/sigstore/verify_test.go b/internal/sigstore/verify_test.go index 7f5fd9fe6..ef7952b7a 100644 --- a/internal/sigstore/verify_test.go +++ b/internal/sigstore/verify_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package sigstore diff --git a/internal/staticupload/delete.go b/internal/staticupload/delete.go index 5c87b3ea8..1d3fd12e1 100644 --- a/internal/staticupload/delete.go +++ b/internal/staticupload/delete.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package staticupload diff --git a/internal/staticupload/get.go b/internal/staticupload/get.go index db96b17a4..893fd243b 100644 --- a/internal/staticupload/get.go +++ b/internal/staticupload/get.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package staticupload diff --git a/internal/staticupload/staticupload.go b/internal/staticupload/staticupload.go index 2d4f21953..fd09734ad 100644 --- a/internal/staticupload/staticupload.go +++ b/internal/staticupload/staticupload.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* @@ -134,7 +134,7 @@ func (c *Client) Flush(ctx context.Context) error { c.mux.Lock() defer c.mux.Unlock() - c.logger.Debug(fmt.Sprintf("Invalidating keys: %q", c.dirtyKeys)) + c.logger.Debug(fmt.Sprintf("Invalidating keys: %s", c.dirtyKeys)) if len(c.dirtyKeys) == 0 { return nil } @@ -219,7 +219,7 @@ func (c *Client) waitForInvalidations(ctx context.Context) error { } waiter := cloudfront.NewInvalidationCompletedWaiter(c.cdnClient) - c.logger.Debug(fmt.Sprintf("Waiting for invalidations %v in distribution %q", c.invalidationIDs, c.distributionID)) + c.logger.Debug(fmt.Sprintf("Waiting for invalidations %s in distribution %s", c.invalidationIDs, c.distributionID)) for _, invalidationID := range c.invalidationIDs { waitIn := &cloudfront.GetInvalidationInput{ DistributionId: &c.distributionID, diff --git a/internal/staticupload/staticupload_test.go b/internal/staticupload/staticupload_test.go index 7694afa0c..eace5cc1a 100644 --- a/internal/staticupload/staticupload_test.go +++ b/internal/staticupload/staticupload_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package staticupload @@ -108,7 +108,7 @@ func TestUpload(t *testing.T) { cacheInvalidationWaitTimeout: tc.cacheInvalidationWaitTimeout, logger: logger.NewTest(t), } - _, err := client.Upload(t.Context(), tc.in) + _, err := client.Upload(context.Background(), tc.in) var invalidationErr *InvalidationError if tc.wantCacheInvalidationErr { @@ -220,7 +220,7 @@ func TestDeleteObject(t *testing.T) { cacheInvalidationWaitTimeout: tc.cacheInvalidationWaitTimeout, logger: logger.NewTest(t), } - _, err := client.DeleteObject(t.Context(), newObjectInput(tc.nilInput, tc.nilKey)) + _, err := client.DeleteObject(context.Background(), newObjectInput(tc.nilInput, tc.nilKey)) var invalidationErr 
*InvalidationError if tc.wantCacheInvalidationErr { @@ -259,7 +259,7 @@ func TestDeleteObject(t *testing.T) { cacheInvalidationWaitTimeout: tc.cacheInvalidationWaitTimeout, logger: logger.NewTest(t), } - _, err := client.DeleteObjects(t.Context(), newObjectsInput(tc.nilInput, tc.nilKey)) + _, err := client.DeleteObjects(context.Background(), newObjectsInput(tc.nilInput, tc.nilKey)) var invalidationErr *InvalidationError if tc.wantCacheInvalidationErr { @@ -401,7 +401,7 @@ func TestFlush(t *testing.T) { invalidationIDs: tc.invalidationIDs, logger: logger.NewTest(t), } - err := client.Flush(t.Context()) + err := client.Flush(context.Background()) if tc.wantCacheInvalidationErr { var invalidationErr *InvalidationError @@ -444,18 +444,18 @@ func TestConcurrency(t *testing.T) { upload := func() { defer wg.Done() - _, _ = client.Upload(t.Context(), newInput()) + _, _ = client.Upload(context.Background(), newInput()) } deleteObject := func() { defer wg.Done() - _, _ = client.DeleteObject(t.Context(), &s3.DeleteObjectInput{ + _, _ = client.DeleteObject(context.Background(), &s3.DeleteObjectInput{ Bucket: ptr("test-bucket"), Key: ptr("test-key"), }) } deleteObjects := func() { defer wg.Done() - _, _ = client.DeleteObjects(t.Context(), &s3.DeleteObjectsInput{ + _, _ = client.DeleteObjects(context.Background(), &s3.DeleteObjectsInput{ Bucket: ptr("test-bucket"), Delete: &s3types.Delete{ Objects: []s3types.ObjectIdentifier{ @@ -466,7 +466,7 @@ func TestConcurrency(t *testing.T) { } flushClient := func() { defer wg.Done() - _ = client.Flush(t.Context()) + _ = client.Flush(context.Background()) } for i := 0; i < 100; i++ { diff --git a/internal/staticupload/upload.go b/internal/staticupload/upload.go index a779caae5..de954351f 100644 --- a/internal/staticupload/upload.go +++ b/internal/staticupload/upload.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package staticupload diff --git a/internal/validation/constraints.go b/internal/validation/constraints.go index 73c88529d..6de84d8d1 100644 --- a/internal/validation/constraints.go +++ b/internal/validation/constraints.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package validation diff --git a/internal/validation/constraints_test.go b/internal/validation/constraints_test.go index aa9531140..103649d56 100644 --- a/internal/validation/constraints_test.go +++ b/internal/validation/constraints_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package validation diff --git a/internal/validation/errors.go b/internal/validation/errors.go index ee06a900b..3b73a6e82 100644 --- a/internal/validation/errors.go +++ b/internal/validation/errors.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package validation diff --git a/internal/validation/errors_test.go b/internal/validation/errors_test.go index 5f12e7ef6..9fe7678ed 100644 --- a/internal/validation/errors_test.go +++ b/internal/validation/errors_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package validation diff --git a/internal/validation/validation.go b/internal/validation/validation.go index 335799cb6..30c705f15 100644 --- a/internal/validation/validation.go +++ 
b/internal/validation/validation.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* diff --git a/internal/validation/validation_test.go b/internal/validation/validation_test.go index efde331ed..da65e9528 100644 --- a/internal/validation/validation_test.go +++ b/internal/validation/validation_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package validation diff --git a/internal/verify/verify.go b/internal/verify/verify.go index 63a0cf46a..60b2e726e 100644 --- a/internal/verify/verify.go +++ b/internal/verify/verify.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* @@ -131,7 +131,7 @@ func getCertChain(cfg config.AttestationCfg) ([]byte, error) { } if awsCfg.AMDSigningKey.Equal(config.Certificate{}) { - certs, err := trust.GetProductChain(kds.ProductLine(snp.Product()), abi.VlekReportSigner, trust.DefaultHTTPSGetter()) + certs, err := trust.GetProductChain(kds.ProductString(snp.Product()), abi.VlekReportSigner, trust.DefaultHTTPSGetter()) if err != nil { return nil, fmt.Errorf("getting product certificate chain: %w", err) } @@ -157,7 +157,7 @@ func getCertChain(cfg config.AttestationCfg) ([]byte, error) { return certChain, nil } -// FormatString builds a string representation of a report that is intended for console output. +// FormatString builds a string representation of a report that is inteded for console output. func (r *Report) FormatString(b *strings.Builder) (string, error) { if len(r.ReportSigner) != 1 { return "", fmt.Errorf("expected exactly one report signing certificate, found %d", len(r.ReportSigner)) @@ -216,7 +216,7 @@ type Certificate struct { func newCertificates(certTypeName string, cert []byte, log debugLog) (certs []Certificate, err error) { newlinesTrimmed := strings.TrimSpace(string(cert)) - log.Debug(fmt.Sprintf("Decoding PEM certificate: %q", certTypeName)) + log.Debug(fmt.Sprintf("Decoding PEM certificate: %s", certTypeName)) i := 1 var rest []byte var block *pem.Block diff --git a/internal/verify/verify_test.go b/internal/verify/verify_test.go index 3bfb5707b..b0fdf3c5b 100644 --- a/internal/verify/verify_test.go +++ b/internal/verify/verify_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package verify diff --git a/internal/versions/components/components.go b/internal/versions/components/components.go index 8cb33f49b..88c36850f 100644 --- a/internal/versions/components/components.go +++ b/internal/versions/components/components.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package components diff --git a/internal/versions/components/components.pb.go b/internal/versions/components/components.pb.go index 5236421c8..76fe28755 100644 --- a/internal/versions/components/components.pb.go +++ b/internal/versions/components/components.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.6 -// protoc v5.29.1 +// protoc-gen-go v1.33.0 +// protoc v4.22.1 // source: internal/versions/components/components.proto package components @@ -11,7 +11,6 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" - unsafe "unsafe" ) const ( @@ -22,20 +21,23 @@ const ( ) type Component struct { - state protoimpl.MessageState `protogen:"open.v1"` - Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` - Hash string `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` - InstallPath string `protobuf:"bytes,3,opt,name=install_path,json=installPath,proto3" json:"install_path,omitempty"` - Extract bool `protobuf:"varint,4,opt,name=extract,proto3" json:"extract,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` + Hash string `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` + InstallPath string `protobuf:"bytes,3,opt,name=install_path,json=installPath,proto3" json:"install_path,omitempty"` + Extract bool `protobuf:"varint,4,opt,name=extract,proto3" json:"extract,omitempty"` } func (x *Component) Reset() { *x = Component{} - mi := &file_internal_versions_components_components_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_internal_versions_components_components_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *Component) String() string { @@ -46,7 +48,7 @@ func (*Component) ProtoMessage() {} func (x *Component) ProtoReflect() protoreflect.Message { mi := &file_internal_versions_components_components_proto_msgTypes[0] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -91,30 +93,39 @@ func (x *Component) GetExtract() bool { var File_internal_versions_components_components_proto protoreflect.FileDescriptor -const file_internal_versions_components_components_proto_rawDesc = "" + - "\n" + - "-internal/versions/components/components.proto\x12\n" + - "components\"n\n" + - "\tComponent\x12\x10\n" + - "\x03url\x18\x01 \x01(\tR\x03url\x12\x12\n" + - "\x04hash\x18\x02 \x01(\tR\x04hash\x12!\n" + - "\finstall_path\x18\x03 \x01(\tR\vinstallPath\x12\x18\n" + - "\aextract\x18\x04 \x01(\bR\aextractBFZDgithub.com/edgelesssys/constellation/v2/internal/versions/componentsb\x06proto3" +var file_internal_versions_components_components_proto_rawDesc = []byte{ + 0x0a, 0x2d, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x2f, 0x63, + 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x6e, 0x0a, 0x09, 0x43, + 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x61, + 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x12, 0x21, + 0x0a, 0x0c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6c, 0x6c, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x03, + 0x20, 
0x01, 0x28, 0x09, 0x52, 0x0b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6c, 0x6c, 0x50, 0x61, 0x74, + 0x68, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x07, 0x65, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x42, 0x46, 0x5a, 0x44, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x64, 0x67, 0x65, 0x6c, 0x65, + 0x73, 0x73, 0x73, 0x79, 0x73, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x65, 0x6c, 0x6c, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x32, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, + 0x6e, 0x74, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} var ( file_internal_versions_components_components_proto_rawDescOnce sync.Once - file_internal_versions_components_components_proto_rawDescData []byte + file_internal_versions_components_components_proto_rawDescData = file_internal_versions_components_components_proto_rawDesc ) func file_internal_versions_components_components_proto_rawDescGZIP() []byte { file_internal_versions_components_components_proto_rawDescOnce.Do(func() { - file_internal_versions_components_components_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_internal_versions_components_components_proto_rawDesc), len(file_internal_versions_components_components_proto_rawDesc))) + file_internal_versions_components_components_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_versions_components_components_proto_rawDescData) }) return file_internal_versions_components_components_proto_rawDescData } var file_internal_versions_components_components_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_internal_versions_components_components_proto_goTypes = []any{ +var file_internal_versions_components_components_proto_goTypes = []interface{}{ (*Component)(nil), // 0: components.Component } var file_internal_versions_components_components_proto_depIdxs = []int32{ @@ -130,11 +141,25 @@ func file_internal_versions_components_components_proto_init() { if File_internal_versions_components_components_proto != nil { return } + if !protoimpl.UnsafeEnabled { + file_internal_versions_components_components_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Component); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_internal_versions_components_components_proto_rawDesc), len(file_internal_versions_components_components_proto_rawDesc)), + RawDescriptor: file_internal_versions_components_components_proto_rawDesc, NumEnums: 0, NumMessages: 1, NumExtensions: 0, @@ -145,6 +170,7 @@ func file_internal_versions_components_components_proto_init() { MessageInfos: file_internal_versions_components_components_proto_msgTypes, }.Build() File_internal_versions_components_components_proto = out.File + file_internal_versions_components_components_proto_rawDesc = nil file_internal_versions_components_components_proto_goTypes = nil file_internal_versions_components_components_proto_depIdxs = nil } diff --git a/internal/versions/components/components_test.go b/internal/versions/components/components_test.go index fe7aea408..4b26ef3c1 100644 --- 
a/internal/versions/components/components_test.go +++ b/internal/versions/components/components_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package components diff --git a/internal/versions/hash-generator/generate.go b/internal/versions/hash-generator/generate.go index 55c4de154..107060bd6 100644 --- a/internal/versions/hash-generator/generate.go +++ b/internal/versions/hash-generator/generate.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // hash-generator updates the binary hashes and kubeadm patches in versions.go in place. diff --git a/internal/versions/hash-generator/generate_test.go b/internal/versions/hash-generator/generate_test.go index c7ea5df5c..e55c3c1ef 100644 --- a/internal/versions/hash-generator/generate_test.go +++ b/internal/versions/hash-generator/generate_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package main diff --git a/internal/versions/versions.go b/internal/versions/versions.go index 6ef5811c6..a6e9c482c 100644 --- a/internal/versions/versions.go +++ b/internal/versions/versions.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* @@ -100,12 +100,12 @@ func ResolveK8sPatchVersion(k8sVersion string) (string, error) { // supported patch version as PATCH. func k8sVersionFromMajorMinor(version string) string { switch version { - case semver.MajorMinor(string(V1_30)): - return string(V1_30) - case semver.MajorMinor(string(V1_31)): - return string(V1_31) - case semver.MajorMinor(string(V1_32)): - return string(V1_32) + case semver.MajorMinor(string(V1_27)): + return string(V1_27) + case semver.MajorMinor(string(V1_28)): + return string(V1_28) + case semver.MajorMinor(string(V1_29)): + return string(V1_29) default: return "" } @@ -169,26 +169,26 @@ const ( // GcpGuestImage image for GCP guest agent. // Check for new versions at https://github.com/GoogleCloudPlatform/guest-agent/releases and update in /.github/workflows/build-gcp-guest-agent.yml. - GcpGuestImage = "ghcr.io/edgelesssys/gcp-guest-agent:v20250718.0.0@sha256:432c6bc1e65e7b28be1656e979587387a6789117c9d9c97dd20a79eb856c2042" // renovate:container + GcpGuestImage = "ghcr.io/edgelesssys/gcp-guest-agent:v20240213.0.0@sha256:aa7b27a4f9af356bdc6bad112e2255c68cd8759fb4430e4c91a5d19ced948a3e" // renovate:container // NodeMaintenanceOperatorImage is the image for the node maintenance operator. - NodeMaintenanceOperatorImage = "quay.io/medik8s/node-maintenance-operator:v0.17.0@sha256:bf1c5758b3d266dd6234422d156c67ffdd47f50f70ce17d5cef1de6065030337" // renovate:container + NodeMaintenanceOperatorImage = "quay.io/medik8s/node-maintenance-operator:v0.15.0@sha256:8cb8dad93283268282c30e75c68f4bd76b28def4b68b563d2f9db9c74225d634" // renovate:container // LogstashImage is the container image of logstash, used for log collection by debugd. 
- LogstashImage = "ghcr.io/edgelesssys/constellation/logstash-debugd:v2.24.0-pre.0.20250715233448-3b9f7530fbba@sha256:207e24406a5fddd62189b8cd89ea3751b78113109a2da828d9b5e9fbb1b44cc7" // renovate:container + LogstashImage = "ghcr.io/edgelesssys/constellation/logstash-debugd:v2.15.0-pre.0.20231220180720-ced03202a944@sha256:54e0beb2fad83509c1d79c866652bdd94125ce5a4c9947be8c63cd74a2079e70" // renovate:container // FilebeatImage is the container image of filebeat, used for log collection by debugd. - FilebeatImage = "ghcr.io/edgelesssys/constellation/filebeat-debugd:v2.24.0-pre.0.20250715233448-3b9f7530fbba@sha256:05bc4fb1679430de5a664f0c8cb43407a00f0217eeb3cbb54f7c1088ff968da6" // renovate:container + FilebeatImage = "ghcr.io/edgelesssys/constellation/filebeat-debugd:v2.15.0-pre.0.20231220180720-ced03202a944@sha256:1a57ad12dd0d1a7514f2360f37108925e103e7d0e5b8f24b12e8f266b78d570e" // renovate:container // MetricbeatImage is the container image of filebeat, used for log collection by debugd. - MetricbeatImage = "ghcr.io/edgelesssys/constellation/metricbeat-debugd:v2.24.0-pre.0.20250715233448-3b9f7530fbba@sha256:86532d8a3f37236dd8d4a30e0446c18a296643046d4127f66a643c263d556957" // renovate:container + MetricbeatImage = "ghcr.io/edgelesssys/constellation/metricbeat-debugd:v2.15.0-pre.0.20231220180720-ced03202a944@sha256:60bdd7cd868841385da230d4eab4600235b22fe1b3e0e865dda3f9720534ea7e" // renovate:container // currently supported versions. //nolint:revive - V1_30 ValidK8sVersion = "v1.30.14" // renovate:kubernetes-release + V1_27 ValidK8sVersion = "v1.27.9" // renovate:kubernetes-release //nolint:revive - V1_31 ValidK8sVersion = "v1.31.11" // renovate:kubernetes-release + V1_28 ValidK8sVersion = "v1.28.5" // renovate:kubernetes-release //nolint:revive - V1_32 ValidK8sVersion = "v1.32.7" // renovate:kubernetes-release + V1_29 ValidK8sVersion = "v1.29.0" // renovate:kubernetes-release // Default k8s version deployed by Constellation. - Default ValidK8sVersion = V1_31 + Default ValidK8sVersion = V1_28 ) // Regenerate the hashes by running go generate. @@ -197,206 +197,206 @@ const ( // VersionConfigs holds download URLs for all required kubernetes components for every supported version. 
var VersionConfigs = map[ValidK8sVersion]KubernetesVersion{ - V1_30: { - ClusterVersion: "v1.30.14", // renovate:kubernetes-release + V1_27: { + ClusterVersion: "v1.27.9", // renovate:kubernetes-release KubernetesComponents: components.Components{ { - Url: "https://github.com/containernetworking/plugins/releases/download/v1.7.1/cni-plugins-linux-amd64-v1.7.1.tgz", // renovate:cni-plugins-release - Hash: "sha256:1a28a0506bfe5bcdc981caf1a49eeab7e72da8321f1119b7be85f22621013098", + Url: "https://github.com/containernetworking/plugins/releases/download/v1.4.0/cni-plugins-linux-amd64-v1.4.0.tgz", // renovate:cni-plugins-release + Hash: "sha256:c2485ddb3ffc176578ae30ae58137f0b88e50f7c7f2af7d53a569276b2949a33", InstallPath: constants.CniPluginsDir, Extract: true, }, { - Url: "https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.33.0/crictl-v1.33.0-linux-amd64.tar.gz", // renovate:crictl-release - Hash: "sha256:8307399e714626e69d1213a4cd18c8dec3d0201ecdac009b1802115df8973f0f", + Url: "https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.28.0/crictl-v1.28.0-linux-amd64.tar.gz", // renovate:crictl-release + Hash: "sha256:8dc78774f7cbeaf787994d386eec663f0a3cf24de1ea4893598096cb39ef2508", InstallPath: constants.BinDir, Extract: true, }, { - Url: "https://dl.k8s.io/v1.30.14/bin/linux/amd64/kubelet", // renovate:kubernetes-release - Hash: "sha256:46baa60748b179164e80f5565d99dad642d554fb431925d211ffa921b917d5c7", + Url: "https://storage.googleapis.com/kubernetes-release/release/v1.27.9/bin/linux/amd64/kubelet", // renovate:kubernetes-release + Hash: "sha256:ede60eea3acbac3f35dbb23d7b148f45cf169ebbb20af102d3ce141fc0bac60c", InstallPath: constants.KubeletPath, Extract: false, }, { - Url: "https://dl.k8s.io/v1.30.14/bin/linux/amd64/kubeadm", // renovate:kubernetes-release - Hash: "sha256:bf1f8af81af8ecf003cbc03a8700c6e94a74c183ee092bbc77b92270ada2be70", + Url: "https://storage.googleapis.com/kubernetes-release/release/v1.27.9/bin/linux/amd64/kubeadm", // renovate:kubernetes-release + Hash: "sha256:78dddac376fa2f04116022cb44ed39ccb9cb0104e05c5b21b220d5151e5c0f86", InstallPath: constants.KubeadmPath, Extract: false, }, { - Url: "https://dl.k8s.io/v1.30.14/bin/linux/amd64/kubectl", // renovate:kubernetes-release - Hash: "sha256:7ccac981ece0098284d8961973295f5124d78eab7b89ba5023f35591baa16271", + Url: "https://storage.googleapis.com/kubernetes-release/release/v1.27.9/bin/linux/amd64/kubectl", // renovate:kubernetes-release + Hash: "sha256:d0caae91072297b2915dd65f6ef3055d27646dce821ec67d18da35ba9a8dc85b", InstallPath: constants.KubectlPath, Extract: false, }, { - Url: "data:application/json;base64,W3sib3AiOiJyZXBsYWNlIiwicGF0aCI6Ii9zcGVjL2NvbnRhaW5lcnMvMC9pbWFnZSIsInZhbHVlIjoicmVnaXN0cnkuazhzLmlvL2t1YmUtYXBpc2VydmVyOnYxLjMwLjE0QHNoYTI1NjpiZTA3OWZlODVkNmI2ODA0Yjg5YWI0ZmRkNmEzNWNkNTYzNDFlOTllYTgwOTg4MWNmZTM3OTYyZjQ0MGRjMWJlIn1d", + Url: "data:application/json;base64,W3sib3AiOiJyZXBsYWNlIiwicGF0aCI6Ii9zcGVjL2NvbnRhaW5lcnMvMC9pbWFnZSIsInZhbHVlIjoicmVnaXN0cnkuazhzLmlvL2t1YmUtYXBpc2VydmVyOnYxLjI3LjlAc2hhMjU2OjkyMjc0ZTgyZTI0NTJkYzA0ZmVkOTMzY2U2ZTQyMWM2NGUzMGQxNGQ4NjhkMDdiZmIwNzY5N2E5NjE0YTFkYjgifV0=", InstallPath: patchFilePath("kube-apiserver"), }, { - Url: "data:application/json;base64,W3sib3AiOiJyZXBsYWNlIiwicGF0aCI6Ii9zcGVjL2NvbnRhaW5lcnMvMC9pbWFnZSIsInZhbHVlIjoicmVnaXN0cnkuazhzLmlvL2t1YmUtY29udHJvbGxlci1tYW5hZ2VyOnYxLjMwLjE0QHNoYTI1NjplYmE0MWQ3NmI2YWYxMGFmOTQxMTQ3ZTJiNDQ5YzZkNjhlYWE0MmMxZmQwZmM4ZjBhYjhlYzJmZjBhYjg0OTY0In1d", + Url: 
"data:application/json;base64,W3sib3AiOiJyZXBsYWNlIiwicGF0aCI6Ii9zcGVjL2NvbnRhaW5lcnMvMC9pbWFnZSIsInZhbHVlIjoicmVnaXN0cnkuazhzLmlvL2t1YmUtY29udHJvbGxlci1tYW5hZ2VyOnYxLjI3LjlAc2hhMjU2OjczNWFlZmY1YTJlNjI4MmUwZWI3MjQ1YmUyNGIzZGEwNzYyOTdmOWU0ZmJhMmIzMjA5NGNjZjYxYTA4Y2NjYzIifV0=", InstallPath: patchFilePath("kube-controller-manager"), }, { - Url: "data:application/json;base64,W3sib3AiOiJyZXBsYWNlIiwicGF0aCI6Ii9zcGVjL2NvbnRhaW5lcnMvMC9pbWFnZSIsInZhbHVlIjoicmVnaXN0cnkuazhzLmlvL2t1YmUtc2NoZWR1bGVyOnYxLjMwLjE0QHNoYTI1Njo3NGE1Y2Y5Y2ZhOWZjYzIyNDZmNjhjNjUwZjFmNWM3YWRkMjBkYTIxNDVmMTM4MDBmZDk3YmExZDY5ZmMwNmM4In1d", + Url: "data:application/json;base64,W3sib3AiOiJyZXBsYWNlIiwicGF0aCI6Ii9zcGVjL2NvbnRhaW5lcnMvMC9pbWFnZSIsInZhbHVlIjoicmVnaXN0cnkuazhzLmlvL2t1YmUtc2NoZWR1bGVyOnYxLjI3LjlAc2hhMjU2OmYzOTc1OTU2YWQyMzY2N2NhOGY1NTdkNzY0MDQyNTNjYjdlODE1Y2E3Zjc3YWVkOTBlMWFlN2Q2NWU4OGYyYjEifV0=", InstallPath: patchFilePath("kube-scheduler"), }, { - Url: "data:application/json;base64,W3sib3AiOiJyZXBsYWNlIiwicGF0aCI6Ii9zcGVjL2NvbnRhaW5lcnMvMC9pbWFnZSIsInZhbHVlIjoicmVnaXN0cnkuazhzLmlvL2V0Y2Q6My41LjIxLTBAc2hhMjU2OmQ1OGMwMzVkZjU1NzA4MGEyNzM4N2Q2ODcwOTJlM2ZjMmI2NGM2ZDBlMzE2MmRjNTE0NTNhMTE1Zjg0N2QxMjEifV0=", + Url: "data:application/json;base64,W3sib3AiOiJyZXBsYWNlIiwicGF0aCI6Ii9zcGVjL2NvbnRhaW5lcnMvMC9pbWFnZSIsInZhbHVlIjoicmVnaXN0cnkuazhzLmlvL2V0Y2Q6My41LjEwLTBAc2hhMjU2OjIyZjg5MmQ3NjcyYWRjMGI5Yzg2ZGY2Nzc5MmFmZGI4YjJkYzA4ODgwZjQ5ZjY2OWVhYWE1OWM0N2Q3OTA4YzIifV0=", InstallPath: patchFilePath("etcd"), }, }, // CloudControllerManagerImageAWS is the CCM image used on AWS. - // Check for newer versions at https://github.com/kubernetes/cloud-provider-aws/releases. - CloudControllerManagerImageAWS: "registry.k8s.io/provider-aws/cloud-controller-manager:v1.30.8@sha256:f4e82b924e967656d8df9f467c9c1915509e94e228b53840a7f9f2367e1ba9f5", // renovate:container + CloudControllerManagerImageAWS: "registry.k8s.io/provider-aws/cloud-controller-manager:v1.27.2@sha256:42be09a2b13b4e69b42905639d6b005ebe1ca490aabefad427256abf2cc892c7", // renovate:container // CloudControllerManagerImageAzure is the CCM image used on Azure. // Check for newer versions at https://github.com/kubernetes-sigs/cloud-provider-azure/blob/master/README.md. - CloudControllerManagerImageAzure: "mcr.microsoft.com/oss/kubernetes/azure-cloud-controller-manager:v1.30.14@sha256:d5e7cff5f9df6f251aa057bd101b241aa8123343e2285551ddeed14b43f3f380", // renovate:container + CloudControllerManagerImageAzure: "mcr.microsoft.com/oss/kubernetes/azure-cloud-controller-manager:v1.27.10@sha256:3366e0e51c56643968c7e607cb27c2545948cfab5bff3bed85e314d93a689d8e", // renovate:container // CloudNodeManagerImageAzure is the cloud-node-manager image used on Azure. // Check for newer versions at https://github.com/kubernetes-sigs/cloud-provider-azure/blob/master/README.md. - CloudNodeManagerImageAzure: "mcr.microsoft.com/oss/kubernetes/azure-cloud-node-manager:v1.30.14@sha256:9bb1af3035dd0f581f18d63002e6bf59d4ba5f7ce1828f77c75d3a17f3154f9f", // renovate:container + CloudNodeManagerImageAzure: "mcr.microsoft.com/oss/kubernetes/azure-cloud-node-manager:v1.27.10@sha256:754d4eb709d0c5955af8bc46f5beccf0fa8c09551855a3810145b09af27d6656", // renovate:container // CloudControllerManagerImageGCP is the CCM image used on GCP. 
- CloudControllerManagerImageGCP: "ghcr.io/edgelesssys/cloud-provider-gcp:v30.1.4@sha256:0c3695a18d3825492196facb092e5fe56e466fa8517cde5a206fe21630c1da13", // renovate:container + // TODO(3u13r): use newer "cloud-provider-gcp" from https://github.com/kubernetes/cloud-provider-gcp when newer releases are available. + CloudControllerManagerImageGCP: "ghcr.io/edgelesssys/cloud-provider-gcp:v27.1.6@sha256:b097b4e5382ea1987db5996a9eaffb94fa224639b3464876f0b1b17f64509ac4", // renovate:container // CloudControllerManagerImageOpenStack is the CCM image used on OpenStack. CloudControllerManagerImageOpenStack: "docker.io/k8scloudprovider/openstack-cloud-controller-manager:v1.26.4@sha256:05e846fb13481b6dbe4a1e50491feb219e8f5101af6cf662a086115735624db0", // renovate:container // External service image. Depends on k8s version. // Check for new versions at https://github.com/kubernetes/autoscaler/releases. - ClusterAutoscalerImage: "registry.k8s.io/autoscaling/cluster-autoscaler:v1.30.5@sha256:c63b6fc563a7e374fca8fa3ca226d58955fe92360cb93aaa76974fc5dbf5cee6", // renovate:container + ClusterAutoscalerImage: "registry.k8s.io/autoscaling/cluster-autoscaler:v1.27.5@sha256:410ffc3f7307b6173c630de8de6e40175376c8c170d64b6c8b6e4baadda020df", // renovate:container }, - V1_31: { - ClusterVersion: "v1.31.11", // renovate:kubernetes-release + V1_28: { + ClusterVersion: "v1.28.5", // renovate:kubernetes-release KubernetesComponents: components.Components{ { - Url: "https://github.com/containernetworking/plugins/releases/download/v1.7.1/cni-plugins-linux-amd64-v1.7.1.tgz", // renovate:cni-plugins-release - Hash: "sha256:1a28a0506bfe5bcdc981caf1a49eeab7e72da8321f1119b7be85f22621013098", + Url: "https://github.com/containernetworking/plugins/releases/download/v1.4.0/cni-plugins-linux-amd64-v1.4.0.tgz", // renovate:cni-plugins-release + Hash: "sha256:c2485ddb3ffc176578ae30ae58137f0b88e50f7c7f2af7d53a569276b2949a33", InstallPath: constants.CniPluginsDir, Extract: true, }, { - Url: "https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.33.0/crictl-v1.33.0-linux-amd64.tar.gz", // renovate:crictl-release - Hash: "sha256:8307399e714626e69d1213a4cd18c8dec3d0201ecdac009b1802115df8973f0f", + Url: "https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.28.0/crictl-v1.28.0-linux-amd64.tar.gz", // renovate:crictl-release + Hash: "sha256:8dc78774f7cbeaf787994d386eec663f0a3cf24de1ea4893598096cb39ef2508", InstallPath: constants.BinDir, Extract: true, }, { - Url: "https://dl.k8s.io/v1.31.11/bin/linux/amd64/kubelet", // renovate:kubernetes-release - Hash: "sha256:7bdace3eb4c7a6d6b9cf3f9e84e5972b2885bf5bc20a92361ca527e5c228542f", + Url: "https://storage.googleapis.com/kubernetes-release/release/v1.28.5/bin/linux/amd64/kubelet", // renovate:kubernetes-release + Hash: "sha256:bf37335da58182783a8c63866ec1f895b4c436e3ed96bdd87fe3f8ae8004ba1d", InstallPath: constants.KubeletPath, Extract: false, }, { - Url: "https://dl.k8s.io/v1.31.11/bin/linux/amd64/kubeadm", // renovate:kubernetes-release - Hash: "sha256:d6bea121c00023eed6cebed7c2722b48543bff302142ec483f53aa1bed99c522", + Url: "https://storage.googleapis.com/kubernetes-release/release/v1.28.5/bin/linux/amd64/kubeadm", // renovate:kubernetes-release + Hash: "sha256:2b54078c5ea9e85b27f162f508e0bf834a2753e52a57e896812ec3dca92fe9cd", InstallPath: constants.KubeadmPath, Extract: false, }, { - Url: "https://dl.k8s.io/v1.31.11/bin/linux/amd64/kubectl", // renovate:kubernetes-release - Hash: "sha256:449674ed53789d63c94c147c689be986f4c135848ec91e1a64796ed896934b45", + Url: 
"https://storage.googleapis.com/kubernetes-release/release/v1.28.5/bin/linux/amd64/kubectl", // renovate:kubernetes-release + Hash: "sha256:2a44c0841b794d85b7819b505da2ff3acd5950bd1bcd956863714acc80653574", InstallPath: constants.KubectlPath, Extract: false, }, { - Url: "data:application/json;base64,W3sib3AiOiJyZXBsYWNlIiwicGF0aCI6Ii9zcGVjL2NvbnRhaW5lcnMvMC9pbWFnZSIsInZhbHVlIjoicmVnaXN0cnkuazhzLmlvL2t1YmUtYXBpc2VydmVyOnYxLjMxLjExQHNoYTI1NjphM2QxYzQ0NDA4MTc3MjVhMWI1MDNhN2NjY2U5NGYzZGNlMmIyMDhlYmYyNTdiNDA1ZGMyZDk3ODE3ZGYzZGRlIn1d", + Url: "data:application/json;base64,W3sib3AiOiJyZXBsYWNlIiwicGF0aCI6Ii9zcGVjL2NvbnRhaW5lcnMvMC9pbWFnZSIsInZhbHVlIjoicmVnaXN0cnkuazhzLmlvL2t1YmUtYXBpc2VydmVyOnYxLjI4LjVAc2hhMjU2OjRiYjZmNDZiYWE5ODA1MjM5OWVlMjI3MGQ1OTEyZWRiOTdkNGY4NjAyZWEyZTI3MDBmMDUyN2E4ODcyMjgxMTIifV0=", InstallPath: patchFilePath("kube-apiserver"), }, { - Url: "data:application/json;base64,W3sib3AiOiJyZXBsYWNlIiwicGF0aCI6Ii9zcGVjL2NvbnRhaW5lcnMvMC9pbWFnZSIsInZhbHVlIjoicmVnaXN0cnkuazhzLmlvL2t1YmUtY29udHJvbGxlci1tYW5hZ2VyOnYxLjMxLjExQHNoYTI1NjowZjE5ZGUxNTdmM2QyNTFmNWRkZWI2ZTlkMDI2ODk1YmM1NWNiMDI1OTI4NzRiMzI2ZmEzNDVjNTdlNWUyODQ4In1d", + Url: "data:application/json;base64,W3sib3AiOiJyZXBsYWNlIiwicGF0aCI6Ii9zcGVjL2NvbnRhaW5lcnMvMC9pbWFnZSIsInZhbHVlIjoicmVnaXN0cnkuazhzLmlvL2t1YmUtY29udHJvbGxlci1tYW5hZ2VyOnYxLjI4LjVAc2hhMjU2OjZlOGM5MTcxZjc0YTRlM2ZhZGVkY2U4Zjg2NWY1ODA5MmE1OTc2NTBhNzA5NzAyZjJiMTIyZjZjYTNiNmNkMzIifV0=", InstallPath: patchFilePath("kube-controller-manager"), }, { - Url: "data:application/json;base64,W3sib3AiOiJyZXBsYWNlIiwicGF0aCI6Ii9zcGVjL2NvbnRhaW5lcnMvMC9pbWFnZSIsInZhbHVlIjoicmVnaXN0cnkuazhzLmlvL2t1YmUtc2NoZWR1bGVyOnYxLjMxLjExQHNoYTI1NjoxYTliNTliM2JmYTZjMWYxOTExZjZmODY1YTc5NTYyMGM0NjFkMDc5ZTQxMzA2MWJiNzE5ODFjYWRkNjdmMzlkIn1d", + Url: "data:application/json;base64,W3sib3AiOiJyZXBsYWNlIiwicGF0aCI6Ii9zcGVjL2NvbnRhaW5lcnMvMC9pbWFnZSIsInZhbHVlIjoicmVnaXN0cnkuazhzLmlvL2t1YmUtc2NoZWR1bGVyOnYxLjI4LjVAc2hhMjU2OjlhNDhlMzNlNDU0YzkwNGNmNTNjMTQ0YThkMGFlM2Y1YTRjMmM1YmQwODZiODk1M2FkN2Q1YTYzN2I5YWEwMDcifV0=", InstallPath: patchFilePath("kube-scheduler"), }, { - Url: "data:application/json;base64,W3sib3AiOiJyZXBsYWNlIiwicGF0aCI6Ii9zcGVjL2NvbnRhaW5lcnMvMC9pbWFnZSIsInZhbHVlIjoicmVnaXN0cnkuazhzLmlvL2V0Y2Q6My41LjIxLTBAc2hhMjU2OmQ1OGMwMzVkZjU1NzA4MGEyNzM4N2Q2ODcwOTJlM2ZjMmI2NGM2ZDBlMzE2MmRjNTE0NTNhMTE1Zjg0N2QxMjEifV0=", + Url: "data:application/json;base64,W3sib3AiOiJyZXBsYWNlIiwicGF0aCI6Ii9zcGVjL2NvbnRhaW5lcnMvMC9pbWFnZSIsInZhbHVlIjoicmVnaXN0cnkuazhzLmlvL2V0Y2Q6My41LjEwLTBAc2hhMjU2OjIyZjg5MmQ3NjcyYWRjMGI5Yzg2ZGY2Nzc5MmFmZGI4YjJkYzA4ODgwZjQ5ZjY2OWVhYWE1OWM0N2Q3OTA4YzIifV0=", InstallPath: patchFilePath("etcd"), }, }, // CloudControllerManagerImageAWS is the CCM image used on AWS. - // Check for newer versions at https://github.com/kubernetes/cloud-provider-aws/releases. - CloudControllerManagerImageAWS: "registry.k8s.io/provider-aws/cloud-controller-manager:v1.31.7@sha256:576bfe3bb1e2da8fe6312933a31f03f0b3b2729aeb44d84ce8d495abed04af09", // renovate:container + CloudControllerManagerImageAWS: "registry.k8s.io/provider-aws/cloud-controller-manager:v1.28.1@sha256:79b423ac8bc52d00f932b40de11fc3047a5ed1cbec47cda23bcf8f45ef583ed1", // renovate:container // CloudControllerManagerImageAzure is the CCM image used on Azure. // Check for newer versions at https://github.com/kubernetes-sigs/cloud-provider-azure/blob/master/README.md. 
- CloudControllerManagerImageAzure: "mcr.microsoft.com/oss/kubernetes/azure-cloud-controller-manager:v1.31.8@sha256:3dd373444b7ff407bb847fa5a21d6047d0cb03b81b98116037aa66006ce0c401", // renovate:container + CloudControllerManagerImageAzure: "mcr.microsoft.com/oss/kubernetes/azure-cloud-controller-manager:v1.27.10@sha256:3366e0e51c56643968c7e607cb27c2545948cfab5bff3bed85e314d93a689d8e", // renovate:container // CloudNodeManagerImageAzure is the cloud-node-manager image used on Azure. // Check for newer versions at https://github.com/kubernetes-sigs/cloud-provider-azure/blob/master/README.md. - CloudNodeManagerImageAzure: "mcr.microsoft.com/oss/kubernetes/azure-cloud-node-manager:v1.31.8@sha256:ae68dc4ff970dd517daa4ed5264fe6b65fac0c2f8e513a89df014fea1306efd5", // renovate:container + CloudNodeManagerImageAzure: "mcr.microsoft.com/oss/kubernetes/azure-cloud-node-manager:v1.27.10@sha256:754d4eb709d0c5955af8bc46f5beccf0fa8c09551855a3810145b09af27d6656", // renovate:container // CloudControllerManagerImageGCP is the CCM image used on GCP. - CloudControllerManagerImageGCP: "ghcr.io/edgelesssys/cloud-provider-gcp:v30.1.4@sha256:0c3695a18d3825492196facb092e5fe56e466fa8517cde5a206fe21630c1da13", // renovate:container + // TODO(3u13r): use newer "cloud-provider-gcp" from https://github.com/kubernetes/cloud-provider-gcp when newer releases are available. + CloudControllerManagerImageGCP: "ghcr.io/edgelesssys/cloud-provider-gcp:v27.1.6@sha256:b097b4e5382ea1987db5996a9eaffb94fa224639b3464876f0b1b17f64509ac4", // renovate:container // CloudControllerManagerImageOpenStack is the CCM image used on OpenStack. - CloudControllerManagerImageOpenStack: "registry.k8s.io/provider-os/openstack-cloud-controller-manager:v1.33.1@sha256:de8a6da8c31c7b967625451a7169309d6f77aee1ff64b3f8e6ba8d8810ce2a22", // renovate:container + CloudControllerManagerImageOpenStack: "docker.io/k8scloudprovider/openstack-cloud-controller-manager:v1.26.4@sha256:05e846fb13481b6dbe4a1e50491feb219e8f5101af6cf662a086115735624db0", // renovate:container // External service image. Depends on k8s version. // Check for new versions at https://github.com/kubernetes/autoscaler/releases. 
- ClusterAutoscalerImage: "registry.k8s.io/autoscaling/cluster-autoscaler:v1.31.3@sha256:b5ac5d93d0e43c6f4f14b0a3994ff905ed169aa0d614d7af702eca0a254cb8a8", // renovate:container + ClusterAutoscalerImage: "registry.k8s.io/autoscaling/cluster-autoscaler:v1.27.5@sha256:410ffc3f7307b6173c630de8de6e40175376c8c170d64b6c8b6e4baadda020df", // renovate:container }, - V1_32: { - ClusterVersion: "v1.32.7", // renovate:kubernetes-release + V1_29: { + ClusterVersion: "v1.29.0", // renovate:kubernetes-release KubernetesComponents: components.Components{ { - Url: "https://github.com/containernetworking/plugins/releases/download/v1.7.1/cni-plugins-linux-amd64-v1.7.1.tgz", // renovate:cni-plugins-release - Hash: "sha256:1a28a0506bfe5bcdc981caf1a49eeab7e72da8321f1119b7be85f22621013098", + Url: "https://github.com/containernetworking/plugins/releases/download/v1.4.0/cni-plugins-linux-amd64-v1.4.0.tgz", // renovate:cni-plugins-release + Hash: "sha256:c2485ddb3ffc176578ae30ae58137f0b88e50f7c7f2af7d53a569276b2949a33", InstallPath: constants.CniPluginsDir, Extract: true, }, { - Url: "https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.33.0/crictl-v1.33.0-linux-amd64.tar.gz", // renovate:crictl-release - Hash: "sha256:8307399e714626e69d1213a4cd18c8dec3d0201ecdac009b1802115df8973f0f", + Url: "https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.29.0/crictl-v1.29.0-linux-amd64.tar.gz", // renovate:crictl-release + Hash: "sha256:d16a1ffb3938f5a19d5c8f45d363bd091ef89c0bc4d44ad16b933eede32fdcbb", InstallPath: constants.BinDir, Extract: true, }, { - Url: "https://dl.k8s.io/v1.32.7/bin/linux/amd64/kubelet", // renovate:kubernetes-release - Hash: "sha256:7ab96898436475640cbd416b2446f33aba1c2cb62dae876302ff7775d850041c", + Url: "https://storage.googleapis.com/kubernetes-release/release/v1.29.0/bin/linux/amd64/kubelet", // renovate:kubernetes-release + Hash: "sha256:e1c38137db8d8777eed8813646b59bf4d22d19b9011ab11dc28e2e34f6b80a05", InstallPath: constants.KubeletPath, Extract: false, }, { - Url: "https://dl.k8s.io/v1.32.7/bin/linux/amd64/kubeadm", // renovate:kubernetes-release - Hash: "sha256:dcd40af0042c559f3218dbd23bf318b850a5213528b428e1637ccb357ac32498", + Url: "https://storage.googleapis.com/kubernetes-release/release/v1.29.0/bin/linux/amd64/kubeadm", // renovate:kubernetes-release + Hash: "sha256:629d4630657caace9c819fd3797f4a70c397fbd41a2a7e464a0507dad675d52c", InstallPath: constants.KubeadmPath, Extract: false, }, { - Url: "https://dl.k8s.io/v1.32.7/bin/linux/amd64/kubectl", // renovate:kubernetes-release - Hash: "sha256:b8f24d467a8963354b028796a85904824d636132bef00988394cadacffe959c9", + Url: "https://storage.googleapis.com/kubernetes-release/release/v1.29.0/bin/linux/amd64/kubectl", // renovate:kubernetes-release + Hash: "sha256:0e03ab096163f61ab610b33f37f55709d3af8e16e4dcc1eb682882ef80f96fd5", InstallPath: constants.KubectlPath, Extract: false, }, { - Url: "data:application/json;base64,W3sib3AiOiJyZXBsYWNlIiwicGF0aCI6Ii9zcGVjL2NvbnRhaW5lcnMvMC9pbWFnZSIsInZhbHVlIjoicmVnaXN0cnkuazhzLmlvL2t1YmUtYXBpc2VydmVyOnYxLjMyLjdAc2hhMjU2OmUwNGY2MjIzZDUyZjgwNDFjNDZlZjQ1NDVjY2FmMDc4OTRiMWNhNTg1MTUwNmE5MTQyNzA2ZDQyMDY5MTFmNjQifV0=", + Url: "data:application/json;base64,W3sib3AiOiJyZXBsYWNlIiwicGF0aCI6Ii9zcGVjL2NvbnRhaW5lcnMvMC9pbWFnZSIsInZhbHVlIjoicmVnaXN0cnkuazhzLmlvL2t1YmUtYXBpc2VydmVyOnYxLjI5LjBAc2hhMjU2OjkyMWQ5ZDRjZGE0MGJkNDgxMjgzMzc1ZDM5ZDEyYjI0ZjUxMjgxNjgyYWU0MWY2ZGE0N2Y2OWNiMDcyNjQzYmMifV0=", InstallPath: patchFilePath("kube-apiserver"), }, { - Url: 
"data:application/json;base64,W3sib3AiOiJyZXBsYWNlIiwicGF0aCI6Ii9zcGVjL2NvbnRhaW5lcnMvMC9pbWFnZSIsInZhbHVlIjoicmVnaXN0cnkuazhzLmlvL2t1YmUtY29udHJvbGxlci1tYW5hZ2VyOnYxLjMyLjdAc2hhMjU2OjZjN2YyODhhYjAxODFlNDk2NjA2YTQzZGJhZGU5NTQ4MTlhZjJiMWUxYzA1NTJiZWNmNjkwMzQzNmUxNmVhNzUifV0=", + Url: "data:application/json;base64,W3sib3AiOiJyZXBsYWNlIiwicGF0aCI6Ii9zcGVjL2NvbnRhaW5lcnMvMC9pbWFnZSIsInZhbHVlIjoicmVnaXN0cnkuazhzLmlvL2t1YmUtY29udHJvbGxlci1tYW5hZ2VyOnYxLjI5LjBAc2hhMjU2OmQxZTM4ZWEyNWIyN2U1N2I0MTk5NWVmNTlhZDc2ZGQzMzQ4MTg1M2E1YjhkMWE5MWFiYjdhOGJlMzJiN2U3ZGEifV0=", InstallPath: patchFilePath("kube-controller-manager"), }, { - Url: "data:application/json;base64,W3sib3AiOiJyZXBsYWNlIiwicGF0aCI6Ii9zcGVjL2NvbnRhaW5lcnMvMC9pbWFnZSIsInZhbHVlIjoicmVnaXN0cnkuazhzLmlvL2t1YmUtc2NoZWR1bGVyOnYxLjMyLjdAc2hhMjU2OjFjMzVhOTcwYjQ0NTBiNDI4NTUzMTQ5NWJlODJjZGExZjY1NDk5NTJmNzBkNmUzZGU4ZGI1N2MyMGEzZGE0Y2UifV0=", + Url: "data:application/json;base64,W3sib3AiOiJyZXBsYWNlIiwicGF0aCI6Ii9zcGVjL2NvbnRhaW5lcnMvMC9pbWFnZSIsInZhbHVlIjoicmVnaXN0cnkuazhzLmlvL2t1YmUtc2NoZWR1bGVyOnYxLjI5LjBAc2hhMjU2OjVkZjMxMDIzNGU0Zjk0NjNiMTVkMTY2Nzc4ZDY5NzgzMGE1MWMwMDM3ZmYyOGExNzU5ZGFhYWQyZDNjZGU5OTEifV0=", InstallPath: patchFilePath("kube-scheduler"), }, { - Url: "data:application/json;base64,W3sib3AiOiJyZXBsYWNlIiwicGF0aCI6Ii9zcGVjL2NvbnRhaW5lcnMvMC9pbWFnZSIsInZhbHVlIjoicmVnaXN0cnkuazhzLmlvL2V0Y2Q6My41LjIxLTBAc2hhMjU2OmQ1OGMwMzVkZjU1NzA4MGEyNzM4N2Q2ODcwOTJlM2ZjMmI2NGM2ZDBlMzE2MmRjNTE0NTNhMTE1Zjg0N2QxMjEifV0=", + Url: "data:application/json;base64,W3sib3AiOiJyZXBsYWNlIiwicGF0aCI6Ii9zcGVjL2NvbnRhaW5lcnMvMC9pbWFnZSIsInZhbHVlIjoicmVnaXN0cnkuazhzLmlvL2V0Y2Q6My41LjEwLTBAc2hhMjU2OjIyZjg5MmQ3NjcyYWRjMGI5Yzg2ZGY2Nzc5MmFmZGI4YjJkYzA4ODgwZjQ5ZjY2OWVhYWE1OWM0N2Q3OTA4YzIifV0=", InstallPath: patchFilePath("etcd"), }, }, // CloudControllerManagerImageAWS is the CCM image used on AWS. - // Check for newer versions at https://github.com/kubernetes/cloud-provider-aws/releases. - CloudControllerManagerImageAWS: "registry.k8s.io/provider-aws/cloud-controller-manager:v1.32.3@sha256:894dc5ce38646acad312a722e29ee7641aa5032aba5b134ebb98462b492f9bc6", // renovate:container + CloudControllerManagerImageAWS: "registry.k8s.io/provider-aws/cloud-controller-manager:v1.29.1@sha256:1652c12636b3ec5c512c6d66116608911def51fb5e69ccc1929456e077b456f9", // renovate:container // CloudControllerManagerImageAzure is the CCM image used on Azure. // Check for newer versions at https://github.com/kubernetes-sigs/cloud-provider-azure/blob/master/README.md. - CloudControllerManagerImageAzure: "mcr.microsoft.com/oss/kubernetes/azure-cloud-controller-manager:v1.32.7@sha256:caba156c6286d6cbc791885b087e530db44d30622cd36799c48ba6e9ddf555b5", // renovate:container + CloudControllerManagerImageAzure: "mcr.microsoft.com/oss/kubernetes/azure-cloud-controller-manager:v1.28.4@sha256:eaed60d42a04bbce6a33aeb6ca764f1f8b079adf7eb82f7b4d3663c879bfbef9", // renovate:container // CloudNodeManagerImageAzure is the cloud-node-manager image used on Azure. // Check for newer versions at https://github.com/kubernetes-sigs/cloud-provider-azure/blob/master/README.md. - CloudNodeManagerImageAzure: "mcr.microsoft.com/oss/kubernetes/azure-cloud-node-manager:v1.32.7@sha256:31b91d1af2cdb21f7706e67a44de27f96a0a5835a207cfc5efc5da069b73dc11", // renovate:container + CloudNodeManagerImageAzure: "mcr.microsoft.com/oss/kubernetes/azure-cloud-node-manager:v1.28.4@sha256:af9b2441ef309c34149076ebad52ef7dc0a0f3aacfd1ed4d634e7fcf7f97092b", // renovate:container // CloudControllerManagerImageGCP is the CCM image used on GCP. 
- CloudControllerManagerImageGCP: "ghcr.io/edgelesssys/cloud-provider-gcp:v30.1.4@sha256:0c3695a18d3825492196facb092e5fe56e466fa8517cde5a206fe21630c1da13", // renovate:container + // TODO(3u13r): use newer "cloud-provider-gcp" from https://github.com/kubernetes/cloud-provider-gcp when newer releases are available. + CloudControllerManagerImageGCP: "ghcr.io/edgelesssys/cloud-provider-gcp:v29.0.0@sha256:576e11bab05de167aa6d89477050a55ac9b2ddbd505b8a5be70a5ae71a08ecce", // renovate:container // CloudControllerManagerImageOpenStack is the CCM image used on OpenStack. - CloudControllerManagerImageOpenStack: "registry.k8s.io/provider-os/openstack-cloud-controller-manager:v1.33.1@sha256:de8a6da8c31c7b967625451a7169309d6f77aee1ff64b3f8e6ba8d8810ce2a22", // renovate:container + CloudControllerManagerImageOpenStack: "docker.io/k8scloudprovider/openstack-cloud-controller-manager:v1.26.4@sha256:05e846fb13481b6dbe4a1e50491feb219e8f5101af6cf662a086115735624db0", // renovate:container // External service image. Depends on k8s version. // Check for new versions at https://github.com/kubernetes/autoscaler/releases. - ClusterAutoscalerImage: "registry.k8s.io/autoscaling/cluster-autoscaler:v1.32.2@sha256:c627d8d159a8cd6705a2612afd8a9ffc499e2fc37bad3525364427439b9224c0", // renovate:container + ClusterAutoscalerImage: "registry.k8s.io/autoscaling/cluster-autoscaler:v1.29.0@sha256:808185c1090107f06ea69b0a5e507e387ad2ee3a3b12b7cd08ea0dac730cf58b", // renovate:container }, } @@ -406,9 +406,9 @@ type KubernetesVersion struct { KubernetesComponents components.Components CloudControllerManagerImageAWS string // k8s version dependency. CloudControllerManagerImageAzure string // k8s version dependency. - CloudControllerManagerImageGCP string // Published by .github/workflows/build-ccm-gcp.yml because of https://github.com/kubernetes/cloud-provider-gcp/issues/289. + CloudControllerManagerImageGCP string // Using self-built image until resolved: https://github.com/kubernetes/cloud-provider-gcp/issues/289 CloudControllerManagerImageOpenStack string // k8s version dependency. - CloudNodeManagerImageAzure string // k8s version dependency. Same version as Azure's CCM image above. + CloudNodeManagerImageAzure string // k8s version dependency. Same version as above. ClusterAutoscalerImage string // Matches k8s versioning scheme. 
} diff --git a/internal/versions/versions_test.go b/internal/versions/versions_test.go index 1e62d1091..34bc99859 100644 --- a/internal/versions/versions_test.go +++ b/internal/versions/versions_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package versions diff --git a/joinservice/cmd/main.go b/joinservice/cmd/main.go index 425c4c4ac..8aaab9654 100644 --- a/joinservice/cmd/main.go +++ b/joinservice/cmd/main.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package main @@ -116,7 +116,6 @@ func main() { keyServiceClient, kubeClient, log.WithGroup("server"), - file.NewHandler(afero.NewOsFs()), ) if err != nil { log.With(slog.Any("error", err)).Error("Failed to create server") diff --git a/joinservice/internal/certcache/amdkds/amdkds.go b/joinservice/internal/certcache/amdkds/amdkds.go index 0f2d14468..8b1a9b131 100644 --- a/joinservice/internal/certcache/amdkds/amdkds.go +++ b/joinservice/internal/certcache/amdkds/amdkds.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // The AMDKDS package implements interaction with the AMD KDS (Key Distribution Service). diff --git a/joinservice/internal/certcache/amdkds/amdkds_test.go b/joinservice/internal/certcache/amdkds/amdkds_test.go index 08e0ba2cf..1ce3706a9 100644 --- a/joinservice/internal/certcache/amdkds/amdkds_test.go +++ b/joinservice/internal/certcache/amdkds/amdkds_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package amdkds @@ -71,6 +71,6 @@ type stubGetter struct { } func (s *stubGetter) Get(url string) ([]byte, error) { - s.log.Debug(fmt.Sprintf("Request to %q", url)) + s.log.Debug(fmt.Sprintf("Request to %s", url)) return s.ret, s.err } diff --git a/joinservice/internal/certcache/amdkds/testdata/testdata.go b/joinservice/internal/certcache/amdkds/testdata/testdata.go index fea4953d3..4e4d4a40e 100644 --- a/joinservice/internal/certcache/amdkds/testdata/testdata.go +++ b/joinservice/internal/certcache/amdkds/testdata/testdata.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package testdata contains testing data for an attestation process. diff --git a/joinservice/internal/certcache/certcache.go b/joinservice/internal/certcache/certcache.go index 632863d61..cada6fd7c 100644 --- a/joinservice/internal/certcache/certcache.go +++ b/joinservice/internal/certcache/certcache.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package certcache implements an in-cluster SEV-SNP certificate cache. 
@@ -53,11 +53,11 @@ func (c *Client) CreateCertChainCache(ctx context.Context) (*CachedCerts, error) case variant.AWSSEVSNP{}: reportSigner = abi.VlekReportSigner default: - c.log.Debug(fmt.Sprintf("No certificate chain caching possible for attestation variant %q", c.attVariant)) + c.log.Debug(fmt.Sprintf("No certificate chain caching possible for attestation variant %s", c.attVariant)) return nil, nil } - c.log.Debug(fmt.Sprintf("Creating %q certificate chain cache", c.attVariant)) + c.log.Debug(fmt.Sprintf("Creating %s certificate chain cache", c.attVariant)) ask, ark, err := c.createCertChainCache(ctx, reportSigner) if err != nil { return nil, fmt.Errorf("creating %s certificate chain cache: %w", c.attVariant, err) diff --git a/joinservice/internal/certcache/certcache_test.go b/joinservice/internal/certcache/certcache_test.go index 34df8db13..a742d43c6 100644 --- a/joinservice/internal/certcache/certcache_test.go +++ b/joinservice/internal/certcache/certcache_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package certcache @@ -116,7 +116,7 @@ func TestCreateCertChainCache(t *testing.T) { assert := assert.New(t) require := require.New(t) - ctx := t.Context() + ctx := context.Background() c := &Client{ attVariant: variant.Dummy{}, @@ -204,7 +204,7 @@ func TestGetCertChainCache(t *testing.T) { t.Run(name, func(t *testing.T) { assert := assert.New(t) - ctx := t.Context() + ctx := context.Background() c := NewClient(logger.NewTest(t), tc.kubeClient, variant.Dummy{}) diff --git a/joinservice/internal/certcache/testdata/testdata.go b/joinservice/internal/certcache/testdata/testdata.go index a3b7ade1d..3830ccb16 100644 --- a/joinservice/internal/certcache/testdata/testdata.go +++ b/joinservice/internal/certcache/testdata/testdata.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package testdata contains testing data for an attestation process. diff --git a/joinservice/internal/kms/BUILD.bazel b/joinservice/internal/kms/BUILD.bazel index b3c7a3dc9..35f6a6bbb 100644 --- a/joinservice/internal/kms/BUILD.bazel +++ b/joinservice/internal/kms/BUILD.bazel @@ -8,7 +8,7 @@ go_library( visibility = ["//joinservice:__subpackages__"], deps = [ "//keyservice/keyserviceproto", - "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//:go_default_library", "@org_golang_google_grpc//credentials/insecure", ], ) @@ -21,7 +21,7 @@ go_test( "//internal/logger", "//keyservice/keyserviceproto", "@com_github_stretchr_testify//assert", - "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//:go_default_library", "@org_golang_google_grpc//test/bufconn", "@org_uber_go_goleak//:goleak", ], diff --git a/joinservice/internal/kms/kms.go b/joinservice/internal/kms/kms.go index 4b9c12aa9..a3a28147d 100644 --- a/joinservice/internal/kms/kms.go +++ b/joinservice/internal/kms/kms.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package kms handles communication with Constellation's key service to request data encryption keys for new or rejoining nodes. 
@@ -39,7 +39,7 @@ func (c Client) GetDataKey(ctx context.Context, keyID string, length int) ([]byt // the KMS does not use aTLS since traffic is only routed through the Constellation cluster // cluster internal connections are considered trustworthy log.Info(fmt.Sprintf("Connecting to KMS at %s", c.endpoint)) - conn, err := grpc.NewClient(c.endpoint, grpc.WithTransportCredentials(insecure.NewCredentials())) + conn, err := grpc.DialContext(ctx, c.endpoint, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { return nil, err } diff --git a/joinservice/internal/kms/kms_test.go b/joinservice/internal/kms/kms_test.go index 974bce382..cd831ddc4 100644 --- a/joinservice/internal/kms/kms_test.go +++ b/joinservice/internal/kms/kms_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package kms @@ -60,7 +60,7 @@ func TestGetDataKey(t *testing.T) { client.grpc = tc.client - res, err := client.GetDataKey(t.Context(), "disk-uuid", 32) + res, err := client.GetDataKey(context.Background(), "disk-uuid", 32) if tc.wantErr { assert.Error(err) } else { diff --git a/joinservice/internal/kubeadm/kubeadm.go b/joinservice/internal/kubeadm/kubeadm.go index 67a5f9475..28ac08740 100644 --- a/joinservice/internal/kubeadm/kubeadm.go +++ b/joinservice/internal/kubeadm/kubeadm.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package kubeadm handles joining of new nodes by creating Kubernetes Join Tokens. diff --git a/joinservice/internal/kubeadm/kubeadm_test.go b/joinservice/internal/kubeadm/kubeadm_test.go index 92a2cff03..e78d1c0bb 100644 --- a/joinservice/internal/kubeadm/kubeadm_test.go +++ b/joinservice/internal/kubeadm/kubeadm_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package kubeadm diff --git a/joinservice/internal/kubernetes/kubernetes.go b/joinservice/internal/kubernetes/kubernetes.go index ac161aafc..c71e4a801 100644 --- a/joinservice/internal/kubernetes/kubernetes.go +++ b/joinservice/internal/kubernetes/kubernetes.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package kubernetes interacts with the Kubernetes API to update an fetch objects related to joining nodes. diff --git a/joinservice/internal/kubernetes/kubernetes_test.go b/joinservice/internal/kubernetes/kubernetes_test.go index bf7f97048..2d289a068 100644 --- a/joinservice/internal/kubernetes/kubernetes_test.go +++ b/joinservice/internal/kubernetes/kubernetes_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package kubernetes diff --git a/joinservice/internal/kubernetesca/kubernetesca.go b/joinservice/internal/kubernetesca/kubernetesca.go index 92d0d0ffc..f9173b2f6 100644 --- a/joinservice/internal/kubernetesca/kubernetesca.go +++ b/joinservice/internal/kubernetesca/kubernetesca.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // kubernetesca implements a certificate authority that uses the Kubernetes root CA to sign certificates. 
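// Illustrative sketch, not part of the patch above: the kms.go hunk earlier in this
// section swaps grpc.NewClient for grpc.DialContext when the join service connects to
// the key service over plain (cluster-internal) transport. Both constructors exist in
// google.golang.org/grpc; DialContext is the older context-aware variant, NewClient the
// newer lazy one. The endpoint value and function names below are placeholders chosen
// for the example, not taken from the diff.
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// dialKeyService opens a cluster-internal gRPC connection without aTLS,
// mirroring the call shape shown in the kms.go hunk.
func dialKeyService(ctx context.Context, endpoint string) (*grpc.ClientConn, error) {
	return grpc.DialContext(ctx, endpoint, grpc.WithTransportCredentials(insecure.NewCredentials()))
}

func main() {
	conn, err := dialKeyService(context.Background(), "key-service:9000") // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}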
diff --git a/joinservice/internal/kubernetesca/kubernetesca_test.go b/joinservice/internal/kubernetesca/kubernetesca_test.go index 698551f7a..359ef7771 100644 --- a/joinservice/internal/kubernetesca/kubernetesca_test.go +++ b/joinservice/internal/kubernetesca/kubernetesca_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package kubernetesca diff --git a/joinservice/internal/server/BUILD.bazel b/joinservice/internal/server/BUILD.bazel index 369d49f58..409c50c0e 100644 --- a/joinservice/internal/server/BUILD.bazel +++ b/joinservice/internal/server/BUILD.bazel @@ -10,18 +10,15 @@ go_library( "//internal/attestation", "//internal/constants", "//internal/crypto", - "//internal/file", "//internal/grpc/grpclog", "//internal/logger", "//internal/versions/components", "//joinservice/joinproto", - "@in_gopkg_yaml_v3//:yaml_v3", "@io_k8s_kubernetes//cmd/kubeadm/app/apis/kubeadm/v1beta3", - "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//:go_default_library", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//credentials", "@org_golang_google_grpc//status", - "@org_golang_x_crypto//ssh", ], ) @@ -31,16 +28,12 @@ go_test( embed = [":server"], deps = [ "//internal/attestation", - "//internal/constants", - "//internal/file", "//internal/logger", "//internal/versions/components", "//joinservice/joinproto", - "@com_github_spf13_afero//:afero", "@com_github_stretchr_testify//assert", "@com_github_stretchr_testify//require", "@io_k8s_kubernetes//cmd/kubeadm/app/apis/kubeadm/v1beta3", - "@org_golang_x_crypto//ssh", "@org_uber_go_goleak//:goleak", ], ) diff --git a/joinservice/internal/server/server.go b/joinservice/internal/server/server.go index 1bfefb76d..0b8a98f10 100644 --- a/joinservice/internal/server/server.go +++ b/joinservice/internal/server/server.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package server implements the gRPC endpoint of Constellation's node join service. 
@@ -9,7 +9,6 @@ package server import ( "context" - "crypto/ed25519" "fmt" "log/slog" "net" @@ -18,17 +17,14 @@ import ( "github.com/edgelesssys/constellation/v2/internal/attestation" "github.com/edgelesssys/constellation/v2/internal/constants" "github.com/edgelesssys/constellation/v2/internal/crypto" - "github.com/edgelesssys/constellation/v2/internal/file" "github.com/edgelesssys/constellation/v2/internal/grpc/grpclog" "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/internal/versions/components" "github.com/edgelesssys/constellation/v2/joinservice/joinproto" - "golang.org/x/crypto/ssh" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/status" - "gopkg.in/yaml.v3" kubeadmv1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3" ) @@ -42,7 +38,6 @@ type Server struct { dataKeyGetter dataKeyGetter ca certificateAuthority kubeClient kubeClient - fileHandler file.Handler joinproto.UnimplementedAPIServer } @@ -50,7 +45,6 @@ type Server struct { func New( measurementSalt []byte, ca certificateAuthority, joinTokenGetter joinTokenGetter, dataKeyGetter dataKeyGetter, kubeClient kubeClient, log *slog.Logger, - fileHandler file.Handler, ) (*Server, error) { return &Server{ measurementSalt: measurementSalt, @@ -59,17 +53,15 @@ func New( dataKeyGetter: dataKeyGetter, ca: ca, kubeClient: kubeClient, - fileHandler: fileHandler, }, nil } // Run starts the gRPC server on the given port, using the provided tlsConfig. func (s *Server) Run(creds credentials.TransportCredentials, port string) error { - grpcLog := logger.GRPCLogger(s.log) - logger.ReplaceGRPCLogger(grpcLog) + logger.ReplaceGRPCLogger(slog.New(logger.NewLevelHandler(slog.LevelWarn, s.log.Handler())).WithGroup("gRPC")) grpcServer := grpc.NewServer( grpc.Creds(creds), - logger.GetServerUnaryInterceptor(grpcLog), + logger.GetServerUnaryInterceptor(s.log.WithGroup("gRPC")), ) joinproto.RegisterAPIServer(grpcServer, s) @@ -107,34 +99,6 @@ func (s *Server) IssueJoinTicket(ctx context.Context, req *joinproto.IssueJoinTi return nil, status.Errorf(codes.Internal, "getting key for stateful disk: %s", err) } - log.Info("Requesting emergency SSH CA derivation key") - sshCAKeySeed, err := s.dataKeyGetter.GetDataKey(ctx, constants.SSHCAKeySuffix, ed25519.SeedSize) - if err != nil { - log.With(slog.Any("error", err)).Error("Failed to get seed material to derive SSH CA key") - return nil, status.Errorf(codes.Internal, "getting emergency SSH CA seed material: %s", err) - } - ca, err := crypto.GenerateEmergencySSHCAKey(sshCAKeySeed) - if err != nil { - log.With(slog.Any("error", err)).Error("Failed to derive ssh CA key from seed material") - return nil, status.Errorf(codes.Internal, "generating ssh emergency CA key: %s", err) - } - - principalList := s.extendPrincipals(req.HostCertificatePrincipals) - if len(principalList) == 0 { - principalList = append(principalList, grpclog.PeerAddrFromContext(ctx)) - } - - publicKey, err := ssh.ParsePublicKey(req.HostPublicKey) - if err != nil { - log.With(slog.Any("error", err)).Error("Failed to parse host public key") - return nil, status.Errorf(codes.Internal, "unmarshalling host public key: %s", err) - } - hostCertificate, err := crypto.GenerateSSHHostCertificate(principalList, publicKey, ca) - if err != nil { - log.With(slog.Any("error", err)).Error("Failed to generate and sign SSH host key") - return nil, status.Errorf(codes.Internal, "generating and signing SSH host key: %s", err) - } - 
log.Info("Creating Kubernetes join token") kubeArgs, err := s.joinTokenGetter.GetJoinToken(constants.KubernetesJoinTokenTTL) if err != nil { @@ -202,8 +166,6 @@ func (s *Server) IssueJoinTicket(ctx context.Context, req *joinproto.IssueJoinTi KubeletCert: kubeletCert, ControlPlaneFiles: controlPlaneFiles, KubernetesComponents: components, - AuthorizedCaPublicKey: ssh.MarshalAuthorizedKey(ca.PublicKey()), - HostCertificate: ssh.MarshalAuthorizedKey(hostCertificate), }, nil } @@ -267,48 +229,3 @@ type kubeClient interface { GetComponents(ctx context.Context, configMapName string) (components.Components, error) AddNodeToJoiningNodes(ctx context.Context, nodeName string, componentsHash string, isControlPlane bool) error } - -func (s *Server) extendPrincipals(principals []string) []string { - clusterConfigYAML, err := s.fileHandler.Read("/var/kubeadm-config/ClusterConfiguration") - if err != nil { - s.log.Error("Failed to read kubeadm ClusterConfiguration file", "error", err) - return principals - } - - var obj map[string]any - if err := yaml.Unmarshal(clusterConfigYAML, &obj); err != nil { - s.log.Error("Failed to unmarshal ClusterConfiguration file", "error", err) - return principals - } - apiServerAny, ok := obj["apiServer"] - if !ok { - s.log.Error("ClusterConfig has no apiServer field") - return principals - } - apiServerCfg, ok := apiServerAny.(map[string]any) - if !ok { - s.log.Error("Unexpected type of ClusterConfig.apiServer field", "type", fmt.Sprintf("%T", apiServerAny)) - return principals - } - certSANsAny, ok := apiServerCfg["certSANs"] - if !ok { - s.log.Error("ClusterConfig.apiServer has no certSANs field") - return principals - } - certSANsListAny, ok := certSANsAny.([]any) - if !ok { - s.log.Error("Unexpected type of ClusterConfig.apiServer.certSANs field", "type", fmt.Sprintf("%T", certSANsAny)) - return principals - } - // Don't append into the input slice. - principals = append([]string{}, principals...) 
- for i, sanAny := range certSANsListAny { - san, ok := sanAny.(string) - if !ok { - s.log.Error("Unexpected type of ClusterConfig.apiServer.certSANs field", "index", i, "type", fmt.Sprintf("%T", sanAny)) - } - principals = append(principals, san) - } - - return principals -} diff --git a/joinservice/internal/server/server_test.go b/joinservice/internal/server/server_test.go index 883660dc3..ff11c7b57 100644 --- a/joinservice/internal/server/server_test.go +++ b/joinservice/internal/server/server_test.go @@ -1,29 +1,24 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package server import ( "context" - "crypto/ed25519" "errors" "testing" "time" "github.com/edgelesssys/constellation/v2/internal/attestation" - "github.com/edgelesssys/constellation/v2/internal/constants" - "github.com/edgelesssys/constellation/v2/internal/file" "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/internal/versions/components" "github.com/edgelesssys/constellation/v2/joinservice/joinproto" - "github.com/spf13/afero" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.uber.org/goleak" - "golang.org/x/crypto/ssh" kubeadmv1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3" ) @@ -34,16 +29,10 @@ func TestMain(m *testing.M) { func TestIssueJoinTicket(t *testing.T) { someErr := errors.New("error") testKey := []byte{0x1, 0x2, 0x3} - testCaKey := make([]byte, ed25519.SeedSize) testCert := []byte{0x4, 0x5, 0x6} measurementSecret := []byte{0x7, 0x8, 0x9} uuid := "uuid" - pubkey, _, err := ed25519.GenerateKey(nil) - require.NoError(t, err) - hostSSHPubKey, err := ssh.NewPublicKey(pubkey) - require.NoError(t, err) - testJoinToken := &kubeadmv1.BootstrapTokenDiscovery{ APIServerEndpoint: "192.0.2.1", CACertHashes: []string{"hash"}, @@ -60,22 +49,19 @@ func TestIssueJoinTicket(t *testing.T) { } testCases := map[string]struct { - isControlPlane bool - kubeadm stubTokenGetter - kms stubKeyGetter - ca stubCA - kubeClient stubKubeClient - missingComponentsReferenceFile bool - missingAdditionalPrincipalsFile bool - missingSSHHostKey bool - wantErr bool + isControlPlane bool + kubeadm stubTokenGetter + kms stubKeyGetter + ca stubCA + kubeClient stubKubeClient + missingComponentsReferenceFile bool + wantErr bool }{ "worker node": { kubeadm: stubTokenGetter{token: testJoinToken}, kms: stubKeyGetter{dataKeys: map[string][]byte{ uuid: testKey, attestation.MeasurementSecretContext: measurementSecret, - constants.SSHCAKeySuffix: testCaKey, }}, ca: stubCA{cert: testCert, nodeName: "node"}, kubeClient: stubKubeClient{getComponentsVal: clusterComponents, getK8sComponentsRefFromNodeVersionCRDVal: "k8s-components-ref"}, @@ -85,7 +71,6 @@ func TestIssueJoinTicket(t *testing.T) { kms: stubKeyGetter{dataKeys: map[string][]byte{ uuid: testKey, attestation.MeasurementSecretContext: measurementSecret, - constants.SSHCAKeySuffix: testCaKey, }}, ca: stubCA{cert: testCert, nodeName: "node"}, kubeClient: stubKubeClient{getComponentsErr: someErr}, @@ -96,7 +81,6 @@ func TestIssueJoinTicket(t *testing.T) { kms: stubKeyGetter{dataKeys: map[string][]byte{ uuid: testKey, attestation.MeasurementSecretContext: measurementSecret, - constants.SSHCAKeySuffix: testCaKey, }}, ca: stubCA{cert: testCert, nodeName: "node", getNameErr: someErr}, kubeClient: stubKubeClient{getComponentsVal: clusterComponents, getK8sComponentsRefFromNodeVersionCRDVal: "k8s-components-ref"}, @@ -107,7 +91,6 @@ func TestIssueJoinTicket(t 
*testing.T) { kms: stubKeyGetter{dataKeys: map[string][]byte{ uuid: testKey, attestation.MeasurementSecretContext: measurementSecret, - constants.SSHCAKeySuffix: testCaKey, }}, ca: stubCA{cert: testCert, nodeName: "node"}, kubeClient: stubKubeClient{getComponentsVal: clusterComponents, addNodeToJoiningNodesErr: someErr, getK8sComponentsRefFromNodeVersionCRDVal: "k8s-components-ref"}, @@ -125,7 +108,6 @@ func TestIssueJoinTicket(t *testing.T) { kms: stubKeyGetter{dataKeys: map[string][]byte{ uuid: testKey, attestation.MeasurementSecretContext: measurementSecret, - constants.SSHCAKeySuffix: testCaKey, }}, ca: stubCA{cert: testCert, nodeName: "node"}, kubeClient: stubKubeClient{getComponentsVal: clusterComponents, getK8sComponentsRefFromNodeVersionCRDVal: "k8s-components-ref"}, @@ -136,7 +118,6 @@ func TestIssueJoinTicket(t *testing.T) { kms: stubKeyGetter{dataKeys: map[string][]byte{ uuid: testKey, attestation.MeasurementSecretContext: measurementSecret, - constants.SSHCAKeySuffix: testCaKey, }}, ca: stubCA{getCertErr: someErr, nodeName: "node"}, kubeClient: stubKubeClient{getComponentsVal: clusterComponents, getK8sComponentsRefFromNodeVersionCRDVal: "k8s-components-ref"}, @@ -151,7 +132,6 @@ func TestIssueJoinTicket(t *testing.T) { kms: stubKeyGetter{dataKeys: map[string][]byte{ uuid: testKey, attestation.MeasurementSecretContext: measurementSecret, - constants.SSHCAKeySuffix: testCaKey, }}, ca: stubCA{cert: testCert, nodeName: "node"}, kubeClient: stubKubeClient{getComponentsVal: clusterComponents, getK8sComponentsRefFromNodeVersionCRDVal: "k8s-components-ref"}, @@ -162,56 +142,11 @@ func TestIssueJoinTicket(t *testing.T) { kms: stubKeyGetter{dataKeys: map[string][]byte{ uuid: testKey, attestation.MeasurementSecretContext: measurementSecret, - constants.SSHCAKeySuffix: testCaKey, }}, ca: stubCA{cert: testCert, nodeName: "node"}, kubeClient: stubKubeClient{getComponentsVal: clusterComponents, getK8sComponentsRefFromNodeVersionCRDVal: "k8s-components-ref"}, wantErr: true, }, - "CA data key to short": { - kubeadm: stubTokenGetter{token: testJoinToken}, - kms: stubKeyGetter{dataKeys: map[string][]byte{ - uuid: testKey, - attestation.MeasurementSecretContext: measurementSecret, - constants.SSHCAKeySuffix: testKey, - }}, - ca: stubCA{cert: testCert, nodeName: "node"}, - kubeClient: stubKubeClient{getComponentsVal: clusterComponents, getK8sComponentsRefFromNodeVersionCRDVal: "k8s-components-ref"}, - wantErr: true, - }, - "CA data key doesn't exist": { - kubeadm: stubTokenGetter{token: testJoinToken}, - kms: stubKeyGetter{dataKeys: map[string][]byte{ - uuid: testKey, - attestation.MeasurementSecretContext: measurementSecret, - }}, - ca: stubCA{cert: testCert, nodeName: "node"}, - kubeClient: stubKubeClient{getComponentsVal: clusterComponents, getK8sComponentsRefFromNodeVersionCRDVal: "k8s-components-ref"}, - wantErr: true, - }, - "Additional principals file is missing": { - kubeadm: stubTokenGetter{token: testJoinToken}, - kms: stubKeyGetter{dataKeys: map[string][]byte{ - uuid: testKey, - attestation.MeasurementSecretContext: measurementSecret, - constants.SSHCAKeySuffix: testCaKey, - }}, - ca: stubCA{cert: testCert, nodeName: "node"}, - kubeClient: stubKubeClient{getComponentsVal: clusterComponents, getK8sComponentsRefFromNodeVersionCRDVal: "k8s-components-ref"}, - missingAdditionalPrincipalsFile: true, - }, - "Host pubkey is missing": { - kubeadm: stubTokenGetter{token: testJoinToken}, - kms: stubKeyGetter{dataKeys: map[string][]byte{ - uuid: testKey, - attestation.MeasurementSecretContext: 
measurementSecret, - constants.SSHCAKeySuffix: testCaKey, - }}, - ca: stubCA{cert: testCert, nodeName: "node"}, - kubeClient: stubKubeClient{getComponentsVal: clusterComponents, getK8sComponentsRefFromNodeVersionCRDVal: "k8s-components-ref"}, - missingSSHHostKey: true, - wantErr: true, - }, } for name, tc := range testCases { @@ -221,11 +156,6 @@ func TestIssueJoinTicket(t *testing.T) { salt := []byte{0xA, 0xB, 0xC} - fh := file.NewHandler(afero.NewMemMapFs()) - if !tc.missingAdditionalPrincipalsFile { - require.NoError(fh.Write("/var/kubeadm-config/ClusterConfiguration", []byte(clusterConfig), file.OptMkdirAll)) - } - api := Server{ measurementSalt: salt, ca: tc.ca, @@ -233,22 +163,13 @@ func TestIssueJoinTicket(t *testing.T) { dataKeyGetter: tc.kms, kubeClient: &tc.kubeClient, log: logger.NewTest(t), - fileHandler: fh, - } - - var keyToSend []byte - if tc.missingSSHHostKey { - keyToSend = nil - } else { - keyToSend = hostSSHPubKey.Marshal() } req := &joinproto.IssueJoinTicketRequest{ DiskUuid: "uuid", IsControlPlane: tc.isControlPlane, - HostPublicKey: keyToSend, } - resp, err := api.IssueJoinTicket(t.Context(), req) + resp, err := api.IssueJoinTicket(context.Background(), req) if tc.wantErr { assert.Error(err) return @@ -307,13 +228,12 @@ func TestIssueRejoinTicker(t *testing.T) { joinTokenGetter: stubTokenGetter{}, dataKeyGetter: tc.keyGetter, log: logger.NewTest(t), - fileHandler: file.NewHandler(afero.NewMemMapFs()), } req := &joinproto.IssueRejoinTicketRequest{ DiskUuid: uuid, } - resp, err := api.IssueRejoinTicket(t.Context(), req) + resp, err := api.IssueRejoinTicket(context.Background(), req) if tc.wantErr { assert.Error(err) return @@ -390,70 +310,3 @@ func (s *stubKubeClient) AddNodeToJoiningNodes(_ context.Context, nodeName strin s.componentsRef = componentsRef return s.addNodeToJoiningNodesErr } - -const clusterConfig = ` -apiServer: - certSANs: - - "*" - extraArgs: - - name: audit-log-maxage - value: "30" - - name: audit-log-maxbackup - value: "10" - - name: audit-log-maxsize - value: "100" - - name: audit-log-path - value: /var/log/kubernetes/audit/audit.log - - name: audit-policy-file - value: /etc/kubernetes/audit-policy.yaml - - name: kubelet-certificate-authority - value: /etc/kubernetes/pki/ca.crt - - name: profiling - value: "false" - - name: tls-cipher-suites - value: TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384 - extraVolumes: - - hostPath: /var/log/kubernetes/audit/ - mountPath: /var/log/kubernetes/audit/ - name: audit-log - pathType: DirectoryOrCreate - - hostPath: /etc/kubernetes/audit-policy.yaml - mountPath: /etc/kubernetes/audit-policy.yaml - name: audit - pathType: File - readOnly: true -apiVersion: kubeadm.k8s.io/v1beta4 -caCertificateValidityPeriod: 87600h0m0s -certificateValidityPeriod: 8760h0m0s -certificatesDir: /etc/kubernetes/pki -clusterName: mr-cilium-7d6460ea 
-controlPlaneEndpoint: 34.8.0.20:6443 -controllerManager: - extraArgs: - - name: cloud-provider - value: external - - name: configure-cloud-routes - value: "false" - - name: flex-volume-plugin-dir - value: /opt/libexec/kubernetes/kubelet-plugins/volume/exec/ - - name: profiling - value: "false" - - name: terminated-pod-gc-threshold - value: "1000" -dns: {} -encryptionAlgorithm: RSA-2048 -etcd: - local: - dataDir: /var/lib/etcd -imageRepository: registry.k8s.io -kind: ClusterConfiguration -kubernetesVersion: v1.30.14 -networking: - dnsDomain: cluster.local - serviceSubnet: 10.96.0.0/12 -proxy: {} -scheduler: - extraArgs: - - name: profiling - value: "false" -` diff --git a/joinservice/internal/watcher/validator.go b/joinservice/internal/watcher/validator.go index 01d1c6f1f..6bf43635e 100644 --- a/joinservice/internal/watcher/validator.go +++ b/joinservice/internal/watcher/validator.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package watcher @@ -79,7 +79,7 @@ func (u *Updatable) Update() error { if err != nil { return fmt.Errorf("unmarshaling config: %w", err) } - u.log.Debug(fmt.Sprintf("New expected measurements: %s", cfg.GetMeasurements().String())) + u.log.Debug(fmt.Sprintf("New expected measurements: %+v", cfg.GetMeasurements())) cfgWithCerts, err := u.configWithCerts(cfg) if err != nil { diff --git a/joinservice/internal/watcher/validator_test.go b/joinservice/internal/watcher/validator_test.go index daecea3a1..efada4028 100644 --- a/joinservice/internal/watcher/validator_test.go +++ b/joinservice/internal/watcher/validator_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package watcher @@ -147,7 +147,7 @@ func TestUpdate(t *testing.T) { // test connection to server clientOID := variant.Dummy{} - resp, err := testConnection(t.Context(), require, server.URL, clientOID) + resp, err := testConnection(require, server.URL, clientOID) require.NoError(err) defer resp.Body.Close() body, err := io.ReadAll(resp.Body) @@ -159,7 +159,7 @@ func TestUpdate(t *testing.T) { require.NoError(validator.Update()) // client connection should fail now, since the server's validator expects a different OID from the client - resp, err = testConnection(t.Context(), require, server.URL, clientOID) + resp, err = testConnection(require, server.URL, clientOID) if err == nil { defer resp.Body.Close() } @@ -230,12 +230,12 @@ func TestUpdateConcurrency(t *testing.T) { wg.Wait() } -func testConnection(ctx context.Context, require *require.Assertions, url string, oid variant.Getter) (*http.Response, error) { +func testConnection(require *require.Assertions, url string, oid variant.Getter) (*http.Response, error) { clientConfig, err := atls.CreateAttestationClientTLSConfig(fakeIssuer{oid}, nil) require.NoError(err) client := http.Client{Transport: &http.Transport{TLSClientConfig: clientConfig}} - req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, http.NoBody) + req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, url, http.NoBody) require.NoError(err) return client.Do(req) } diff --git a/joinservice/internal/watcher/watcher.go b/joinservice/internal/watcher/watcher.go index 17b68ad21..3b0034edc 100644 --- a/joinservice/internal/watcher/watcher.go +++ b/joinservice/internal/watcher/watcher.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 
+SPDX-License-Identifier: AGPL-3.0-only */ // Package watcher implements a file watcher to update an object on file changes. diff --git a/joinservice/internal/watcher/watcher_test.go b/joinservice/internal/watcher/watcher_test.go index 8c8b6dce4..b938ef95b 100644 --- a/joinservice/internal/watcher/watcher_test.go +++ b/joinservice/internal/watcher/watcher_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package watcher diff --git a/joinservice/joinproto/join.pb.go b/joinservice/joinproto/join.pb.go index a620ccbd5..5fe259256 100644 --- a/joinservice/joinproto/join.pb.go +++ b/joinservice/joinproto/join.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.6 -// protoc v5.29.1 +// protoc-gen-go v1.33.0 +// protoc v4.22.1 // source: joinservice/joinproto/join.proto package joinproto @@ -16,7 +16,6 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" - unsafe "unsafe" ) const ( @@ -27,21 +26,22 @@ const ( ) type IssueJoinTicketRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - DiskUuid string `protobuf:"bytes,1,opt,name=disk_uuid,json=diskUuid,proto3" json:"disk_uuid,omitempty"` - CertificateRequest []byte `protobuf:"bytes,2,opt,name=certificate_request,json=certificateRequest,proto3" json:"certificate_request,omitempty"` - IsControlPlane bool `protobuf:"varint,3,opt,name=is_control_plane,json=isControlPlane,proto3" json:"is_control_plane,omitempty"` - HostPublicKey []byte `protobuf:"bytes,4,opt,name=host_public_key,json=hostPublicKey,proto3" json:"host_public_key,omitempty"` - HostCertificatePrincipals []string `protobuf:"bytes,5,rep,name=host_certificate_principals,json=hostCertificatePrincipals,proto3" json:"host_certificate_principals,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DiskUuid string `protobuf:"bytes,1,opt,name=disk_uuid,json=diskUuid,proto3" json:"disk_uuid,omitempty"` + CertificateRequest []byte `protobuf:"bytes,2,opt,name=certificate_request,json=certificateRequest,proto3" json:"certificate_request,omitempty"` + IsControlPlane bool `protobuf:"varint,3,opt,name=is_control_plane,json=isControlPlane,proto3" json:"is_control_plane,omitempty"` } func (x *IssueJoinTicketRequest) Reset() { *x = IssueJoinTicketRequest{} - mi := &file_joinservice_joinproto_join_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_joinservice_joinproto_join_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *IssueJoinTicketRequest) String() string { @@ -52,7 +52,7 @@ func (*IssueJoinTicketRequest) ProtoMessage() {} func (x *IssueJoinTicketRequest) ProtoReflect() protoreflect.Message { mi := &file_joinservice_joinproto_join_proto_msgTypes[0] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -88,22 +88,11 @@ func (x *IssueJoinTicketRequest) GetIsControlPlane() bool { return false } -func (x *IssueJoinTicketRequest) GetHostPublicKey() []byte { - if x != nil { - return x.HostPublicKey - } - return nil -} - -func (x *IssueJoinTicketRequest) GetHostCertificatePrincipals() []string 
{ - if x != nil { - return x.HostCertificatePrincipals - } - return nil -} - type IssueJoinTicketResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + StateDiskKey []byte `protobuf:"bytes,1,opt,name=state_disk_key,json=stateDiskKey,proto3" json:"state_disk_key,omitempty"` MeasurementSalt []byte `protobuf:"bytes,2,opt,name=measurement_salt,json=measurementSalt,proto3" json:"measurement_salt,omitempty"` MeasurementSecret []byte `protobuf:"bytes,3,opt,name=measurement_secret,json=measurementSecret,proto3" json:"measurement_secret,omitempty"` @@ -114,17 +103,15 @@ type IssueJoinTicketResponse struct { ControlPlaneFiles []*ControlPlaneCertOrKey `protobuf:"bytes,8,rep,name=control_plane_files,json=controlPlaneFiles,proto3" json:"control_plane_files,omitempty"` KubernetesVersion string `protobuf:"bytes,9,opt,name=kubernetes_version,json=kubernetesVersion,proto3" json:"kubernetes_version,omitempty"` KubernetesComponents []*components.Component `protobuf:"bytes,10,rep,name=kubernetes_components,json=kubernetesComponents,proto3" json:"kubernetes_components,omitempty"` - AuthorizedCaPublicKey []byte `protobuf:"bytes,11,opt,name=authorized_ca_public_key,json=authorizedCaPublicKey,proto3" json:"authorized_ca_public_key,omitempty"` - HostCertificate []byte `protobuf:"bytes,12,opt,name=host_certificate,json=hostCertificate,proto3" json:"host_certificate,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache } func (x *IssueJoinTicketResponse) Reset() { *x = IssueJoinTicketResponse{} - mi := &file_joinservice_joinproto_join_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_joinservice_joinproto_join_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *IssueJoinTicketResponse) String() string { @@ -135,7 +122,7 @@ func (*IssueJoinTicketResponse) ProtoMessage() {} func (x *IssueJoinTicketResponse) ProtoReflect() protoreflect.Message { mi := &file_joinservice_joinproto_join_proto_msgTypes[1] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -220,33 +207,22 @@ func (x *IssueJoinTicketResponse) GetKubernetesComponents() []*components.Compon return nil } -func (x *IssueJoinTicketResponse) GetAuthorizedCaPublicKey() []byte { - if x != nil { - return x.AuthorizedCaPublicKey - } - return nil -} - -func (x *IssueJoinTicketResponse) GetHostCertificate() []byte { - if x != nil { - return x.HostCertificate - } - return nil -} - type ControlPlaneCertOrKey struct { - state protoimpl.MessageState `protogen:"open.v1"` - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` } func (x *ControlPlaneCertOrKey) Reset() { *x = ControlPlaneCertOrKey{} - mi := &file_joinservice_joinproto_join_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if 
protoimpl.UnsafeEnabled { + mi := &file_joinservice_joinproto_join_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ControlPlaneCertOrKey) String() string { @@ -257,7 +233,7 @@ func (*ControlPlaneCertOrKey) ProtoMessage() {} func (x *ControlPlaneCertOrKey) ProtoReflect() protoreflect.Message { mi := &file_joinservice_joinproto_join_proto_msgTypes[2] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -287,17 +263,20 @@ func (x *ControlPlaneCertOrKey) GetData() []byte { } type IssueRejoinTicketRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - DiskUuid string `protobuf:"bytes,1,opt,name=disk_uuid,json=diskUuid,proto3" json:"disk_uuid,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DiskUuid string `protobuf:"bytes,1,opt,name=disk_uuid,json=diskUuid,proto3" json:"disk_uuid,omitempty"` } func (x *IssueRejoinTicketRequest) Reset() { *x = IssueRejoinTicketRequest{} - mi := &file_joinservice_joinproto_join_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_joinservice_joinproto_join_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *IssueRejoinTicketRequest) String() string { @@ -308,7 +287,7 @@ func (*IssueRejoinTicketRequest) ProtoMessage() {} func (x *IssueRejoinTicketRequest) ProtoReflect() protoreflect.Message { mi := &file_joinservice_joinproto_join_proto_msgTypes[3] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -331,18 +310,21 @@ func (x *IssueRejoinTicketRequest) GetDiskUuid() string { } type IssueRejoinTicketResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - StateDiskKey []byte `protobuf:"bytes,1,opt,name=state_disk_key,json=stateDiskKey,proto3" json:"state_disk_key,omitempty"` - MeasurementSecret []byte `protobuf:"bytes,2,opt,name=measurement_secret,json=measurementSecret,proto3" json:"measurement_secret,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + StateDiskKey []byte `protobuf:"bytes,1,opt,name=state_disk_key,json=stateDiskKey,proto3" json:"state_disk_key,omitempty"` + MeasurementSecret []byte `protobuf:"bytes,2,opt,name=measurement_secret,json=measurementSecret,proto3" json:"measurement_secret,omitempty"` } func (x *IssueRejoinTicketResponse) Reset() { *x = IssueRejoinTicketResponse{} - mi := &file_joinservice_joinproto_join_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_joinservice_joinproto_join_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *IssueRejoinTicketResponse) String() string { @@ -353,7 +335,7 @@ func (*IssueRejoinTicketResponse) ProtoMessage() {} func (x *IssueRejoinTicketResponse) ProtoReflect() protoreflect.Message { mi := &file_joinservice_joinproto_join_proto_msgTypes[4] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -384,55 +366,102 @@ func (x *IssueRejoinTicketResponse) GetMeasurementSecret() []byte { var File_joinservice_joinproto_join_proto protoreflect.FileDescriptor -const file_joinservice_joinproto_join_proto_rawDesc = "" + - "\n" + - " joinservice/joinproto/join.proto\x12\x04join\x1a-internal/versions/components/components.proto\"\xf8\x01\n" + - "\x16IssueJoinTicketRequest\x12\x1b\n" + - "\tdisk_uuid\x18\x01 \x01(\tR\bdiskUuid\x12/\n" + - "\x13certificate_request\x18\x02 \x01(\fR\x12certificateRequest\x12(\n" + - "\x10is_control_plane\x18\x03 \x01(\bR\x0eisControlPlane\x12&\n" + - "\x0fhost_public_key\x18\x04 \x01(\fR\rhostPublicKey\x12>\n" + - "\x1bhost_certificate_principals\x18\x05 \x03(\tR\x19hostCertificatePrincipals\"\xf2\x04\n" + - "\x17IssueJoinTicketResponse\x12$\n" + - "\x0estate_disk_key\x18\x01 \x01(\fR\fstateDiskKey\x12)\n" + - "\x10measurement_salt\x18\x02 \x01(\fR\x0fmeasurementSalt\x12-\n" + - "\x12measurement_secret\x18\x03 \x01(\fR\x11measurementSecret\x12!\n" + - "\fkubelet_cert\x18\x04 \x01(\fR\vkubeletCert\x12.\n" + - "\x13api_server_endpoint\x18\x05 \x01(\tR\x11apiServerEndpoint\x12\x14\n" + - "\x05token\x18\x06 \x01(\tR\x05token\x12>\n" + - "\x1cdiscovery_token_ca_cert_hash\x18\a \x01(\tR\x18discoveryTokenCaCertHash\x12O\n" + - "\x13control_plane_files\x18\b \x03(\v2\x1f.join.control_plane_cert_or_keyR\x11controlPlaneFiles\x12-\n" + - "\x12kubernetes_version\x18\t \x01(\tR\x11kubernetesVersion\x12J\n" + - "\x15kubernetes_components\x18\n" + - " \x03(\v2\x15.components.ComponentR\x14kubernetesComponents\x127\n" + - "\x18authorized_ca_public_key\x18\v \x01(\fR\x15authorizedCaPublicKey\x12)\n" + - "\x10host_certificate\x18\f \x01(\fR\x0fhostCertificate\"C\n" + - "\x19control_plane_cert_or_key\x12\x12\n" + - "\x04name\x18\x01 \x01(\tR\x04name\x12\x12\n" + - "\x04data\x18\x02 \x01(\fR\x04data\"7\n" + - "\x18IssueRejoinTicketRequest\x12\x1b\n" + - "\tdisk_uuid\x18\x01 \x01(\tR\bdiskUuid\"p\n" + - "\x19IssueRejoinTicketResponse\x12$\n" + - "\x0estate_disk_key\x18\x01 \x01(\fR\fstateDiskKey\x12-\n" + - "\x12measurement_secret\x18\x02 \x01(\fR\x11measurementSecret2\xab\x01\n" + - "\x03API\x12N\n" + - "\x0fIssueJoinTicket\x12\x1c.join.IssueJoinTicketRequest\x1a\x1d.join.IssueJoinTicketResponse\x12T\n" + - "\x11IssueRejoinTicket\x12\x1e.join.IssueRejoinTicketRequest\x1a\x1f.join.IssueRejoinTicketResponseB?Z=github.com/edgelesssys/constellation/v2/joinservice/joinprotob\x06proto3" +var file_joinservice_joinproto_join_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x6a, 0x6f, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x6a, 0x6f, + 0x69, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6a, 0x6f, 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x04, 0x6a, 0x6f, 0x69, 0x6e, 0x1a, 0x2d, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x63, 0x6f, 0x6d, 0x70, + 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x2f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x90, 0x01, 0x0a, 0x16, 0x49, 0x73, 0x73, 0x75, + 0x65, 0x4a, 0x6f, 0x69, 0x6e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x55, 0x75, 0x69, 0x64, 0x12, + 0x2f, 0x0a, 0x13, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 
0x61, 0x74, 0x65, 0x5f, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x63, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x28, 0x0a, 0x10, 0x69, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x5f, 0x70, + 0x6c, 0x61, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 0x73, 0x43, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x50, 0x6c, 0x61, 0x6e, 0x65, 0x22, 0x8e, 0x04, 0x0a, 0x17, 0x49, + 0x73, 0x73, 0x75, 0x65, 0x4a, 0x6f, 0x69, 0x6e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, + 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x44, 0x69, 0x73, 0x6b, 0x4b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x10, + 0x6d, 0x65, 0x61, 0x73, 0x75, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x61, 0x6c, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x6d, 0x65, 0x61, 0x73, 0x75, 0x72, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x53, 0x61, 0x6c, 0x74, 0x12, 0x2d, 0x0a, 0x12, 0x6d, 0x65, 0x61, 0x73, 0x75, + 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x11, 0x6d, 0x65, 0x61, 0x73, 0x75, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x6b, 0x75, 0x62, 0x65, 0x6c, 0x65, + 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x6b, 0x75, + 0x62, 0x65, 0x6c, 0x65, 0x74, 0x43, 0x65, 0x72, 0x74, 0x12, 0x2e, 0x0a, 0x13, 0x61, 0x70, 0x69, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x61, 0x70, 0x69, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, + 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, + 0x3e, 0x0a, 0x1c, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x5f, 0x74, 0x6f, 0x6b, + 0x65, 0x6e, 0x5f, 0x63, 0x61, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, + 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x43, 0x61, 0x43, 0x65, 0x72, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, + 0x4f, 0x0a, 0x13, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x65, + 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6a, + 0x6f, 0x69, 0x6e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x5f, 0x70, 0x6c, 0x61, 0x6e, + 0x65, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x6f, 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x52, 0x11, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x50, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, + 0x12, 0x2d, 0x0a, 0x12, 0x6b, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x65, 0x73, 0x5f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x6b, 0x75, + 0x62, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x65, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x4a, 0x0a, 0x15, 0x6b, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, + 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, + 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x43, 0x6f, 0x6d, 
0x70, + 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x52, 0x14, 0x6b, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x65, + 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x43, 0x0a, 0x19, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x5f, 0x63, 0x65, 0x72, + 0x74, 0x5f, 0x6f, 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, + 0x22, 0x37, 0x0a, 0x18, 0x49, 0x73, 0x73, 0x75, 0x65, 0x52, 0x65, 0x6a, 0x6f, 0x69, 0x6e, 0x54, + 0x69, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, + 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x64, 0x69, 0x73, 0x6b, 0x55, 0x75, 0x69, 0x64, 0x22, 0x70, 0x0a, 0x19, 0x49, 0x73, 0x73, + 0x75, 0x65, 0x52, 0x65, 0x6a, 0x6f, 0x69, 0x6e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, + 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x44, 0x69, 0x73, 0x6b, 0x4b, 0x65, 0x79, 0x12, 0x2d, 0x0a, 0x12, + 0x6d, 0x65, 0x61, 0x73, 0x75, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x11, 0x6d, 0x65, 0x61, 0x73, 0x75, 0x72, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x32, 0xab, 0x01, 0x0a, 0x03, + 0x41, 0x50, 0x49, 0x12, 0x4e, 0x0a, 0x0f, 0x49, 0x73, 0x73, 0x75, 0x65, 0x4a, 0x6f, 0x69, 0x6e, + 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1c, 0x2e, 0x6a, 0x6f, 0x69, 0x6e, 0x2e, 0x49, 0x73, + 0x73, 0x75, 0x65, 0x4a, 0x6f, 0x69, 0x6e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x6a, 0x6f, 0x69, 0x6e, 0x2e, 0x49, 0x73, 0x73, 0x75, + 0x65, 0x4a, 0x6f, 0x69, 0x6e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x11, 0x49, 0x73, 0x73, 0x75, 0x65, 0x52, 0x65, 0x6a, 0x6f, + 0x69, 0x6e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1e, 0x2e, 0x6a, 0x6f, 0x69, 0x6e, 0x2e, + 0x49, 0x73, 0x73, 0x75, 0x65, 0x52, 0x65, 0x6a, 0x6f, 0x69, 0x6e, 0x54, 0x69, 0x63, 0x6b, 0x65, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x6a, 0x6f, 0x69, 0x6e, 0x2e, + 0x49, 0x73, 0x73, 0x75, 0x65, 0x52, 0x65, 0x6a, 0x6f, 0x69, 0x6e, 0x54, 0x69, 0x63, 0x6b, 0x65, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x3f, 0x5a, 0x3d, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x64, 0x67, 0x65, 0x6c, 0x65, 0x73, 0x73, + 0x73, 0x79, 0x73, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x65, 0x6c, 0x6c, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x2f, 0x76, 0x32, 0x2f, 0x6a, 0x6f, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x2f, 0x6a, 0x6f, 0x69, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} var ( file_joinservice_joinproto_join_proto_rawDescOnce sync.Once - file_joinservice_joinproto_join_proto_rawDescData []byte + file_joinservice_joinproto_join_proto_rawDescData = file_joinservice_joinproto_join_proto_rawDesc ) func file_joinservice_joinproto_join_proto_rawDescGZIP() []byte { file_joinservice_joinproto_join_proto_rawDescOnce.Do(func() { - 
file_joinservice_joinproto_join_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_joinservice_joinproto_join_proto_rawDesc), len(file_joinservice_joinproto_join_proto_rawDesc))) + file_joinservice_joinproto_join_proto_rawDescData = protoimpl.X.CompressGZIP(file_joinservice_joinproto_join_proto_rawDescData) }) return file_joinservice_joinproto_join_proto_rawDescData } var file_joinservice_joinproto_join_proto_msgTypes = make([]protoimpl.MessageInfo, 5) -var file_joinservice_joinproto_join_proto_goTypes = []any{ +var file_joinservice_joinproto_join_proto_goTypes = []interface{}{ (*IssueJoinTicketRequest)(nil), // 0: join.IssueJoinTicketRequest (*IssueJoinTicketResponse)(nil), // 1: join.IssueJoinTicketResponse (*ControlPlaneCertOrKey)(nil), // 2: join.control_plane_cert_or_key @@ -459,11 +488,73 @@ func file_joinservice_joinproto_join_proto_init() { if File_joinservice_joinproto_join_proto != nil { return } + if !protoimpl.UnsafeEnabled { + file_joinservice_joinproto_join_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*IssueJoinTicketRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_joinservice_joinproto_join_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*IssueJoinTicketResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_joinservice_joinproto_join_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControlPlaneCertOrKey); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_joinservice_joinproto_join_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*IssueRejoinTicketRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_joinservice_joinproto_join_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*IssueRejoinTicketResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_joinservice_joinproto_join_proto_rawDesc), len(file_joinservice_joinproto_join_proto_rawDesc)), + RawDescriptor: file_joinservice_joinproto_join_proto_rawDesc, NumEnums: 0, NumMessages: 5, NumExtensions: 0, @@ -474,6 +565,7 @@ func file_joinservice_joinproto_join_proto_init() { MessageInfos: file_joinservice_joinproto_join_proto_msgTypes, }.Build() File_joinservice_joinproto_join_proto = out.File + file_joinservice_joinproto_join_proto_rawDesc = nil file_joinservice_joinproto_join_proto_goTypes = nil file_joinservice_joinproto_join_proto_depIdxs = nil } diff --git a/joinservice/joinproto/join.proto b/joinservice/joinproto/join.proto index eed1163a6..2a910a039 100644 --- a/joinservice/joinproto/join.proto +++ b/joinservice/joinproto/join.proto @@ -20,10 +20,6 @@ message IssueJoinTicketRequest { bytes certificate_request = 2; // is_control_plane indicates whether the node is a control-plane node. 
bool is_control_plane = 3; - // host_public_key is the public host key that should be signed. - bytes host_public_key = 4; - // host_certificate_principals are principals that should be added to the host certificate. - repeated string host_certificate_principals = 5; } message IssueJoinTicketResponse { @@ -49,10 +45,6 @@ message IssueJoinTicketResponse { string kubernetes_version = 9; // kubernetes_components is a list of components to install on the node. repeated components.Component kubernetes_components = 10; - // authorized_ca_public_key is an ssh ca key that can be used to connect to a node in case of an emergency. - bytes authorized_ca_public_key = 11; - // host_certificate is the certificate that can be used to verify a nodes host key. - bytes host_certificate = 12; } message control_plane_cert_or_key { diff --git a/keyservice/cmd/main.go b/keyservice/cmd/main.go index 0c3193b16..756509a93 100644 --- a/keyservice/cmd/main.go +++ b/keyservice/cmd/main.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package main diff --git a/keyservice/internal/server/BUILD.bazel b/keyservice/internal/server/BUILD.bazel index 22110e200..756c76f6d 100644 --- a/keyservice/internal/server/BUILD.bazel +++ b/keyservice/internal/server/BUILD.bazel @@ -12,7 +12,7 @@ go_library( "//internal/kms/kms", "//internal/logger", "//keyservice/keyserviceproto", - "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//:go_default_library", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//status", ], diff --git a/keyservice/internal/server/server.go b/keyservice/internal/server/server.go index 0391cd46b..b7517bceb 100644 --- a/keyservice/internal/server/server.go +++ b/keyservice/internal/server/server.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package server implements an API to manage encryption keys. 
@@ -48,11 +48,9 @@ func (s *Server) Run(port string) error { return fmt.Errorf("failed to listen on port %s: %v", port, err) } - grpcLog := logger.GRPCLogger(s.log) - logger.ReplaceGRPCLogger(grpcLog) - - server := grpc.NewServer(logger.GetServerUnaryInterceptor(grpcLog)) + server := grpc.NewServer(logger.GetServerUnaryInterceptor(s.log.WithGroup("gRPC"))) keyserviceproto.RegisterAPIServer(server, s) + logger.ReplaceGRPCLogger(slog.New(logger.NewLevelHandler(slog.LevelWarn, s.log.Handler())).WithGroup("gRPC")) // start the server s.log.Info(fmt.Sprintf("Starting Constellation key management service on %s", listener.Addr().String())) diff --git a/keyservice/internal/server/server_test.go b/keyservice/internal/server/server_test.go index 505398e97..f5c2b2d3b 100644 --- a/keyservice/internal/server/server_test.go +++ b/keyservice/internal/server/server_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package server @@ -32,23 +32,23 @@ func TestGetDataKey(t *testing.T) { kms := &stubKMS{derivedKey: []byte{0x0, 0x1, 0x2, 0x3, 0x4, 0x5}} api := New(log, kms) - res, err := api.GetDataKey(t.Context(), &keyserviceproto.GetDataKeyRequest{DataKeyId: "1", Length: 32}) + res, err := api.GetDataKey(context.Background(), &keyserviceproto.GetDataKeyRequest{DataKeyId: "1", Length: 32}) require.NoError(err) assert.Equal(kms.derivedKey, res.DataKey) // Test no data key id - res, err = api.GetDataKey(t.Context(), &keyserviceproto.GetDataKeyRequest{Length: 32}) + res, err = api.GetDataKey(context.Background(), &keyserviceproto.GetDataKeyRequest{Length: 32}) require.Error(err) assert.Nil(res) // Test no / zero key length - res, err = api.GetDataKey(t.Context(), &keyserviceproto.GetDataKeyRequest{DataKeyId: "1"}) + res, err = api.GetDataKey(context.Background(), &keyserviceproto.GetDataKeyRequest{DataKeyId: "1"}) require.Error(err) assert.Nil(res) // Test derive key error api = New(log, &stubKMS{deriveKeyErr: errors.New("error")}) - res, err = api.GetDataKey(t.Context(), &keyserviceproto.GetDataKeyRequest{DataKeyId: "1", Length: 32}) + res, err = api.GetDataKey(context.Background(), &keyserviceproto.GetDataKeyRequest{DataKeyId: "1", Length: 32}) assert.Error(err) assert.Nil(res) } diff --git a/keyservice/keyserviceproto/keyservice.pb.go b/keyservice/keyserviceproto/keyservice.pb.go index ee7a23ff2..65beb0c55 100644 --- a/keyservice/keyserviceproto/keyservice.pb.go +++ b/keyservice/keyserviceproto/keyservice.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.6 -// protoc v5.29.1 +// protoc-gen-go v1.33.0 +// protoc v4.22.1 // source: keyservice/keyserviceproto/keyservice.proto package keyserviceproto @@ -15,7 +15,6 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" - unsafe "unsafe" ) const ( @@ -26,18 +25,21 @@ const ( ) type GetDataKeyRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - DataKeyId string `protobuf:"bytes,1,opt,name=data_key_id,json=dataKeyId,proto3" json:"data_key_id,omitempty"` - Length uint32 `protobuf:"varint,2,opt,name=length,proto3" json:"length,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DataKeyId string `protobuf:"bytes,1,opt,name=data_key_id,json=dataKeyId,proto3" json:"data_key_id,omitempty"` + Length uint32 `protobuf:"varint,2,opt,name=length,proto3" json:"length,omitempty"` } func (x *GetDataKeyRequest) Reset() { *x = GetDataKeyRequest{} - mi := &file_keyservice_keyserviceproto_keyservice_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_keyservice_keyserviceproto_keyservice_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *GetDataKeyRequest) String() string { @@ -48,7 +50,7 @@ func (*GetDataKeyRequest) ProtoMessage() {} func (x *GetDataKeyRequest) ProtoReflect() protoreflect.Message { mi := &file_keyservice_keyserviceproto_keyservice_proto_msgTypes[0] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -78,17 +80,20 @@ func (x *GetDataKeyRequest) GetLength() uint32 { } type GetDataKeyResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - DataKey []byte `protobuf:"bytes,1,opt,name=data_key,json=dataKey,proto3" json:"data_key,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DataKey []byte `protobuf:"bytes,1,opt,name=data_key,json=dataKey,proto3" json:"data_key,omitempty"` } func (x *GetDataKeyResponse) Reset() { *x = GetDataKeyResponse{} - mi := &file_keyservice_keyserviceproto_keyservice_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_keyservice_keyserviceproto_keyservice_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *GetDataKeyResponse) String() string { @@ -99,7 +104,7 @@ func (*GetDataKeyResponse) ProtoMessage() {} func (x *GetDataKeyResponse) ProtoReflect() protoreflect.Message { mi := &file_keyservice_keyserviceproto_keyservice_proto_msgTypes[1] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -123,32 +128,44 @@ func (x *GetDataKeyResponse) GetDataKey() []byte { var File_keyservice_keyserviceproto_keyservice_proto protoreflect.FileDescriptor -const file_keyservice_keyserviceproto_keyservice_proto_rawDesc = "" + - "\n" + - "+keyservice/keyserviceproto/keyservice.proto\x12\x03kms\"K\n" + - "\x11GetDataKeyRequest\x12\x1e\n" + - "\vdata_key_id\x18\x01 \x01(\tR\tdataKeyId\x12\x16\n" + - "\x06length\x18\x02 
\x01(\rR\x06length\"/\n" + - "\x12GetDataKeyResponse\x12\x19\n" + - "\bdata_key\x18\x01 \x01(\fR\adataKey2D\n" + - "\x03API\x12=\n" + - "\n" + - "GetDataKey\x12\x16.kms.GetDataKeyRequest\x1a\x17.kms.GetDataKeyResponseBDZBgithub.com/edgelesssys/constellation/v2/keyservice/keyserviceprotob\x06proto3" +var file_keyservice_keyserviceproto_keyservice_proto_rawDesc = []byte{ + 0x0a, 0x2b, 0x6b, 0x65, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x6b, 0x65, 0x79, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6b, 0x65, 0x79, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x03, 0x6b, + 0x6d, 0x73, 0x22, 0x4b, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, + 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x61, + 0x74, 0x61, 0x4b, 0x65, 0x79, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, + 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x22, + 0x2f, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, + 0x32, 0x44, 0x0a, 0x03, 0x41, 0x50, 0x49, 0x12, 0x3d, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x44, 0x61, + 0x74, 0x61, 0x4b, 0x65, 0x79, 0x12, 0x16, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x47, 0x65, 0x74, 0x44, + 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, + 0x6b, 0x6d, 0x73, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x44, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x64, 0x67, 0x65, 0x6c, 0x65, 0x73, 0x73, 0x73, 0x79, 0x73, + 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x65, 0x6c, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x76, + 0x32, 0x2f, 0x6b, 0x65, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x6b, 0x65, 0x79, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} var ( file_keyservice_keyserviceproto_keyservice_proto_rawDescOnce sync.Once - file_keyservice_keyserviceproto_keyservice_proto_rawDescData []byte + file_keyservice_keyserviceproto_keyservice_proto_rawDescData = file_keyservice_keyserviceproto_keyservice_proto_rawDesc ) func file_keyservice_keyserviceproto_keyservice_proto_rawDescGZIP() []byte { file_keyservice_keyserviceproto_keyservice_proto_rawDescOnce.Do(func() { - file_keyservice_keyserviceproto_keyservice_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_keyservice_keyserviceproto_keyservice_proto_rawDesc), len(file_keyservice_keyserviceproto_keyservice_proto_rawDesc))) + file_keyservice_keyserviceproto_keyservice_proto_rawDescData = protoimpl.X.CompressGZIP(file_keyservice_keyserviceproto_keyservice_proto_rawDescData) }) return file_keyservice_keyserviceproto_keyservice_proto_rawDescData } var file_keyservice_keyserviceproto_keyservice_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_keyservice_keyserviceproto_keyservice_proto_goTypes = []any{ +var file_keyservice_keyserviceproto_keyservice_proto_goTypes = []interface{}{ (*GetDataKeyRequest)(nil), // 0: kms.GetDataKeyRequest 
(*GetDataKeyResponse)(nil), // 1: kms.GetDataKeyResponse } @@ -167,11 +184,37 @@ func file_keyservice_keyserviceproto_keyservice_proto_init() { if File_keyservice_keyserviceproto_keyservice_proto != nil { return } + if !protoimpl.UnsafeEnabled { + file_keyservice_keyserviceproto_keyservice_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetDataKeyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_keyservice_keyserviceproto_keyservice_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetDataKeyResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_keyservice_keyserviceproto_keyservice_proto_rawDesc), len(file_keyservice_keyserviceproto_keyservice_proto_rawDesc)), + RawDescriptor: file_keyservice_keyserviceproto_keyservice_proto_rawDesc, NumEnums: 0, NumMessages: 2, NumExtensions: 0, @@ -182,6 +225,7 @@ func file_keyservice_keyserviceproto_keyservice_proto_init() { MessageInfos: file_keyservice_keyserviceproto_keyservice_proto_msgTypes, }.Build() File_keyservice_keyserviceproto_keyservice_proto = out.File + file_keyservice_keyserviceproto_keyservice_proto_rawDesc = nil file_keyservice_keyserviceproto_keyservice_proto_goTypes = nil file_keyservice_keyserviceproto_keyservice_proto_depIdxs = nil } diff --git a/measurement-reader/cmd/main.go b/measurement-reader/cmd/main.go index 144babd6f..15ce68d2e 100644 --- a/measurement-reader/cmd/main.go +++ b/measurement-reader/cmd/main.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package main @@ -30,7 +30,7 @@ func main() { var m []sorted.Measurement switch attestationVariant { - case variant.AWSNitroTPM{}, variant.AWSSEVSNP{}, variant.AzureSEVSNP{}, variant.AzureTrustedLaunch{}, variant.GCPSEVES{}, variant.GCPSEVSNP{}, variant.QEMUVTPM{}: + case variant.AWSNitroTPM{}, variant.AWSSEVSNP{}, variant.AzureSEVSNP{}, variant.AzureTrustedLaunch{}, variant.GCPSEVES{}, variant.QEMUVTPM{}: m, err = tpm.Measurements() if err != nil { log.With(slog.Any("error", err)).Error("Failed to read TPM measurements") diff --git a/measurement-reader/internal/sorted/sorted.go b/measurement-reader/internal/sorted/sorted.go index 21970be9e..242aeff92 100644 --- a/measurement-reader/internal/sorted/sorted.go +++ b/measurement-reader/internal/sorted/sorted.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package sorted defines a type for print-friendly sorted measurements and allows sorting TPM and TDX measurements. 
diff --git a/measurement-reader/internal/sorted/sorted_test.go b/measurement-reader/internal/sorted/sorted_test.go index 2c5ee11bd..6f64fc418 100644 --- a/measurement-reader/internal/sorted/sorted_test.go +++ b/measurement-reader/internal/sorted/sorted_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package sorted diff --git a/measurement-reader/internal/tdx/tdx.go b/measurement-reader/internal/tdx/tdx.go index 62b8ed5b9..9c90aa8df 100644 --- a/measurement-reader/internal/tdx/tdx.go +++ b/measurement-reader/internal/tdx/tdx.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package tdx reads measurements from an Intel TDX guest. diff --git a/measurement-reader/internal/tpm/tpm.go b/measurement-reader/internal/tpm/tpm.go index b5c68b85d..c9de69533 100644 --- a/measurement-reader/internal/tpm/tpm.go +++ b/measurement-reader/internal/tpm/tpm.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package tpm reads measurements from a TPM. diff --git a/nix/cc/cryptsetup.nix b/nix/cc/cryptsetup.nix index 1aa8c8c37..9687e1019 100644 --- a/nix/cc/cryptsetup.nix +++ b/nix/cc/cryptsetup.nix @@ -11,7 +11,7 @@ pkgs.symlinkJoin { paths = packages; buildInputs = packages; postBuild = '' - tar -cf $out/closure.tar --mtime="@$SOURCE_DATE_EPOCH" --sort=name --hard-dereference ${closure} + tar -cf $out/closure.tar --mtime="@$SOURCE_DATE_EPOCH" --sort=name ${closure} echo "${rpath}" > $out/rpath cp ${cc}/nix-support/dynamic-linker $out/dynamic-linker ''; diff --git a/nix/cc/libvirt.nix b/nix/cc/libvirt.nix index afb739aaf..47660a97f 100644 --- a/nix/cc/libvirt.nix +++ b/nix/cc/libvirt.nix @@ -11,7 +11,7 @@ pkgs.symlinkJoin { paths = packages; buildInputs = packages; postBuild = '' - tar -cf $out/closure.tar --mtime="@$SOURCE_DATE_EPOCH" --sort=name --hard-dereference ${closure} + tar -cf $out/closure.tar --mtime="@$SOURCE_DATE_EPOCH" --sort=name ${closure} tar --transform 's+^./+bin/+' -cf $out/bin-linktree.tar --mtime="@$SOURCE_DATE_EPOCH" --sort=name -C $out/bin . 
echo "${rpath}" > $out/rpath cp ${cc}/nix-support/dynamic-linker $out/dynamic-linker diff --git a/nix/container/libvirtd_base.nix b/nix/container/libvirtd_base.nix index f58a2366a..5ebaf3e91 100644 --- a/nix/container/libvirtd_base.nix +++ b/nix/container/libvirtd_base.nix @@ -62,22 +62,12 @@ let ''; startScript = pkgsLinux.writeShellApplication { name = "start.sh"; - runtimeInputs = let nixpkgs24_11 = import "${pkgs.fetchFromGitHub { - # Pinned release which contains swtpm v0.8.2 - # Newer versions of NixOS package swtpm v0.10.0 with https://github.com/stefanberger/swtpm/pull/896 - # This release breaks MiniConstellation since either libvirt, or the Terraform libvirt provider - # tries to apply the TPM config twice, resulting in an error during the setup phase - owner = "NixOS"; - repo = "nixpkgs"; - tag = "24.11"; - hash = "sha256-CqCX4JG7UiHvkrBTpYC3wcEurvbtTADLbo3Ns2CEoL8="; - }}"{system = "x86_64-linux";}; in - with pkgsLinux; [ + runtimeInputs = with pkgsLinux; [ shadow coreutils libvirt qemu - nixpkgs24_11.swtpm + swtpm ]; text = '' set -euo pipefail diff --git a/nix/container/vpn/sidecar.sh b/nix/container/vpn/sidecar.sh index a77c8bc7a..c6720f41b 100755 --- a/nix/container/vpn/sidecar.sh +++ b/nix/container/vpn/sidecar.sh @@ -30,17 +30,10 @@ reconcile_sip_verification() { fi } -optional_mtu() { - if [ -n "${VPN_MTU}" ]; then - printf "mtu %s" "${VPN_MTU}" - fi -} - # Set up the route from the node network namespace to the VPN pod. reconcile_route() { for cidr in ${VPN_PEER_CIDRS}; do - # shellcheck disable=SC2046 # Word splitting is intentional here. - nsenter -t 1 -n ip route replace "${cidr}" via "$(myip)" $(optional_mtu) + nsenter -t 1 -n ip route replace "${cidr}" via "$(myip)" done } diff --git a/nix/lib/by-name/constellationRepoRoot/package.nix b/nix/lib/by-name/constellationRepoRoot/package.nix deleted file mode 100644 index 38c9cdefa..000000000 --- a/nix/lib/by-name/constellationRepoRoot/package.nix +++ /dev/null @@ -1,3 +0,0 @@ -# Returns a reference to the root path of the Constellation repository. - -_: ../../../../. diff --git a/nix/lib/by-name/constellationRepoRootSrc/package.nix b/nix/lib/by-name/constellationRepoRootSrc/package.nix deleted file mode 100644 index 462df7ab1..000000000 --- a/nix/lib/by-name/constellationRepoRootSrc/package.nix +++ /dev/null @@ -1,12 +0,0 @@ -# Returns a package set originating from the root of the Constellation repository. -# The `files` attribute is a list of paths relative to the root of the repository. - -{ lib }: -files: -let - filteredFiles = lib.map (subpath: lib.path.append lib.constellationRepoRoot subpath) files; -in -lib.fileset.toSource { - root = lib.constellationRepoRoot; - fileset = lib.fileset.unions filteredFiles; -} diff --git a/nix/lib/by-name/constellationVersion/package.nix b/nix/lib/by-name/constellationVersion/package.nix deleted file mode 100644 index b65ebe85e..000000000 --- a/nix/lib/by-name/constellationVersion/package.nix +++ /dev/null @@ -1,3 +0,0 @@ -# Returns the current Continuum version, as defined in `version.txt`. 
- -{ lib }: builtins.readFile (lib.path.append lib.constellationRepoRoot "version.txt") diff --git a/nix/lib/default.nix b/nix/lib/default.nix deleted file mode 100644 index bd731cbf3..000000000 --- a/nix/lib/default.nix +++ /dev/null @@ -1,8 +0,0 @@ -{ - lib, - callPackage, -}: -lib.packagesFromDirectoryRecursive { - inherit callPackage; - directory = ./by-name; -} diff --git a/nix/packages/by-name/buildConstellationGoModule/package.nix b/nix/packages/by-name/buildConstellationGoModule/package.nix deleted file mode 100644 index b58d020e0..000000000 --- a/nix/packages/by-name/buildConstellationGoModule/package.nix +++ /dev/null @@ -1,26 +0,0 @@ -# A 'wrapped' Go builder for Constellation, which doesn't require a `vendorHash` to be set in each package. -# Instead, one central vendor hash is set here, and all packages inherit it. - -{ - buildGo124Module, - constellation-canonical-go-package, -}: -args: -(buildGo124Module ( - { - # We run tests in CI, so don't run them at build time. - doCheck = false; - - # Disable CGO by default. - env.CGO_ENABLED = "0"; - } - // args -)).overrideAttrs - (_oldAttrs: { - inherit (constellation-canonical-go-package) - goModules - vendorHash - proxyVendor - deleteVendor - ; - }) diff --git a/nix/packages/by-name/constellation-canonical-go-package/package.nix b/nix/packages/by-name/constellation-canonical-go-package/package.nix deleted file mode 100644 index 0fd299ffd..000000000 --- a/nix/packages/by-name/constellation-canonical-go-package/package.nix +++ /dev/null @@ -1,19 +0,0 @@ -{ - lib, - buildGo124Module, -}: -buildGo124Module { - pname = "constellation-canonical-go-package"; - version = lib.constellationVersion; - - src = lib.constellationRepoRootSrc [ - "go.mod" - "go.sum" - ]; - - vendorHash = "sha256-McWiTTz1HTdG3x0LI87CF6oTRFtxSiV3LCCBJb9YG4U="; - - doCheck = false; - - proxyVendor = true; -} diff --git a/nix/packages/default.nix b/nix/packages/default.nix deleted file mode 100644 index bd731cbf3..000000000 --- a/nix/packages/default.nix +++ /dev/null @@ -1,8 +0,0 @@ -{ - lib, - callPackage, -}: -lib.packagesFromDirectoryRecursive { - inherit callPackage; - directory = ./by-name; -} diff --git a/nix/shells/default.nix b/nix/shells/default.nix index 8913bc25b..3d6e5a3c0 100644 --- a/nix/shells/default.nix +++ b/nix/shells/default.nix @@ -1,13 +1,7 @@ -{ - mkShell, - git, - bazel_7, - go, -}: +{ mkShell, git, bazel_7 }: mkShell { nativeBuildInputs = [ bazel_7 git - go ]; } diff --git a/operators/constellation-node-operator/Makefile b/operators/constellation-node-operator/Makefile index 777b1ecb1..ca2bd0906 100644 --- a/operators/constellation-node-operator/Makefile +++ b/operators/constellation-node-operator/Makefile @@ -86,11 +86,11 @@ help: ## Display this help. .PHONY: manifests manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects. - $(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases + $(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./" output:crd:artifacts:config=config/crd/bases .PHONY: generate generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations. - $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." + $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./" .PHONY: fmt fmt: ## Run go fmt against code. 
@@ -162,7 +162,7 @@ CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen ## Tool Versions KUSTOMIZE_VERSION ?= v3.8.7 -CONTROLLER_TOOLS_VERSION ?= v0.16.4 +CONTROLLER_TOOLS_VERSION ?= v0.9.0 KUSTOMIZE_INSTALL_SCRIPT ?= "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" .PHONY: kustomize diff --git a/operators/constellation-node-operator/api/LICENSE b/operators/constellation-node-operator/api/LICENSE new file mode 100644 index 000000000..be3f7b28e --- /dev/null +++ b/operators/constellation-node-operator/api/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. 
+ + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. 
You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. 
+ + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. 
+ + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. + + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. 
If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>.
+ +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +<https://www.gnu.org/licenses/>. diff --git a/operators/constellation-node-operator/api/api.go b/operators/constellation-node-operator/api/api.go index 3fb0031d6..83a9f7547 100644 --- a/operators/constellation-node-operator/api/api.go +++ b/operators/constellation-node-operator/api/api.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* diff --git a/operators/constellation-node-operator/api/v1alpha1/autoscalingstrategy_types.go b/operators/constellation-node-operator/api/v1alpha1/autoscalingstrategy_types.go index 5e1a72430..590450583 100644 --- a/operators/constellation-node-operator/api/v1alpha1/autoscalingstrategy_types.go +++ b/operators/constellation-node-operator/api/v1alpha1/autoscalingstrategy_types.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package v1alpha1 diff --git a/operators/constellation-node-operator/api/v1alpha1/groupversion_info.go b/operators/constellation-node-operator/api/v1alpha1/groupversion_info.go index e6a051566..a38fbef44 100644 --- a/operators/constellation-node-operator/api/v1alpha1/groupversion_info.go +++ b/operators/constellation-node-operator/api/v1alpha1/groupversion_info.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package v1alpha1 contains API Schema definitions for the update v1alpha1 API group diff --git a/operators/constellation-node-operator/api/v1alpha1/joiningnodes_types.go b/operators/constellation-node-operator/api/v1alpha1/joiningnodes_types.go index e3e3a2cc9..0eed07a7d 100644 --- a/operators/constellation-node-operator/api/v1alpha1/joiningnodes_types.go +++ b/operators/constellation-node-operator/api/v1alpha1/joiningnodes_types.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package v1alpha1 diff --git a/operators/constellation-node-operator/api/v1alpha1/nodeversion_types.go b/operators/constellation-node-operator/api/v1alpha1/nodeversion_types.go index 33d358674..f5d81dfed 100644 --- a/operators/constellation-node-operator/api/v1alpha1/nodeversion_types.go +++ b/operators/constellation-node-operator/api/v1alpha1/nodeversion_types.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package v1alpha1 diff --git a/operators/constellation-node-operator/api/v1alpha1/pendingnode_types.go b/operators/constellation-node-operator/api/v1alpha1/pendingnode_types.go index 720bd6f5a..62261b984 100644 --- a/operators/constellation-node-operator/api/v1alpha1/pendingnode_types.go +++
b/operators/constellation-node-operator/api/v1alpha1/pendingnode_types.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package v1alpha1 diff --git a/operators/constellation-node-operator/api/v1alpha1/scalinggroup_types.go b/operators/constellation-node-operator/api/v1alpha1/scalinggroup_types.go index 3c58bcff0..a2c50a453 100644 --- a/operators/constellation-node-operator/api/v1alpha1/scalinggroup_types.go +++ b/operators/constellation-node-operator/api/v1alpha1/scalinggroup_types.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package v1alpha1 diff --git a/operators/constellation-node-operator/api/v1alpha1/zz_generated.deepcopy.go b/operators/constellation-node-operator/api/v1alpha1/zz_generated.deepcopy.go index b25cfc280..198fc055c 100644 --- a/operators/constellation-node-operator/api/v1alpha1/zz_generated.deepcopy.go +++ b/operators/constellation-node-operator/api/v1alpha1/zz_generated.deepcopy.go @@ -1,4 +1,5 @@ //go:build !ignore_autogenerated +// +build !ignore_autogenerated // Code generated by controller-gen. DO NOT EDIT. diff --git a/operators/constellation-node-operator/config/crd/bases/update.edgeless.systems_autoscalingstrategies.yaml b/operators/constellation-node-operator/config/crd/bases/update.edgeless.systems_autoscalingstrategies.yaml index 9156e3e71..6a014a394 100644 --- a/operators/constellation-node-operator/config/crd/bases/update.edgeless.systems_autoscalingstrategies.yaml +++ b/operators/constellation-node-operator/config/crd/bases/update.edgeless.systems_autoscalingstrategies.yaml @@ -3,7 +3,8 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.4 + controller-gen.kubebuilder.io/version: v0.9.0 + creationTimestamp: null name: autoscalingstrategies.update.edgeless.systems spec: group: update.edgeless.systems @@ -21,19 +22,14 @@ spec: API. properties: apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object diff --git a/operators/constellation-node-operator/config/crd/bases/update.edgeless.systems_joiningnodes.yaml b/operators/constellation-node-operator/config/crd/bases/update.edgeless.systems_joiningnodes.yaml index 1beca7221..9c8e9e5a2 100644 --- a/operators/constellation-node-operator/config/crd/bases/update.edgeless.systems_joiningnodes.yaml +++ b/operators/constellation-node-operator/config/crd/bases/update.edgeless.systems_joiningnodes.yaml @@ -3,7 +3,8 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.4 + controller-gen.kubebuilder.io/version: v0.9.0 + creationTimestamp: null name: joiningnodes.update.edgeless.systems spec: group: update.edgeless.systems @@ -20,19 +21,14 @@ spec: description: JoiningNode is the Schema for the joiningnodes API. properties: apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object diff --git a/operators/constellation-node-operator/config/crd/bases/update.edgeless.systems_nodeversions.yaml b/operators/constellation-node-operator/config/crd/bases/update.edgeless.systems_nodeversions.yaml index 4b7f7b7e0..e4c435ec1 100644 --- a/operators/constellation-node-operator/config/crd/bases/update.edgeless.systems_nodeversions.yaml +++ b/operators/constellation-node-operator/config/crd/bases/update.edgeless.systems_nodeversions.yaml @@ -3,7 +3,8 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.4 + controller-gen.kubebuilder.io/version: v0.9.0 + creationTimestamp: null name: nodeversions.update.edgeless.systems spec: group: update.edgeless.systems @@ -20,19 +21,14 @@ spec: description: NodeVersion is the Schema for the nodeversions API. properties: apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. 
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object @@ -66,49 +62,65 @@ spec: description: AwaitingAnnotation is a list of nodes that are waiting for the operator to annotate them. items: - description: ObjectReference contains enough information to let - you inspect or modify the referred object. + description: "ObjectReference contains enough information to let + you inspect or modify the referred object. --- New uses of this + type are discouraged because of difficulty describing its usage + when embedded in APIs. 1. Ignored fields. It includes many fields + which are not generally honored. For instance, ResourceVersion + and FieldPath are both very rarely valid in actual usage. 2. Invalid + usage help. It is impossible to add specific help for individual + usage. In most embedded usages, there are particular restrictions + like, \"must refer only to types A and B\" or \"UID not honored\" + or \"name must be restricted\". Those cannot be well described + when embedded. 3. Inconsistent validation. Because the usages + are different, the validation rules are different by usage, which + makes it hard for users to predict what will happen. 4. The fields + are both imprecise and overly precise. Kind is not a precise + mapping to a URL. This can produce ambiguity during interpretation + and require a REST mapping. In most cases, the dependency is + on the group,resource tuple and the version of the actual struct + is irrelevant. 5. We cannot easily change it. Because this type + is embedded in many locations, updates to this type will affect + numerous schemas. Don't make new APIs embed an underspecified + API type they do not control. \n Instead of using this type, create + a locally provided and used type that is well-focused on your + reference. For example, ServiceReferences for admission registration: + https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 + ." properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
- For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' type: string kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' type: string resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' type: string uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' type: string type: object - x-kubernetes-map-type: atomic type: array budget: description: Budget is the amount of extra nodes that can be created @@ -119,35 +131,43 @@ spec: description: Conditions represent the latest available observations of an object's state items: - description: Condition contains details for one aspect of the current - state of this API Resource. + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. 
// Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" properties: lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. format: date-time type: string message: - description: |- - message is a human readable message indicating details about the transition. - This may be an empty string. + description: message is a human readable message indicating + details about the transition. This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: |- - observedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. format: int64 minimum: 0 type: integer reason: - description: |- - reason contains a programmatic identifier indicating the reason for the condition's last transition. - Producers of specific condition types may define expected values and meanings for this field, - and whether the values are considered a guaranteed API. - The value should be a CamelCase string. + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -162,6 +182,10 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -177,386 +201,514 @@ spec: description: Donors is a list of outdated nodes that donate labels to heirs. items: - description: ObjectReference contains enough information to let - you inspect or modify the referred object. + description: "ObjectReference contains enough information to let + you inspect or modify the referred object. 
--- New uses of this + type are discouraged because of difficulty describing its usage + when embedded in APIs. 1. Ignored fields. It includes many fields + which are not generally honored. For instance, ResourceVersion + and FieldPath are both very rarely valid in actual usage. 2. Invalid + usage help. It is impossible to add specific help for individual + usage. In most embedded usages, there are particular restrictions + like, \"must refer only to types A and B\" or \"UID not honored\" + or \"name must be restricted\". Those cannot be well described + when embedded. 3. Inconsistent validation. Because the usages + are different, the validation rules are different by usage, which + makes it hard for users to predict what will happen. 4. The fields + are both imprecise and overly precise. Kind is not a precise + mapping to a URL. This can produce ambiguity during interpretation + and require a REST mapping. In most cases, the dependency is + on the group,resource tuple and the version of the actual struct + is irrelevant. 5. We cannot easily change it. Because this type + is embedded in many locations, updates to this type will affect + numerous schemas. Don't make new APIs embed an underspecified + API type they do not control. \n Instead of using this type, create + a locally provided and used type that is well-focused on your + reference. For example, ServiceReferences for admission registration: + https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 + ." properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' type: string kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' type: string resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' type: string uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' type: string type: object - x-kubernetes-map-type: atomic type: array heirs: description: Heirs is a list of nodes using the latest image that still need to inherit labels from donors. items: - description: ObjectReference contains enough information to let - you inspect or modify the referred object. + description: "ObjectReference contains enough information to let + you inspect or modify the referred object. --- New uses of this + type are discouraged because of difficulty describing its usage + when embedded in APIs. 1. Ignored fields. It includes many fields + which are not generally honored. For instance, ResourceVersion + and FieldPath are both very rarely valid in actual usage. 2. Invalid + usage help. It is impossible to add specific help for individual + usage. In most embedded usages, there are particular restrictions + like, \"must refer only to types A and B\" or \"UID not honored\" + or \"name must be restricted\". Those cannot be well described + when embedded. 3. Inconsistent validation. Because the usages + are different, the validation rules are different by usage, which + makes it hard for users to predict what will happen. 4. The fields + are both imprecise and overly precise. Kind is not a precise + mapping to a URL. This can produce ambiguity during interpretation + and require a REST mapping. In most cases, the dependency is + on the group,resource tuple and the version of the actual struct + is irrelevant. 5. We cannot easily change it. Because this type + is embedded in many locations, updates to this type will affect + numerous schemas. Don't make new APIs embed an underspecified + API type they do not control. \n Instead of using this type, create + a locally provided and used type that is well-focused on your + reference. For example, ServiceReferences for admission registration: + https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 + ." properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
- For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' type: string kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' type: string resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' type: string uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' type: string type: object - x-kubernetes-map-type: atomic type: array invalid: description: Invalid is a list of invalid nodes (nodes that cannot be processed by the operator due to missing information or transient faults). items: - description: ObjectReference contains enough information to let - you inspect or modify the referred object. + description: "ObjectReference contains enough information to let + you inspect or modify the referred object. --- New uses of this + type are discouraged because of difficulty describing its usage + when embedded in APIs. 1. Ignored fields. It includes many fields + which are not generally honored. For instance, ResourceVersion + and FieldPath are both very rarely valid in actual usage. 2. Invalid + usage help. It is impossible to add specific help for individual + usage. 
In most embedded usages, there are particular restrictions + like, \"must refer only to types A and B\" or \"UID not honored\" + or \"name must be restricted\". Those cannot be well described + when embedded. 3. Inconsistent validation. Because the usages + are different, the validation rules are different by usage, which + makes it hard for users to predict what will happen. 4. The fields + are both imprecise and overly precise. Kind is not a precise + mapping to a URL. This can produce ambiguity during interpretation + and require a REST mapping. In most cases, the dependency is + on the group,resource tuple and the version of the actual struct + is irrelevant. 5. We cannot easily change it. Because this type + is embedded in many locations, updates to this type will affect + numerous schemas. Don't make new APIs embed an underspecified + API type they do not control. \n Instead of using this type, create + a locally provided and used type that is well-focused on your + reference. For example, ServiceReferences for admission registration: + https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 + ." properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' type: string kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' type: string resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. 
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' type: string uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' type: string type: object - x-kubernetes-map-type: atomic type: array mints: description: Mints is a list of up to date nodes that will become heirs. items: - description: ObjectReference contains enough information to let - you inspect or modify the referred object. + description: "ObjectReference contains enough information to let + you inspect or modify the referred object. --- New uses of this + type are discouraged because of difficulty describing its usage + when embedded in APIs. 1. Ignored fields. It includes many fields + which are not generally honored. For instance, ResourceVersion + and FieldPath are both very rarely valid in actual usage. 2. Invalid + usage help. It is impossible to add specific help for individual + usage. In most embedded usages, there are particular restrictions + like, \"must refer only to types A and B\" or \"UID not honored\" + or \"name must be restricted\". Those cannot be well described + when embedded. 3. Inconsistent validation. Because the usages + are different, the validation rules are different by usage, which + makes it hard for users to predict what will happen. 4. The fields + are both imprecise and overly precise. Kind is not a precise + mapping to a URL. This can produce ambiguity during interpretation + and require a REST mapping. In most cases, the dependency is + on the group,resource tuple and the version of the actual struct + is irrelevant. 5. We cannot easily change it. Because this type + is embedded in many locations, updates to this type will affect + numerous schemas. Don't make new APIs embed an underspecified + API type they do not control. \n Instead of using this type, create + a locally provided and used type that is well-focused on your + reference. For example, ServiceReferences for admission registration: + https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 + ." properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. 
+ For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' type: string kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' type: string resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' type: string uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' type: string type: object - x-kubernetes-map-type: atomic type: array obsolete: description: Obsolete is a list of obsolete nodes (nodes that have been created by the operator but are no longer needed). items: - description: ObjectReference contains enough information to let - you inspect or modify the referred object. + description: "ObjectReference contains enough information to let + you inspect or modify the referred object. --- New uses of this + type are discouraged because of difficulty describing its usage + when embedded in APIs. 1. Ignored fields. It includes many fields + which are not generally honored. For instance, ResourceVersion + and FieldPath are both very rarely valid in actual usage. 2. Invalid + usage help. It is impossible to add specific help for individual + usage. In most embedded usages, there are particular restrictions + like, \"must refer only to types A and B\" or \"UID not honored\" + or \"name must be restricted\". Those cannot be well described + when embedded. 3. Inconsistent validation. Because the usages + are different, the validation rules are different by usage, which + makes it hard for users to predict what will happen. 4. The fields + are both imprecise and overly precise. Kind is not a precise + mapping to a URL. This can produce ambiguity during interpretation + and require a REST mapping. 
In most cases, the dependency is + on the group,resource tuple and the version of the actual struct + is irrelevant. 5. We cannot easily change it. Because this type + is embedded in many locations, updates to this type will affect + numerous schemas. Don't make new APIs embed an underspecified + API type they do not control. \n Instead of using this type, create + a locally provided and used type that is well-focused on your + reference. For example, ServiceReferences for admission registration: + https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 + ." properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' type: string kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' type: string resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' type: string uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + description: 'UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' type: string type: object - x-kubernetes-map-type: atomic type: array outdated: description: Outdated is a list of nodes that are using an outdated image. items: - description: ObjectReference contains enough information to let - you inspect or modify the referred object. + description: "ObjectReference contains enough information to let + you inspect or modify the referred object. --- New uses of this + type are discouraged because of difficulty describing its usage + when embedded in APIs. 1. Ignored fields. It includes many fields + which are not generally honored. For instance, ResourceVersion + and FieldPath are both very rarely valid in actual usage. 2. Invalid + usage help. It is impossible to add specific help for individual + usage. In most embedded usages, there are particular restrictions + like, \"must refer only to types A and B\" or \"UID not honored\" + or \"name must be restricted\". Those cannot be well described + when embedded. 3. Inconsistent validation. Because the usages + are different, the validation rules are different by usage, which + makes it hard for users to predict what will happen. 4. The fields + are both imprecise and overly precise. Kind is not a precise + mapping to a URL. This can produce ambiguity during interpretation + and require a REST mapping. In most cases, the dependency is + on the group,resource tuple and the version of the actual struct + is irrelevant. 5. We cannot easily change it. Because this type + is embedded in many locations, updates to this type will affect + numerous schemas. Don't make new APIs embed an underspecified + API type they do not control. \n Instead of using this type, create + a locally provided and used type that is well-focused on your + reference. For example, ServiceReferences for admission registration: + https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 + ." properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' type: string kind: - description: |- - Kind of the referent. 
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' type: string resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' type: string uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' type: string type: object - x-kubernetes-map-type: atomic type: array pending: description: Pending is a list of pending nodes (joining or leaving the cluster). items: - description: ObjectReference contains enough information to let - you inspect or modify the referred object. + description: "ObjectReference contains enough information to let + you inspect or modify the referred object. --- New uses of this + type are discouraged because of difficulty describing its usage + when embedded in APIs. 1. Ignored fields. It includes many fields + which are not generally honored. For instance, ResourceVersion + and FieldPath are both very rarely valid in actual usage. 2. Invalid + usage help. It is impossible to add specific help for individual + usage. In most embedded usages, there are particular restrictions + like, \"must refer only to types A and B\" or \"UID not honored\" + or \"name must be restricted\". Those cannot be well described + when embedded. 3. Inconsistent validation. Because the usages + are different, the validation rules are different by usage, which + makes it hard for users to predict what will happen. 4. The fields + are both imprecise and overly precise. Kind is not a precise + mapping to a URL. This can produce ambiguity during interpretation + and require a REST mapping. In most cases, the dependency is + on the group,resource tuple and the version of the actual struct + is irrelevant. 5. We cannot easily change it. Because this type + is embedded in many locations, updates to this type will affect + numerous schemas. Don't make new APIs embed an underspecified + API type they do not control. \n Instead of using this type, create + a locally provided and used type that is well-focused on your + reference. For example, ServiceReferences for admission registration: + https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 + ." properties: apiVersion: description: API version of the referent. 
type: string fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' type: string kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' type: string resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' type: string uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' type: string type: object - x-kubernetes-map-type: atomic type: array upToDate: description: UpToDate is a list of nodes that are using the latest image and labels. items: - description: ObjectReference contains enough information to let - you inspect or modify the referred object. + description: "ObjectReference contains enough information to let + you inspect or modify the referred object. --- New uses of this + type are discouraged because of difficulty describing its usage + when embedded in APIs. 1. Ignored fields. It includes many fields + which are not generally honored. 
For instance, ResourceVersion + and FieldPath are both very rarely valid in actual usage. 2. Invalid + usage help. It is impossible to add specific help for individual + usage. In most embedded usages, there are particular restrictions + like, \"must refer only to types A and B\" or \"UID not honored\" + or \"name must be restricted\". Those cannot be well described + when embedded. 3. Inconsistent validation. Because the usages + are different, the validation rules are different by usage, which + makes it hard for users to predict what will happen. 4. The fields + are both imprecise and overly precise. Kind is not a precise + mapping to a URL. This can produce ambiguity during interpretation + and require a REST mapping. In most cases, the dependency is + on the group,resource tuple and the version of the actual struct + is irrelevant. 5. We cannot easily change it. Because this type + is embedded in many locations, updates to this type will affect + numerous schemas. Don't make new APIs embed an underspecified + API type they do not control. \n Instead of using this type, create + a locally provided and used type that is well-focused on your + reference. For example, ServiceReferences for admission registration: + https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 + ." properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' type: string kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + description: 'Namespace of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' type: string resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' type: string uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' type: string type: object - x-kubernetes-map-type: atomic type: array required: - activeclusterversionupgrade diff --git a/operators/constellation-node-operator/config/crd/bases/update.edgeless.systems_pendingnodes.yaml b/operators/constellation-node-operator/config/crd/bases/update.edgeless.systems_pendingnodes.yaml index c6cd2db6a..7c5b5618b 100644 --- a/operators/constellation-node-operator/config/crd/bases/update.edgeless.systems_pendingnodes.yaml +++ b/operators/constellation-node-operator/config/crd/bases/update.edgeless.systems_pendingnodes.yaml @@ -3,7 +3,8 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.4 + controller-gen.kubebuilder.io/version: v0.9.0 + creationTimestamp: null name: pendingnodes.update.edgeless.systems spec: group: update.edgeless.systems @@ -20,19 +21,14 @@ spec: description: PendingNode is the Schema for the pendingnodes API. properties: apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object @@ -40,11 +36,10 @@ spec: description: PendingNodeSpec defines the desired state of PendingNode. properties: deadline: - description: |- - Deadline is the deadline for reaching the goal state. - Joining nodes will be terminated if the deadline is exceeded. - Leaving nodes will remain as unschedulable to prevent data loss. - If not specified, the node may remain in the pending state indefinitely. 
+ description: Deadline is the deadline for reaching the goal state. + Joining nodes will be terminated if the deadline is exceeded. Leaving + nodes will remain as unschedulable to prevent data loss. If not + specified, the node may remain in the pending state indefinitely. format: date-time type: string goal: diff --git a/operators/constellation-node-operator/config/crd/bases/update.edgeless.systems_scalinggroups.yaml b/operators/constellation-node-operator/config/crd/bases/update.edgeless.systems_scalinggroups.yaml index 5eed4ebc8..0f87fbab1 100644 --- a/operators/constellation-node-operator/config/crd/bases/update.edgeless.systems_scalinggroups.yaml +++ b/operators/constellation-node-operator/config/crd/bases/update.edgeless.systems_scalinggroups.yaml @@ -3,7 +3,8 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.4 + controller-gen.kubebuilder.io/version: v0.9.0 + creationTimestamp: null name: scalinggroups.update.edgeless.systems spec: group: update.edgeless.systems @@ -20,19 +21,14 @@ spec: description: ScalingGroup is the Schema for the scalinggroups API. properties: apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object @@ -61,8 +57,8 @@ spec: format: int32 type: integer nodeGroupName: - description: NodeGroupName is the human friendly name of the node - group as defined in the Constellation configuration. + description: NodeGroupName is the human friendly name of the node group + as defined in the Constellation configuration. type: string nodeImage: description: NodeVersion is the name of the NodeVersion resource. @@ -81,35 +77,43 @@ spec: description: Conditions represent the latest available observations of an object's state. items: - description: Condition contains details for one aspect of the current - state of this API Resource. + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. 
// Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" properties: lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. format: date-time type: string message: - description: |- - message is a human readable message indicating details about the transition. - This may be an empty string. + description: message is a human readable message indicating + details about the transition. This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: |- - observedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. format: int64 minimum: 0 type: integer reason: - description: |- - reason contains a programmatic identifier indicating the reason for the condition's last transition. - Producers of specific condition types may define expected values and meanings for this field, - and whether the values are considered a guaranteed API. - The value should be a CamelCase string. + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -124,6 +128,10 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string diff --git a/operators/constellation-node-operator/config/manager/manager.yaml b/operators/constellation-node-operator/config/manager/manager.yaml index 314209305..32b39e301 100644 --- a/operators/constellation-node-operator/config/manager/manager.yaml +++ b/operators/constellation-node-operator/config/manager/manager.yaml @@ -31,9 +31,6 @@ spec: - /manager args: - --leader-elect - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /var/secrets/google/key.json image: controller:latest name: manager securityContext: @@ -63,9 +60,6 @@ spec: - mountPath: /etc/gce name: gceconf readOnly: true - - mountPath: /var/secrets/google - name: gcekey - readOnly: true - mountPath: /etc/constellation-upgrade-agent.sock name: upgrade-agent-socket readOnly: true @@ -97,10 +91,6 @@ spec: configMap: name: gceconf optional: true - - name: gcekey - secret: - secretName: gcekey - optional: true - name: upgrade-agent-socket hostPath: path: /run/constellation-upgrade-agent.sock diff --git a/operators/constellation-node-operator/config/rbac/role.yaml b/operators/constellation-node-operator/config/rbac/role.yaml index a1f60ee8f..8700132c4 100644 --- a/operators/constellation-node-operator/config/rbac/role.yaml +++ b/operators/constellation-node-operator/config/rbac/role.yaml @@ -2,6 +2,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: + creationTimestamp: null name: manager-role rules: - apiGroups: @@ -11,7 +12,6 @@ rules: verbs: - get - list - - watch - apiGroups: - "" resources: @@ -57,10 +57,6 @@ rules: - update.edgeless.systems resources: - autoscalingstrategies - - joiningnodes - - nodeversions - - pendingnodes - - scalinggroups verbs: - create - delete @@ -73,20 +69,38 @@ rules: - update.edgeless.systems resources: - autoscalingstrategies/finalizers - - joiningnodes/finalizers - - nodeversions/finalizers - - pendingnodes/finalizers - - scalinggroups/finalizers verbs: - update - apiGroups: - update.edgeless.systems resources: - autoscalingstrategies/status + verbs: + - get + - patch + - update +- apiGroups: + - update.edgeless.systems + resources: + - joiningnodes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - update.edgeless.systems + resources: + - joiningnodes/finalizers + verbs: + - update +- apiGroups: + - update.edgeless.systems + resources: - joiningnodes/status - - nodeversions/status - - pendingnodes/status - - scalinggroups/status verbs: - get - patch @@ -105,3 +119,81 @@ rules: - nodeversion/status verbs: - get +- apiGroups: + - update.edgeless.systems + resources: + - nodeversions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - update.edgeless.systems + resources: + - nodeversions/finalizers + verbs: + - update +- apiGroups: + - update.edgeless.systems + resources: + - nodeversions/status + verbs: + - get + - patch + - update +- apiGroups: + - update.edgeless.systems + resources: + - pendingnodes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - update.edgeless.systems + resources: + - pendingnodes/finalizers + verbs: + - update +- apiGroups: + - update.edgeless.systems + resources: + - pendingnodes/status + verbs: + - get + - patch + - update +- apiGroups: + - update.edgeless.systems + resources: + - scalinggroups + 
verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - update.edgeless.systems + resources: + - scalinggroups/finalizers + verbs: + - update +- apiGroups: + - update.edgeless.systems + resources: + - scalinggroups/status + verbs: + - get + - patch + - update diff --git a/operators/constellation-node-operator/controllers/autoscalingstrategy_controller.go b/operators/constellation-node-operator/controllers/autoscalingstrategy_controller.go index de6bba9cf..9fa73ff24 100644 --- a/operators/constellation-node-operator/controllers/autoscalingstrategy_controller.go +++ b/operators/constellation-node-operator/controllers/autoscalingstrategy_controller.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package controllers diff --git a/operators/constellation-node-operator/controllers/autoscalingstrategy_controller_env_test.go b/operators/constellation-node-operator/controllers/autoscalingstrategy_controller_env_test.go index 07fa0460d..03a14b6dc 100644 --- a/operators/constellation-node-operator/controllers/autoscalingstrategy_controller_env_test.go +++ b/operators/constellation-node-operator/controllers/autoscalingstrategy_controller_env_test.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package controllers diff --git a/operators/constellation-node-operator/controllers/client_test.go b/operators/constellation-node-operator/controllers/client_test.go index 9dc1f6999..d4ea516d1 100644 --- a/operators/constellation-node-operator/controllers/client_test.go +++ b/operators/constellation-node-operator/controllers/client_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package controllers diff --git a/operators/constellation-node-operator/controllers/joiningnode_controller.go b/operators/constellation-node-operator/controllers/joiningnode_controller.go index f475e92c8..8ab45d54f 100644 --- a/operators/constellation-node-operator/controllers/joiningnode_controller.go +++ b/operators/constellation-node-operator/controllers/joiningnode_controller.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package controllers diff --git a/operators/constellation-node-operator/controllers/joiningnode_controller_env_test.go b/operators/constellation-node-operator/controllers/joiningnode_controller_env_test.go index 7b5fd2a7d..2662dff28 100644 --- a/operators/constellation-node-operator/controllers/joiningnode_controller_env_test.go +++ b/operators/constellation-node-operator/controllers/joiningnode_controller_env_test.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package controllers diff --git a/operators/constellation-node-operator/controllers/nodeversion_controller.go b/operators/constellation-node-operator/controllers/nodeversion_controller.go index 5a94591d1..ff706c702 100644 --- a/operators/constellation-node-operator/controllers/nodeversion_controller.go +++ b/operators/constellation-node-operator/controllers/nodeversion_controller.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package controllers @@ -83,7 +83,7 @@ func 
NewNodeVersionReconciler(nodeReplacer nodeReplacer, etcdRemover etcdRemover //+kubebuilder:rbac:groups=nodemaintenance.medik8s.io,resources=nodemaintenances,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups="",resources=nodes,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups="",resources=nodes/status,verbs=get -//+kubebuilder:rbac:groups="",resources=configmaps,verbs=list;get;watch +//+kubebuilder:rbac:groups="",resources=configmaps,verbs=list;get // Reconcile replaces outdated nodes with new nodes as specified in the NodeVersion spec. func (r *NodeVersionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { @@ -214,7 +214,7 @@ func (r *NodeVersionReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{Requeue: shouldRequeue}, nil } - newNodeConfig := newNodeConfig{desiredNodeVersion, groups.Outdated, groups.Donors, pendingNodeList.Items, scalingGroupByID, newNodesBudget} + newNodeConfig := newNodeConfig{desiredNodeVersion, groups.Outdated, pendingNodeList.Items, scalingGroupByID, newNodesBudget} if err := r.createNewNodes(ctx, newNodeConfig); err != nil { logr.Error(err, "Creating new nodes") return ctrl.Result{Requeue: shouldRequeue}, nil @@ -614,15 +614,6 @@ func (r *NodeVersionReconciler) createNewNodes(ctx context.Context, config newNo if config.newNodesBudget < 1 || len(config.outdatedNodes) == 0 { return nil } - // We need to look at both the outdated nodes *and* the nodes that have already - // been moved to the donors here because even if a CP node has already been moved to - // the donors, we still want to defer worker upgrades until the new CP node is actually joined. - hasOutdatedControlPlanes := false - for _, entry := range append(config.outdatedNodes, config.donors...) { - if nodeutil.IsControlPlaneNode(&entry) { - hasOutdatedControlPlanes = true - } - } outdatedNodesPerScalingGroup := make(map[string]int) for _, node := range config.outdatedNodes { // skip outdated nodes that got assigned an heir in this Reconcile call @@ -657,12 +648,6 @@ func (r *NodeVersionReconciler) createNewNodes(ctx context.Context, config newNo continue } if requiredNodesPerScalingGroup[scalingGroupID] == 0 { - logr.Info("No new nodes needed for scaling group", "scalingGroup", scalingGroupID) - continue - } - // if we are a worker group and still have outdated control planes, we must wait for them to be upgraded. 
- if hasOutdatedControlPlanes && scalingGroup.Spec.Role != updatev1alpha1.ControlPlaneRole { - logr.Info("There are still outdated control plane nodes which must be replaced first before this worker scaling group is upgraded", "scalingGroup", scalingGroupID) continue } for { @@ -694,7 +679,7 @@ func (r *NodeVersionReconciler) createNewNodes(ctx context.Context, config newNo if err := r.Create(ctx, pendingNode); err != nil { return err } - logr.Info("Created new node", "createdNode", nodeName, "scalingGroup", scalingGroupID, "requiredNodes", requiredNodesPerScalingGroup[scalingGroupID]) + logr.Info("Created new node", "createdNode", nodeName, "scalingGroup", scalingGroupID) requiredNodesPerScalingGroup[scalingGroupID]-- config.newNodesBudget-- } @@ -954,7 +939,6 @@ type kubernetesServerVersionGetter interface { type newNodeConfig struct { desiredNodeVersion updatev1alpha1.NodeVersion outdatedNodes []corev1.Node - donors []corev1.Node pendingNodes []updatev1alpha1.PendingNode scalingGroupByID map[string]updatev1alpha1.ScalingGroup newNodesBudget int diff --git a/operators/constellation-node-operator/controllers/nodeversion_controller_env_test.go b/operators/constellation-node-operator/controllers/nodeversion_controller_env_test.go index 2e093323b..7fa62ce3a 100644 --- a/operators/constellation-node-operator/controllers/nodeversion_controller_env_test.go +++ b/operators/constellation-node-operator/controllers/nodeversion_controller_env_test.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package controllers diff --git a/operators/constellation-node-operator/controllers/nodeversion_controller_test.go b/operators/constellation-node-operator/controllers/nodeversion_controller_test.go index fcc67a529..c9ae88042 100644 --- a/operators/constellation-node-operator/controllers/nodeversion_controller_test.go +++ b/operators/constellation-node-operator/controllers/nodeversion_controller_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package controllers @@ -123,7 +123,7 @@ func TestAnnotateNodes(t *testing.T) { }, }, } - annotated, invalid := reconciler.annotateNodes(t.Context(), []corev1.Node{tc.node}) + annotated, invalid := reconciler.annotateNodes(context.Background(), []corev1.Node{tc.node}) if tc.wantAnnotated == nil { assert.Len(annotated, 0) assert.Len(invalid, 1) @@ -226,7 +226,7 @@ func TestPairDonorsAndHeirs(t *testing.T) { }, } nodeImage := updatev1alpha1.NodeVersion{} - pairs := reconciler.pairDonorsAndHeirs(t.Context(), &nodeImage, []corev1.Node{tc.outdatedNode}, []mintNode{tc.mintNode}) + pairs := reconciler.pairDonorsAndHeirs(context.Background(), &nodeImage, []corev1.Node{tc.outdatedNode}, []mintNode{tc.mintNode}) if tc.wantPair == nil { assert.Len(pairs, 0) return @@ -315,7 +315,7 @@ func TestMatchDonorsAndHeirs(t *testing.T) { stubReaderClient: *newStubReaderClient(t, []runtime.Object{&tc.donor, &tc.heir}, nil, nil), }, } - pairs := reconciler.matchDonorsAndHeirs(t.Context(), nil, []corev1.Node{tc.donor}, []corev1.Node{tc.heir}) + pairs := reconciler.matchDonorsAndHeirs(context.Background(), nil, []corev1.Node{tc.donor}, []corev1.Node{tc.heir}) if tc.wantPair == nil { assert.Len(pairs, 0) return @@ -330,7 +330,6 @@ func TestMatchDonorsAndHeirs(t *testing.T) { func TestCreateNewNodes(t *testing.T) { testCases := map[string]struct { outdatedNodes []corev1.Node - donors []corev1.Node pendingNodes 
[]updatev1alpha1.PendingNode scalingGroupByID map[string]updatev1alpha1.ScalingGroup budget int @@ -574,105 +573,6 @@ func TestCreateNewNodes(t *testing.T) { }, budget: 1, }, - "control plane node upgraded first": { - outdatedNodes: []corev1.Node{ - // CP node - { - ObjectMeta: metav1.ObjectMeta{ - Name: "control-plane-node", - Annotations: map[string]string{ - scalingGroupAnnotation: "control-plane-scaling-group", - }, - Labels: map[string]string{ - // Mark this as a CP node as per - // https://kubernetes.io/docs/reference/labels-annotations-taints/#node-role-kubernetes-io-control-plane - "node-role.kubernetes.io/control-plane": "", - }, - }, - }, - // Worker node - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node", - Annotations: map[string]string{ - scalingGroupAnnotation: "scaling-group", - }, - }, - }, - }, - scalingGroupByID: map[string]updatev1alpha1.ScalingGroup{ - "scaling-group": { - Spec: updatev1alpha1.ScalingGroupSpec{ - GroupID: "scaling-group", - Role: updatev1alpha1.WorkerRole, - }, - Status: updatev1alpha1.ScalingGroupStatus{ - ImageReference: "image", - }, - }, - "control-plane-scaling-group": { - Spec: updatev1alpha1.ScalingGroupSpec{ - GroupID: "control-plane-scaling-group", - Role: updatev1alpha1.ControlPlaneRole, - }, - Status: updatev1alpha1.ScalingGroupStatus{ - ImageReference: "image", - }, - }, - }, - budget: 2, - wantCreateCalls: []string{"control-plane-scaling-group"}, - }, - "worker not upgraded while cp is in donors": { - donors: []corev1.Node{ - // CP node - { - ObjectMeta: metav1.ObjectMeta{ - Name: "control-plane-node", - Annotations: map[string]string{ - scalingGroupAnnotation: "control-plane-scaling-group", - }, - Labels: map[string]string{ - // Mark this as a CP node as per - // https://kubernetes.io/docs/reference/labels-annotations-taints/#node-role-kubernetes-io-control-plane - "node-role.kubernetes.io/control-plane": "", - }, - }, - }, - }, - outdatedNodes: []corev1.Node{ - // Worker node - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node", - Annotations: map[string]string{ - scalingGroupAnnotation: "scaling-group", - }, - }, - }, - }, - scalingGroupByID: map[string]updatev1alpha1.ScalingGroup{ - "scaling-group": { - Spec: updatev1alpha1.ScalingGroupSpec{ - GroupID: "scaling-group", - Role: updatev1alpha1.WorkerRole, - }, - Status: updatev1alpha1.ScalingGroupStatus{ - ImageReference: "image", - }, - }, - "control-plane-scaling-group": { - Spec: updatev1alpha1.ScalingGroupSpec{ - GroupID: "control-plane-scaling-group", - Role: updatev1alpha1.ControlPlaneRole, - }, - Status: updatev1alpha1.ScalingGroupStatus{ - ImageReference: "image", - }, - }, - }, - budget: 1, - }, } for name, tc := range testCases { @@ -692,8 +592,8 @@ func TestCreateNewNodes(t *testing.T) { }, Scheme: getScheme(t), } - newNodeConfig := newNodeConfig{desiredNodeImage, tc.outdatedNodes, tc.donors, tc.pendingNodes, tc.scalingGroupByID, tc.budget} - err := reconciler.createNewNodes(t.Context(), newNodeConfig) + newNodeConfig := newNodeConfig{desiredNodeImage, tc.outdatedNodes, tc.pendingNodes, tc.scalingGroupByID, tc.budget} + err := reconciler.createNewNodes(context.Background(), newNodeConfig) require.NoError(err) assert.Equal(tc.wantCreateCalls, reconciler.nodeReplacer.(*stubNodeReplacerWriter).createCalls) }) diff --git a/operators/constellation-node-operator/controllers/nodeversion_watches.go b/operators/constellation-node-operator/controllers/nodeversion_watches.go index 16f715bd3..3977a7c64 100644 --- a/operators/constellation-node-operator/controllers/nodeversion_watches.go 
+++ b/operators/constellation-node-operator/controllers/nodeversion_watches.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package controllers diff --git a/operators/constellation-node-operator/controllers/nodeversion_watches_test.go b/operators/constellation-node-operator/controllers/nodeversion_watches_test.go index b464419eb..690c1d2c4 100644 --- a/operators/constellation-node-operator/controllers/nodeversion_watches_test.go +++ b/operators/constellation-node-operator/controllers/nodeversion_watches_test.go @@ -1,12 +1,13 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package controllers import ( + "context" "errors" "testing" @@ -249,7 +250,7 @@ func TestFindObjectsForScalingGroup(t *testing.T) { } assert := assert.New(t) reconciler := NodeVersionReconciler{} - requests := reconciler.findObjectsForScalingGroup(t.Context(), &scalingGroup) + requests := reconciler.findObjectsForScalingGroup(context.TODO(), &scalingGroup) assert.ElementsMatch(wantRequests, requests) } @@ -283,7 +284,7 @@ func TestFindAllNodeVersions(t *testing.T) { reconciler := NodeVersionReconciler{ Client: newStubReaderClient(t, []runtime.Object{tc.nodeVersion}, nil, tc.listNodeVersionsErr), } - requests := reconciler.findAllNodeVersions(t.Context(), nil) + requests := reconciler.findAllNodeVersions(context.TODO(), nil) assert.ElementsMatch(tc.wantRequests, requests) }) } diff --git a/operators/constellation-node-operator/controllers/pendingnode_controller.go b/operators/constellation-node-operator/controllers/pendingnode_controller.go index fe274a0ce..1bd5b97df 100644 --- a/operators/constellation-node-operator/controllers/pendingnode_controller.go +++ b/operators/constellation-node-operator/controllers/pendingnode_controller.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package controllers diff --git a/operators/constellation-node-operator/controllers/pendingnode_controller_env_test.go b/operators/constellation-node-operator/controllers/pendingnode_controller_env_test.go index 427199b7b..05e5de4ee 100644 --- a/operators/constellation-node-operator/controllers/pendingnode_controller_env_test.go +++ b/operators/constellation-node-operator/controllers/pendingnode_controller_env_test.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package controllers diff --git a/operators/constellation-node-operator/controllers/pendingnode_controller_test.go b/operators/constellation-node-operator/controllers/pendingnode_controller_test.go index c77e0eb99..1a564af76 100644 --- a/operators/constellation-node-operator/controllers/pendingnode_controller_test.go +++ b/operators/constellation-node-operator/controllers/pendingnode_controller_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package controllers @@ -137,7 +137,7 @@ func TestFindObjectsForNode(t *testing.T) { reconciler := PendingNodeReconciler{ Client: newStubReaderClient(t, []runtime.Object{tc.pendingNode}, nil, tc.listPendingNodesErr), } - requests := reconciler.findObjectsForNode(t.Context(), &corev1.Node{ + requests := reconciler.findObjectsForNode(context.TODO(), &corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "pending-node", }, @@ -218,7 +218,7 
@@ func TestReachedGoal(t *testing.T) { reconciler := PendingNodeReconciler{ Client: newStubReaderClient(t, []runtime.Object{&tc.pendingNode}, tc.getPendingNodeErr, nil), } - reachedGoal, err := reconciler.reachedGoal(t.Context(), tc.pendingNode, tc.nodeState) + reachedGoal, err := reconciler.reachedGoal(context.Background(), tc.pendingNode, tc.nodeState) if tc.wantErr { assert.Error(err) return diff --git a/operators/constellation-node-operator/controllers/scalinggroup_controller.go b/operators/constellation-node-operator/controllers/scalinggroup_controller.go index c01693b70..7336467c0 100644 --- a/operators/constellation-node-operator/controllers/scalinggroup_controller.go +++ b/operators/constellation-node-operator/controllers/scalinggroup_controller.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package controllers diff --git a/operators/constellation-node-operator/controllers/scalinggroup_controller_env_test.go b/operators/constellation-node-operator/controllers/scalinggroup_controller_env_test.go index 0f9661c6c..8101f7174 100644 --- a/operators/constellation-node-operator/controllers/scalinggroup_controller_env_test.go +++ b/operators/constellation-node-operator/controllers/scalinggroup_controller_env_test.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package controllers diff --git a/operators/constellation-node-operator/controllers/scalinggroup_controller_test.go b/operators/constellation-node-operator/controllers/scalinggroup_controller_test.go index b791183b2..9769722bd 100644 --- a/operators/constellation-node-operator/controllers/scalinggroup_controller_test.go +++ b/operators/constellation-node-operator/controllers/scalinggroup_controller_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package controllers diff --git a/operators/constellation-node-operator/controllers/schemes_test.go b/operators/constellation-node-operator/controllers/schemes_test.go index df108b049..ed19215ab 100644 --- a/operators/constellation-node-operator/controllers/schemes_test.go +++ b/operators/constellation-node-operator/controllers/schemes_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package controllers diff --git a/operators/constellation-node-operator/controllers/suite_test.go b/operators/constellation-node-operator/controllers/suite_test.go index ac3dedcb9..dd2df3407 100644 --- a/operators/constellation-node-operator/controllers/suite_test.go +++ b/operators/constellation-node-operator/controllers/suite_test.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package controllers diff --git a/operators/constellation-node-operator/internal/cloud/api/scalinggroup.go b/operators/constellation-node-operator/internal/cloud/api/scalinggroup.go index d439c913b..ab08f07b9 100644 --- a/operators/constellation-node-operator/internal/cloud/api/scalinggroup.go +++ b/operators/constellation-node-operator/internal/cloud/api/scalinggroup.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package api diff --git a/operators/constellation-node-operator/internal/cloud/aws/client/api.go 
b/operators/constellation-node-operator/internal/cloud/aws/client/api.go index 98ab180f6..53316583b 100644 --- a/operators/constellation-node-operator/internal/cloud/aws/client/api.go +++ b/operators/constellation-node-operator/internal/cloud/aws/client/api.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client diff --git a/operators/constellation-node-operator/internal/cloud/aws/client/autoscaler.go b/operators/constellation-node-operator/internal/cloud/aws/client/autoscaler.go index 73860311b..e74ef3b9b 100644 --- a/operators/constellation-node-operator/internal/cloud/aws/client/autoscaler.go +++ b/operators/constellation-node-operator/internal/cloud/aws/client/autoscaler.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client diff --git a/operators/constellation-node-operator/internal/cloud/aws/client/client.go b/operators/constellation-node-operator/internal/cloud/aws/client/client.go index f853d5e06..b477e76ef 100644 --- a/operators/constellation-node-operator/internal/cloud/aws/client/client.go +++ b/operators/constellation-node-operator/internal/cloud/aws/client/client.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client diff --git a/operators/constellation-node-operator/internal/cloud/aws/client/client_test.go b/operators/constellation-node-operator/internal/cloud/aws/client/client_test.go index 6643803da..a2e81cc28 100644 --- a/operators/constellation-node-operator/internal/cloud/aws/client/client_test.go +++ b/operators/constellation-node-operator/internal/cloud/aws/client/client_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client diff --git a/operators/constellation-node-operator/internal/cloud/aws/client/nodeimage.go b/operators/constellation-node-operator/internal/cloud/aws/client/nodeimage.go index f7a94e239..61d6026ee 100644 --- a/operators/constellation-node-operator/internal/cloud/aws/client/nodeimage.go +++ b/operators/constellation-node-operator/internal/cloud/aws/client/nodeimage.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client @@ -9,7 +9,6 @@ package client import ( "context" "fmt" - "strings" "time" "github.com/aws/aws-sdk-go-v2/service/autoscaling" @@ -208,7 +207,7 @@ func (c *Client) DeleteNode(ctx context.Context, providerID string) error { ShouldDecrementDesiredCapacity: toPtr(true), }, ) - if err != nil && !isInstanceNotFoundError(err) { + if err != nil { return fmt.Errorf("failed to terminate instance: %w", err) } @@ -218,10 +217,3 @@ func (c *Client) DeleteNode(ctx context.Context, providerID string) error { func toPtr[T any](v T) *T { return &v } - -func isInstanceNotFoundError(err error) bool { - if err == nil { - return false - } - return strings.Contains(err.Error(), "Instance Id not found") -} diff --git a/operators/constellation-node-operator/internal/cloud/aws/client/nodeimage_test.go b/operators/constellation-node-operator/internal/cloud/aws/client/nodeimage_test.go index 8f0296da6..1bae66ec0 100644 --- a/operators/constellation-node-operator/internal/cloud/aws/client/nodeimage_test.go +++ 
b/operators/constellation-node-operator/internal/cloud/aws/client/nodeimage_test.go @@ -1,14 +1,13 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client import ( "context" - "fmt" "testing" "github.com/aws/aws-sdk-go-v2/service/autoscaling" @@ -91,7 +90,7 @@ func TestGetNodeImage(t *testing.T) { describeInstancesErr: tc.describeInstancesErr, }, } - gotImage, err := client.GetNodeImage(t.Context(), tc.providerID) + gotImage, err := client.GetNodeImage(context.Background(), tc.providerID) if tc.wantErr { assert.Error(err) return @@ -199,7 +198,7 @@ func TestGetScalingGroupID(t *testing.T) { describeInstancesErr: tc.describeInstancesErr, }, } - gotScalingID, err := client.GetScalingGroupID(t.Context(), tc.providerID) + gotScalingID, err := client.GetScalingGroupID(context.Background(), tc.providerID) if tc.wantErr { assert.Error(err) return @@ -357,7 +356,7 @@ func TestCreateNode(t *testing.T) { setDesiredCapacityErr: tc.setDesiredCapacityErr, }, } - nodeName, providerID, err := client.CreateNode(t.Context(), tc.providerID) + nodeName, providerID, err := client.CreateNode(context.Background(), tc.providerID) if tc.wantErr { assert.Error(err) return @@ -383,10 +382,6 @@ func TestDeleteNode(t *testing.T) { terminateInstanceErr: assert.AnError, wantErr: true, }, - "deleting node succeeds when the instance does not exist": { - providerID: "aws:///us-east-2a/i-00000000000000000", - terminateInstanceErr: fmt.Errorf("Instance Id not found - No managed instance found for instance ID: i-00000000000000000"), - }, } for name, tc := range testCases { t.Run(name, func(t *testing.T) { @@ -398,7 +393,7 @@ func TestDeleteNode(t *testing.T) { terminateInstanceErr: tc.terminateInstanceErr, }, } - err := client.DeleteNode(t.Context(), tc.providerID) + err := client.DeleteNode(context.Background(), tc.providerID) if tc.wantErr { assert.Error(err) return diff --git a/operators/constellation-node-operator/internal/cloud/aws/client/pendingnode.go b/operators/constellation-node-operator/internal/cloud/aws/client/pendingnode.go index c559a61cc..6121ce682 100644 --- a/operators/constellation-node-operator/internal/cloud/aws/client/pendingnode.go +++ b/operators/constellation-node-operator/internal/cloud/aws/client/pendingnode.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client diff --git a/operators/constellation-node-operator/internal/cloud/aws/client/pendingnode_test.go b/operators/constellation-node-operator/internal/cloud/aws/client/pendingnode_test.go index 7f43ee6ba..b2745358f 100644 --- a/operators/constellation-node-operator/internal/cloud/aws/client/pendingnode_test.go +++ b/operators/constellation-node-operator/internal/cloud/aws/client/pendingnode_test.go @@ -1,12 +1,13 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client import ( + "context" "errors" "testing" @@ -160,7 +161,7 @@ func TestGetNodeState(t *testing.T) { describeInstanceStatusErr: tc.describeInstanceStatusErr, }, } - nodeState, err := client.GetNodeState(t.Context(), tc.providerID) + nodeState, err := client.GetNodeState(context.Background(), tc.providerID) assert.Equal(tc.wantState, nodeState) if tc.wantErr { assert.Error(err) diff --git a/operators/constellation-node-operator/internal/cloud/aws/client/scalinggroup.go 
b/operators/constellation-node-operator/internal/cloud/aws/client/scalinggroup.go index 50673ea67..6eee2c5e2 100644 --- a/operators/constellation-node-operator/internal/cloud/aws/client/scalinggroup.go +++ b/operators/constellation-node-operator/internal/cloud/aws/client/scalinggroup.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client diff --git a/operators/constellation-node-operator/internal/cloud/aws/client/scalinggroup_test.go b/operators/constellation-node-operator/internal/cloud/aws/client/scalinggroup_test.go index b286e182d..b5e4f60ce 100644 --- a/operators/constellation-node-operator/internal/cloud/aws/client/scalinggroup_test.go +++ b/operators/constellation-node-operator/internal/cloud/aws/client/scalinggroup_test.go @@ -1,12 +1,13 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client import ( + "context" "testing" "github.com/aws/aws-sdk-go-v2/service/autoscaling" @@ -90,7 +91,7 @@ func TestGetScalingGroupImage(t *testing.T) { }, }, } - scalingGroupImage, err := client.GetScalingGroupImage(t.Context(), tc.providerID) + scalingGroupImage, err := client.GetScalingGroupImage(context.Background(), tc.providerID) if tc.wantErr { assert.Error(err) return @@ -215,7 +216,7 @@ func TestSetScalingGroupImage(t *testing.T) { }, }, } - err := client.SetScalingGroupImage(t.Context(), tc.providerID, tc.imageURI) + err := client.SetScalingGroupImage(context.Background(), tc.providerID, tc.imageURI) if tc.wantErr { assert.Error(err) return @@ -318,7 +319,7 @@ func TestListScalingGroups(t *testing.T) { describeAutoScalingGroupsErr: tc.describeAutoScalingGroupsErr, }, } - gotGroups, err := client.ListScalingGroups(t.Context(), tc.providerID) + gotGroups, err := client.ListScalingGroups(context.Background(), tc.providerID) if tc.wantErr { assert.Error(err) return diff --git a/operators/constellation-node-operator/internal/cloud/azure/client/BUILD.bazel b/operators/constellation-node-operator/internal/cloud/azure/client/BUILD.bazel index ee7e27d79..2c763b1e2 100644 --- a/operators/constellation-node-operator/internal/cloud/azure/client/BUILD.bazel +++ b/operators/constellation-node-operator/internal/cloud/azure/client/BUILD.bazel @@ -27,7 +27,7 @@ go_library( "@com_github_azure_azure_sdk_for_go_sdk_azcore//runtime", "@com_github_azure_azure_sdk_for_go_sdk_azcore//to", "@com_github_azure_azure_sdk_for_go_sdk_azidentity//:azidentity", - "@com_github_azure_azure_sdk_for_go_sdk_resourcemanager_compute_armcompute_v6//:armcompute", + "@com_github_azure_azure_sdk_for_go_sdk_resourcemanager_compute_armcompute_v5//:armcompute", "@com_github_spf13_afero//:afero", ], ) @@ -53,7 +53,7 @@ go_test( "@com_github_azure_azure_sdk_for_go_sdk_azcore//:azcore", "@com_github_azure_azure_sdk_for_go_sdk_azcore//runtime", "@com_github_azure_azure_sdk_for_go_sdk_azcore//to", - "@com_github_azure_azure_sdk_for_go_sdk_resourcemanager_compute_armcompute_v6//:armcompute", + "@com_github_azure_azure_sdk_for_go_sdk_resourcemanager_compute_armcompute_v5//:armcompute", "@com_github_spf13_afero//:afero", "@com_github_stretchr_testify//assert", "@com_github_stretchr_testify//require", diff --git a/operators/constellation-node-operator/internal/cloud/azure/client/api.go b/operators/constellation-node-operator/internal/cloud/azure/client/api.go index 535939148..6f77a9929 100644 --- a/operators/constellation-node-operator/internal/cloud/azure/client/api.go 
+++ b/operators/constellation-node-operator/internal/cloud/azure/client/api.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client @@ -10,7 +10,7 @@ import ( "context" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/edgelesssys/constellation/v2/operators/constellation-node-operator/internal/poller" ) diff --git a/operators/constellation-node-operator/internal/cloud/azure/client/autoscaler.go b/operators/constellation-node-operator/internal/cloud/azure/client/autoscaler.go index a946810db..d3c268f93 100644 --- a/operators/constellation-node-operator/internal/cloud/azure/client/autoscaler.go +++ b/operators/constellation-node-operator/internal/cloud/azure/client/autoscaler.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client diff --git a/operators/constellation-node-operator/internal/cloud/azure/client/client.go b/operators/constellation-node-operator/internal/cloud/azure/client/client.go index 4e31cb0b9..af78fee88 100644 --- a/operators/constellation-node-operator/internal/cloud/azure/client/client.go +++ b/operators/constellation-node-operator/internal/cloud/azure/client/client.go @@ -1,14 +1,14 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client import ( "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/edgelesssys/constellation/v2/operators/constellation-node-operator/internal/poller" "github.com/spf13/afero" ) diff --git a/operators/constellation-node-operator/internal/cloud/azure/client/client_test.go b/operators/constellation-node-operator/internal/cloud/azure/client/client_test.go index 6daf70bb8..f798d2be8 100644 --- a/operators/constellation-node-operator/internal/cloud/azure/client/client_test.go +++ b/operators/constellation-node-operator/internal/cloud/azure/client/client_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client @@ -11,7 +11,7 @@ import ( "net/http" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" ) type stubScaleSetsAPI struct { diff --git a/operators/constellation-node-operator/internal/cloud/azure/client/config.go b/operators/constellation-node-operator/internal/cloud/azure/client/config.go index 52f0091b1..4a26734f8 100644 --- a/operators/constellation-node-operator/internal/cloud/azure/client/config.go +++ b/operators/constellation-node-operator/internal/cloud/azure/client/config.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client diff --git a/operators/constellation-node-operator/internal/cloud/azure/client/config_test.go b/operators/constellation-node-operator/internal/cloud/azure/client/config_test.go index 82da00082..482221e36 100644 --- 
a/operators/constellation-node-operator/internal/cloud/azure/client/config_test.go +++ b/operators/constellation-node-operator/internal/cloud/azure/client/config_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client diff --git a/operators/constellation-node-operator/internal/cloud/azure/client/instanceview.go b/operators/constellation-node-operator/internal/cloud/azure/client/instanceview.go index 98dfa74e3..e55ab21c6 100644 --- a/operators/constellation-node-operator/internal/cloud/azure/client/instanceview.go +++ b/operators/constellation-node-operator/internal/cloud/azure/client/instanceview.go @@ -1,13 +1,13 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client import ( - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" updatev1alpha1 "github.com/edgelesssys/constellation/v2/operators/constellation-node-operator/api/v1alpha1" ) diff --git a/operators/constellation-node-operator/internal/cloud/azure/client/instanceview_test.go b/operators/constellation-node-operator/internal/cloud/azure/client/instanceview_test.go index c47829de7..26737e6cc 100644 --- a/operators/constellation-node-operator/internal/cloud/azure/client/instanceview_test.go +++ b/operators/constellation-node-operator/internal/cloud/azure/client/instanceview_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client @@ -10,7 +10,7 @@ import ( "testing" "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/stretchr/testify/assert" updatev1alpha1 "github.com/edgelesssys/constellation/v2/operators/constellation-node-operator/api/v1alpha1" diff --git a/operators/constellation-node-operator/internal/cloud/azure/client/nodeimage.go b/operators/constellation-node-operator/internal/cloud/azure/client/nodeimage.go index d36b8a97c..0af5c5885 100644 --- a/operators/constellation-node-operator/internal/cloud/azure/client/nodeimage.go +++ b/operators/constellation-node-operator/internal/cloud/azure/client/nodeimage.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client @@ -11,7 +11,7 @@ import ( "fmt" "strings" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/edgelesssys/constellation/v2/internal/mpimage" ) diff --git a/operators/constellation-node-operator/internal/cloud/azure/client/nodeimage_test.go b/operators/constellation-node-operator/internal/cloud/azure/client/nodeimage_test.go index 86bfd55ed..135859602 100644 --- a/operators/constellation-node-operator/internal/cloud/azure/client/nodeimage_test.go +++ b/operators/constellation-node-operator/internal/cloud/azure/client/nodeimage_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client @@ -13,7 +13,7 @@ import ( "testing" "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - 
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/edgelesssys/constellation/v2/operators/constellation-node-operator/internal/poller" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -98,7 +98,7 @@ func TestGetNodeImage(t *testing.T) { getErr: tc.getScaleSetVMErr, }, } - gotImage, err := client.GetNodeImage(t.Context(), tc.providerID) + gotImage, err := client.GetNodeImage(context.Background(), tc.providerID) if tc.wantErr { assert.Error(err) return @@ -131,7 +131,7 @@ func TestGetScalingGroupID(t *testing.T) { require := require.New(t) client := Client{} - gotScalingGroupID, err := client.GetScalingGroupID(t.Context(), tc.providerID) + gotScalingGroupID, err := client.GetScalingGroupID(context.Background(), tc.providerID) if tc.wantErr { assert.Error(err) return @@ -262,7 +262,7 @@ func TestCreateNode(t *testing.T) { var createErr error go func() { defer wg.Done() - gotNodeName, gotProviderID, createErr = client.CreateNode(t.Context(), tc.scalingGroupID) + gotNodeName, gotProviderID, createErr = client.CreateNode(context.Background(), tc.scalingGroupID) }() // want error before PollUntilDone is called @@ -319,7 +319,7 @@ func TestDeleteNode(t *testing.T) { client := Client{ scaleSetsAPI: &stubScaleSetsAPI{deleteErr: tc.deleteErr}, } - err := client.DeleteNode(t.Context(), tc.providerID) + err := client.DeleteNode(context.Background(), tc.providerID) if tc.wantErr { assert.Error(err) return @@ -343,25 +343,25 @@ func TestCapacityPollingHandler(t *testing.T) { }, wantedCapacity: wantCapacity, } - assert.NoError(handler.Poll(t.Context())) + assert.NoError(handler.Poll(context.Background())) assert.False(handler.Done()) // Calling Result early should error - assert.Error(handler.Result(t.Context(), &gotCapacity)) + assert.Error(handler.Result(context.Background(), &gotCapacity)) // let scaleSet API error handler.scaleSetsAPI.(*stubScaleSetsAPI).getErr = errors.New("get error") - assert.Error(handler.Poll(t.Context())) + assert.Error(handler.Poll(context.Background())) handler.scaleSetsAPI.(*stubScaleSetsAPI).getErr = nil // let scaleSet API return invalid SKU handler.scaleSetsAPI.(*stubScaleSetsAPI).scaleSet.SKU = nil - assert.Error(handler.Poll(t.Context())) + assert.Error(handler.Poll(context.Background())) // let Poll finish handler.scaleSetsAPI.(*stubScaleSetsAPI).scaleSet.SKU = &armcompute.SKU{Capacity: to.Ptr(wantCapacity)} - assert.NoError(handler.Poll(t.Context())) + assert.NoError(handler.Poll(context.Background())) assert.True(handler.Done()) - assert.NoError(handler.Result(t.Context(), &gotCapacity)) + assert.NoError(handler.Result(context.Background(), &gotCapacity)) assert.Equal(wantCapacity, gotCapacity) } diff --git a/operators/constellation-node-operator/internal/cloud/azure/client/pendingnode.go b/operators/constellation-node-operator/internal/cloud/azure/client/pendingnode.go index 9bd14e0f8..99f6bedd9 100644 --- a/operators/constellation-node-operator/internal/cloud/azure/client/pendingnode.go +++ b/operators/constellation-node-operator/internal/cloud/azure/client/pendingnode.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client diff --git a/operators/constellation-node-operator/internal/cloud/azure/client/pendingnode_test.go b/operators/constellation-node-operator/internal/cloud/azure/client/pendingnode_test.go index 
9977b1fd2..aeda7edad 100644 --- a/operators/constellation-node-operator/internal/cloud/azure/client/pendingnode_test.go +++ b/operators/constellation-node-operator/internal/cloud/azure/client/pendingnode_test.go @@ -1,19 +1,20 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client import ( + "context" "errors" "net/http" "testing" "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" updatev1alpha1 "github.com/edgelesssys/constellation/v2/operators/constellation-node-operator/api/v1alpha1" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -66,7 +67,7 @@ func TestGetNodeState(t *testing.T) { instanceViewErr: tc.getInstanceViewErr, }, } - gotState, err := client.GetNodeState(t.Context(), tc.providerID) + gotState, err := client.GetNodeState(context.Background(), tc.providerID) if tc.wantErr { assert.Error(err) return diff --git a/operators/constellation-node-operator/internal/cloud/azure/client/providerid.go b/operators/constellation-node-operator/internal/cloud/azure/client/providerid.go index 8ef8e38f8..a31637fb0 100644 --- a/operators/constellation-node-operator/internal/cloud/azure/client/providerid.go +++ b/operators/constellation-node-operator/internal/cloud/azure/client/providerid.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client diff --git a/operators/constellation-node-operator/internal/cloud/azure/client/providerid_test.go b/operators/constellation-node-operator/internal/cloud/azure/client/providerid_test.go index 2376c618e..64b14badf 100644 --- a/operators/constellation-node-operator/internal/cloud/azure/client/providerid_test.go +++ b/operators/constellation-node-operator/internal/cloud/azure/client/providerid_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client diff --git a/operators/constellation-node-operator/internal/cloud/azure/client/scalinggroup.go b/operators/constellation-node-operator/internal/cloud/azure/client/scalinggroup.go index 285f90986..26c682c64 100644 --- a/operators/constellation-node-operator/internal/cloud/azure/client/scalinggroup.go +++ b/operators/constellation-node-operator/internal/cloud/azure/client/scalinggroup.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client @@ -12,7 +12,7 @@ import ( "strings" "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/edgelesssys/constellation/v2/internal/constants" "github.com/edgelesssys/constellation/v2/internal/mpimage" updatev1alpha1 "github.com/edgelesssys/constellation/v2/operators/constellation-node-operator/api/v1alpha1" diff --git a/operators/constellation-node-operator/internal/cloud/azure/client/scalinggroup_test.go b/operators/constellation-node-operator/internal/cloud/azure/client/scalinggroup_test.go index 7915e66ad..11b65da0f 100644 --- a/operators/constellation-node-operator/internal/cloud/azure/client/scalinggroup_test.go +++ 
b/operators/constellation-node-operator/internal/cloud/azure/client/scalinggroup_test.go @@ -1,17 +1,18 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client import ( + "context" "errors" "testing" "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/edgelesssys/constellation/v2/internal/constants" cspapi "github.com/edgelesssys/constellation/v2/operators/constellation-node-operator/internal/cloud/api" "github.com/stretchr/testify/assert" @@ -102,7 +103,7 @@ func TestGetScalingGroupImage(t *testing.T) { getErr: tc.getScaleSetErr, }, } - gotImage, err := client.GetScalingGroupImage(t.Context(), tc.scalingGroupID) + gotImage, err := client.GetScalingGroupImage(context.Background(), tc.scalingGroupID) if tc.wantErr { assert.Error(err) return @@ -154,7 +155,7 @@ func TestSetScalingGroupImage(t *testing.T) { resultErr: tc.resultErr, }, } - err := client.SetScalingGroupImage(t.Context(), tc.scalingGroupID, tc.imageURI) + err := client.SetScalingGroupImage(context.Background(), tc.scalingGroupID, tc.imageURI) if tc.wantErr { assert.Error(err) return @@ -290,7 +291,7 @@ func TestListScalingGroups(t *testing.T) { }, }, } - gotGroups, err := client.ListScalingGroups(t.Context(), "uid") + gotGroups, err := client.ListScalingGroups(context.Background(), "uid") if tc.wantErr { assert.Error(err) return diff --git a/operators/constellation-node-operator/internal/cloud/azure/client/vmss.go b/operators/constellation-node-operator/internal/cloud/azure/client/vmss.go index b65ce59df..b989f9a99 100644 --- a/operators/constellation-node-operator/internal/cloud/azure/client/vmss.go +++ b/operators/constellation-node-operator/internal/cloud/azure/client/vmss.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client diff --git a/operators/constellation-node-operator/internal/cloud/azure/client/vmss_test.go b/operators/constellation-node-operator/internal/cloud/azure/client/vmss_test.go index b4767239b..2588941e5 100644 --- a/operators/constellation-node-operator/internal/cloud/azure/client/vmss_test.go +++ b/operators/constellation-node-operator/internal/cloud/azure/client/vmss_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client diff --git a/operators/constellation-node-operator/internal/cloud/fake/client/client.go b/operators/constellation-node-operator/internal/cloud/fake/client/client.go index adcfc27aa..56e97e01d 100644 --- a/operators/constellation-node-operator/internal/cloud/fake/client/client.go +++ b/operators/constellation-node-operator/internal/cloud/fake/client/client.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client diff --git a/operators/constellation-node-operator/internal/cloud/gcp/client/api.go b/operators/constellation-node-operator/internal/cloud/gcp/client/api.go index 20b6b0e06..12966da9e 100644 --- a/operators/constellation-node-operator/internal/cloud/gcp/client/api.go +++ b/operators/constellation-node-operator/internal/cloud/gcp/client/api.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 
+SPDX-License-Identifier: AGPL-3.0-only */ package client diff --git a/operators/constellation-node-operator/internal/cloud/gcp/client/autocaler.go b/operators/constellation-node-operator/internal/cloud/gcp/client/autocaler.go index ba6bb4851..6ac106353 100644 --- a/operators/constellation-node-operator/internal/cloud/gcp/client/autocaler.go +++ b/operators/constellation-node-operator/internal/cloud/gcp/client/autocaler.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client diff --git a/operators/constellation-node-operator/internal/cloud/gcp/client/client.go b/operators/constellation-node-operator/internal/cloud/gcp/client/client.go index b7a221dda..aa0a46ae7 100644 --- a/operators/constellation-node-operator/internal/cloud/gcp/client/client.go +++ b/operators/constellation-node-operator/internal/cloud/gcp/client/client.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client diff --git a/operators/constellation-node-operator/internal/cloud/gcp/client/client_test.go b/operators/constellation-node-operator/internal/cloud/gcp/client/client_test.go index 184c16cde..e7779453b 100644 --- a/operators/constellation-node-operator/internal/cloud/gcp/client/client_test.go +++ b/operators/constellation-node-operator/internal/cloud/gcp/client/client_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client diff --git a/operators/constellation-node-operator/internal/cloud/gcp/client/config.go b/operators/constellation-node-operator/internal/cloud/gcp/client/config.go index 905089afe..a3fa313e8 100644 --- a/operators/constellation-node-operator/internal/cloud/gcp/client/config.go +++ b/operators/constellation-node-operator/internal/cloud/gcp/client/config.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client diff --git a/operators/constellation-node-operator/internal/cloud/gcp/client/config_test.go b/operators/constellation-node-operator/internal/cloud/gcp/client/config_test.go index da389081c..cf438fe90 100644 --- a/operators/constellation-node-operator/internal/cloud/gcp/client/config_test.go +++ b/operators/constellation-node-operator/internal/cloud/gcp/client/config_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client diff --git a/operators/constellation-node-operator/internal/cloud/gcp/client/disks.go b/operators/constellation-node-operator/internal/cloud/gcp/client/disks.go index d26b4c626..a8d244c20 100644 --- a/operators/constellation-node-operator/internal/cloud/gcp/client/disks.go +++ b/operators/constellation-node-operator/internal/cloud/gcp/client/disks.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client @@ -14,8 +14,8 @@ import ( ) var ( - diskSourceRegex = regexp.MustCompile(`^https://www\.googleapis\.com/compute/v1/projects/([^/]+)/zones/([^/]+)/disks/([^/]+)$`) - computeAPIBase = regexp.MustCompile(`^https://www\.googleapis\.com/compute/v1/(.+)$`) + diskSourceRegex = regexp.MustCompile(`^https://www.googleapis.com/compute/v1/projects/([^/]+)/zones/([^/]+)/disks/([^/]+)$`) + computeAPIBase = 
regexp.MustCompile(`^https://www.googleapis.com/compute/v1/(.+)$`) ) // diskSourceToDiskReq converts a disk source URI to a disk request. diff --git a/operators/constellation-node-operator/internal/cloud/gcp/client/disks_test.go b/operators/constellation-node-operator/internal/cloud/gcp/client/disks_test.go index 473c4b768..f92ea8f63 100644 --- a/operators/constellation-node-operator/internal/cloud/gcp/client/disks_test.go +++ b/operators/constellation-node-operator/internal/cloud/gcp/client/disks_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client @@ -36,10 +36,6 @@ func TestDiskSourceToDiskReq(t *testing.T) { diskSource: "invalid://www.googleapis.com/compute/v1/projects/project/zones/zone/disks/disk", wantErr: true, }, - "url dots in regex are escaped": { - diskSource: "https://wwwAgoogleapisAcom/compute/v1/projects/project/zones/zone/disks/disk", - wantErr: true, - }, } for name, tc := range testCases { @@ -71,10 +67,6 @@ func TestURINormalize(t *testing.T) { imageURI: "projects/project/global/images/image", wantNormalized: "projects/project/global/images/image", }, - "url dots in regex are escaped": { - imageURI: "https://wwwAgoogleapisAcom/compute/v1/projects/project/global/images/image", - wantNormalized: "https://wwwAgoogleapisAcom/compute/v1/projects/project/global/images/image", - }, } for name, tc := range testCases { diff --git a/operators/constellation-node-operator/internal/cloud/gcp/client/gcpwrappers.go b/operators/constellation-node-operator/internal/cloud/gcp/client/gcpwrappers.go index f7c7313af..3d34efba7 100644 --- a/operators/constellation-node-operator/internal/cloud/gcp/client/gcpwrappers.go +++ b/operators/constellation-node-operator/internal/cloud/gcp/client/gcpwrappers.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client diff --git a/operators/constellation-node-operator/internal/cloud/gcp/client/instancegroupmanagers.go b/operators/constellation-node-operator/internal/cloud/gcp/client/instancegroupmanagers.go index ff3e10564..0b6de00d0 100644 --- a/operators/constellation-node-operator/internal/cloud/gcp/client/instancegroupmanagers.go +++ b/operators/constellation-node-operator/internal/cloud/gcp/client/instancegroupmanagers.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client diff --git a/operators/constellation-node-operator/internal/cloud/gcp/client/instancegroupmanagers_test.go b/operators/constellation-node-operator/internal/cloud/gcp/client/instancegroupmanagers_test.go index 365d61a62..23cce1ab2 100644 --- a/operators/constellation-node-operator/internal/cloud/gcp/client/instancegroupmanagers_test.go +++ b/operators/constellation-node-operator/internal/cloud/gcp/client/instancegroupmanagers_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client diff --git a/operators/constellation-node-operator/internal/cloud/gcp/client/instancetemplate.go b/operators/constellation-node-operator/internal/cloud/gcp/client/instancetemplate.go index e7c5c7a88..85c1f6979 100644 --- a/operators/constellation-node-operator/internal/cloud/gcp/client/instancetemplate.go +++ b/operators/constellation-node-operator/internal/cloud/gcp/client/instancetemplate.go @@ -1,7 +1,7 @@ /* Copyright 
(c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client diff --git a/operators/constellation-node-operator/internal/cloud/gcp/client/instancetemplate_test.go b/operators/constellation-node-operator/internal/cloud/gcp/client/instancetemplate_test.go index 67e36b7f8..242fa09be 100644 --- a/operators/constellation-node-operator/internal/cloud/gcp/client/instancetemplate_test.go +++ b/operators/constellation-node-operator/internal/cloud/gcp/client/instancetemplate_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client diff --git a/operators/constellation-node-operator/internal/cloud/gcp/client/metadata.go b/operators/constellation-node-operator/internal/cloud/gcp/client/metadata.go index ce11c214f..a8a89345a 100644 --- a/operators/constellation-node-operator/internal/cloud/gcp/client/metadata.go +++ b/operators/constellation-node-operator/internal/cloud/gcp/client/metadata.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client diff --git a/operators/constellation-node-operator/internal/cloud/gcp/client/metadata_test.go b/operators/constellation-node-operator/internal/cloud/gcp/client/metadata_test.go index 8c262e9e6..c4dff26bc 100644 --- a/operators/constellation-node-operator/internal/cloud/gcp/client/metadata_test.go +++ b/operators/constellation-node-operator/internal/cloud/gcp/client/metadata_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client diff --git a/operators/constellation-node-operator/internal/cloud/gcp/client/nodeimage.go b/operators/constellation-node-operator/internal/cloud/gcp/client/nodeimage.go index 03182ad8b..ddd102d39 100644 --- a/operators/constellation-node-operator/internal/cloud/gcp/client/nodeimage.go +++ b/operators/constellation-node-operator/internal/cloud/gcp/client/nodeimage.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client @@ -139,8 +139,7 @@ func (c *Client) DeleteNode(ctx context.Context, providerID string) error { Project: instanceGroupProject, Zone: instanceGroupZone, InstanceGroupManagersDeleteInstancesRequestResource: &computepb.InstanceGroupManagersDeleteInstancesRequest{ - Instances: []string{instanceID}, - SkipInstancesOnValidationError: toPtr(true), + Instances: []string{instanceID}, }, }) if err != nil { @@ -148,7 +147,3 @@ func (c *Client) DeleteNode(ctx context.Context, providerID string) error { } return op.Wait(ctx) } - -func toPtr[T any](v T) *T { - return &v -} diff --git a/operators/constellation-node-operator/internal/cloud/gcp/client/nodeimage_test.go b/operators/constellation-node-operator/internal/cloud/gcp/client/nodeimage_test.go index 5fe9abd92..8ce178dca 100644 --- a/operators/constellation-node-operator/internal/cloud/gcp/client/nodeimage_test.go +++ b/operators/constellation-node-operator/internal/cloud/gcp/client/nodeimage_test.go @@ -1,12 +1,13 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client import ( + "context" "errors" "math/rand" "testing" @@ -100,7 +101,7 @@ func TestGetNodeImage(t *testing.T) { disk: tc.disk, }, } - gotImage, err := client.GetNodeImage(t.Context(), tc.providerID) + 
gotImage, err := client.GetNodeImage(context.Background(), tc.providerID) if tc.wantErr { assert.Error(err) return @@ -161,7 +162,7 @@ func TestGetScalingGroupID(t *testing.T) { instance: &instance, }, } - gotScalingGroupID, err := client.GetScalingGroupID(t.Context(), tc.providerID) + gotScalingGroupID, err := client.GetScalingGroupID(context.Background(), tc.providerID) if tc.wantErr { assert.Error(err) return @@ -220,7 +221,7 @@ func TestCreateNode(t *testing.T) { }, prng: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))), } - instanceName, providerID, err := client.CreateNode(t.Context(), tc.scalingGroupID) + instanceName, providerID, err := client.CreateNode(context.Background(), tc.scalingGroupID) if tc.wantErr { assert.Error(err) return @@ -286,7 +287,7 @@ func TestDeleteNode(t *testing.T) { }, }, } - err := client.DeleteNode(t.Context(), tc.providerID) + err := client.DeleteNode(context.Background(), tc.providerID) if tc.wantErr { assert.Error(err) return diff --git a/operators/constellation-node-operator/internal/cloud/gcp/client/pendingnode.go b/operators/constellation-node-operator/internal/cloud/gcp/client/pendingnode.go index 4bacead00..d2c401433 100644 --- a/operators/constellation-node-operator/internal/cloud/gcp/client/pendingnode.go +++ b/operators/constellation-node-operator/internal/cloud/gcp/client/pendingnode.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client diff --git a/operators/constellation-node-operator/internal/cloud/gcp/client/pendingnode_test.go b/operators/constellation-node-operator/internal/cloud/gcp/client/pendingnode_test.go index d4a03405a..5791d7fd4 100644 --- a/operators/constellation-node-operator/internal/cloud/gcp/client/pendingnode_test.go +++ b/operators/constellation-node-operator/internal/cloud/gcp/client/pendingnode_test.go @@ -1,12 +1,13 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client import ( + "context" "errors" "net/http" "testing" @@ -107,7 +108,7 @@ func TestGetNodeState(t *testing.T) { }, }, } - nodeState, err := client.GetNodeState(t.Context(), tc.providerID) + nodeState, err := client.GetNodeState(context.Background(), tc.providerID) if tc.wantErr { assert.Error(err) return diff --git a/operators/constellation-node-operator/internal/cloud/gcp/client/project.go b/operators/constellation-node-operator/internal/cloud/gcp/client/project.go index 269f2a5fb..edd131b5d 100644 --- a/operators/constellation-node-operator/internal/cloud/gcp/client/project.go +++ b/operators/constellation-node-operator/internal/cloud/gcp/client/project.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client diff --git a/operators/constellation-node-operator/internal/cloud/gcp/client/project_test.go b/operators/constellation-node-operator/internal/cloud/gcp/client/project_test.go index 357fb5ca6..cd0c90326 100644 --- a/operators/constellation-node-operator/internal/cloud/gcp/client/project_test.go +++ b/operators/constellation-node-operator/internal/cloud/gcp/client/project_test.go @@ -1,12 +1,13 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client import ( + "context" "errors" "testing" @@ -56,7 +57,7 @@ func TestCanonicalProjectID(t *testing.T) { getErr: tc.getProjectErr, }, } - gotID, err := 
client.canonicalProjectID(t.Context(), tc.projectID) + gotID, err := client.canonicalProjectID(context.Background(), tc.projectID) if tc.wantErr { assert.Error(err) return diff --git a/operators/constellation-node-operator/internal/cloud/gcp/client/providerid.go b/operators/constellation-node-operator/internal/cloud/gcp/client/providerid.go index 847a9167a..6fd197e8a 100644 --- a/operators/constellation-node-operator/internal/cloud/gcp/client/providerid.go +++ b/operators/constellation-node-operator/internal/cloud/gcp/client/providerid.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client diff --git a/operators/constellation-node-operator/internal/cloud/gcp/client/providerid_test.go b/operators/constellation-node-operator/internal/cloud/gcp/client/providerid_test.go index 2d9b61894..356d73eb7 100644 --- a/operators/constellation-node-operator/internal/cloud/gcp/client/providerid_test.go +++ b/operators/constellation-node-operator/internal/cloud/gcp/client/providerid_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client diff --git a/operators/constellation-node-operator/internal/cloud/gcp/client/scalinggroup.go b/operators/constellation-node-operator/internal/cloud/gcp/client/scalinggroup.go index 991d9ad07..683ad89ce 100644 --- a/operators/constellation-node-operator/internal/cloud/gcp/client/scalinggroup.go +++ b/operators/constellation-node-operator/internal/cloud/gcp/client/scalinggroup.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client @@ -111,8 +111,6 @@ func (c *Client) GetAutoscalingGroupName(scalingGroupID string) (string, error) // ListScalingGroups retrieves a list of scaling groups for the cluster. 
func (c *Client) ListScalingGroups(ctx context.Context, uid string) ([]cspapi.ScalingGroup, error) { results := []cspapi.ScalingGroup{} - var retErr error - iter := c.instanceGroupManagersAPI.AggregatedList(ctx, &computepb.AggregatedListInstanceGroupManagersRequest{ Project: c.projectID, }) @@ -140,8 +138,7 @@ func (c *Client) ListScalingGroups(ctx context.Context, uid string) ([]cspapi.Sc InstanceTemplate: templateURI[len(templateURI)-1], }) if err != nil { - retErr = errors.Join(retErr, fmt.Errorf("getting instance template %q: %w", templateURI[len(templateURI)-1], err)) - continue + return nil, fmt.Errorf("getting instance template: %w", err) } if template.Properties == nil || template.Properties.Labels == nil { continue @@ -152,16 +149,14 @@ func (c *Client) ListScalingGroups(ctx context.Context, uid string) ([]cspapi.Sc groupID, err := c.canonicalInstanceGroupID(ctx, *grpManager.SelfLink) if err != nil { - retErr = errors.Join(retErr, fmt.Errorf("getting canonical instance group ID: %w", err)) - continue + return nil, fmt.Errorf("normalizing instance group ID: %w", err) } role := updatev1alpha1.NodeRoleFromString(template.Properties.Labels["constellation-role"]) name, err := c.GetScalingGroupName(groupID) if err != nil { - retErr = errors.Join(retErr, fmt.Errorf("getting scaling group name: %w", err)) - continue + return nil, fmt.Errorf("getting scaling group name: %w", err) } nodeGroupName := template.Properties.Labels["constellation-node-group"] @@ -178,8 +173,7 @@ func (c *Client) ListScalingGroups(ctx context.Context, uid string) ([]cspapi.Sc autoscalerGroupName, err := c.GetAutoscalingGroupName(groupID) if err != nil { - retErr = errors.Join(retErr, fmt.Errorf("getting autoscaling group name: %w", err)) - continue + return nil, fmt.Errorf("getting autoscaling group name: %w", err) } results = append(results, cspapi.ScalingGroup{ @@ -191,11 +185,6 @@ func (c *Client) ListScalingGroups(ctx context.Context, uid string) ([]cspapi.Sc }) } } - - if len(results) == 0 { - return nil, errors.Join(errors.New("no scaling groups found"), retErr) - } - return results, nil } diff --git a/operators/constellation-node-operator/internal/cloud/gcp/client/scalinggroup_test.go b/operators/constellation-node-operator/internal/cloud/gcp/client/scalinggroup_test.go index 69faf062e..a91f49d1d 100644 --- a/operators/constellation-node-operator/internal/cloud/gcp/client/scalinggroup_test.go +++ b/operators/constellation-node-operator/internal/cloud/gcp/client/scalinggroup_test.go @@ -1,12 +1,13 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package client import ( + "context" "errors" "testing" @@ -95,7 +96,7 @@ func TestGetScalingGroupImage(t *testing.T) { template: tc.instanceTemplate, }, } - gotImage, err := client.GetScalingGroupImage(t.Context(), tc.scalingGroupID) + gotImage, err := client.GetScalingGroupImage(context.Background(), tc.scalingGroupID) if tc.wantErr { assert.Error(err) return @@ -280,7 +281,7 @@ func TestSetScalingGroupImage(t *testing.T) { template: tc.instanceTemplate, }, } - err := client.SetScalingGroupImage(t.Context(), tc.scalingGroupID, tc.imageURI) + err := client.SetScalingGroupImage(context.Background(), tc.scalingGroupID, tc.imageURI) if tc.wantErr { assert.Error(err) return @@ -425,11 +426,8 @@ func TestListScalingGroups(t *testing.T) { templateLabels: map[string]string{ "label": "value", }, - wantErr: true, - }, - "invalid instance group manager": { - wantErr: true, }, + "invalid instance group 
manager": {}, } for name, tc := range testCases { @@ -455,7 +453,7 @@ func TestListScalingGroups(t *testing.T) { getErr: tc.templateGetErr, }, } - gotGroups, err := client.ListScalingGroups(t.Context(), "uid") + gotGroups, err := client.ListScalingGroups(context.Background(), "uid") if tc.wantErr { assert.Error(err) return diff --git a/operators/constellation-node-operator/internal/constants/constants.go b/operators/constellation-node-operator/internal/constants/constants.go index df85aed00..b057df59c 100644 --- a/operators/constellation-node-operator/internal/constants/constants.go +++ b/operators/constellation-node-operator/internal/constants/constants.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package constants diff --git a/operators/constellation-node-operator/internal/controlplane/controlplane.go b/operators/constellation-node-operator/internal/controlplane/controlplane.go index 61a7d73e1..59f77ad8d 100644 --- a/operators/constellation-node-operator/internal/controlplane/controlplane.go +++ b/operators/constellation-node-operator/internal/controlplane/controlplane.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package controlplane diff --git a/operators/constellation-node-operator/internal/controlplane/controlplane_test.go b/operators/constellation-node-operator/internal/controlplane/controlplane_test.go index facae56d5..a089f02b3 100644 --- a/operators/constellation-node-operator/internal/controlplane/controlplane_test.go +++ b/operators/constellation-node-operator/internal/controlplane/controlplane_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package controlplane diff --git a/operators/constellation-node-operator/internal/deploy/deploy.go b/operators/constellation-node-operator/internal/deploy/deploy.go index 812ca23c8..fcd79be44 100644 --- a/operators/constellation-node-operator/internal/deploy/deploy.go +++ b/operators/constellation-node-operator/internal/deploy/deploy.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package deploy provides functions to deploy initial resources for the node operator. 
diff --git a/operators/constellation-node-operator/internal/deploy/deploy_test.go b/operators/constellation-node-operator/internal/deploy/deploy_test.go index 9906e7d45..d091714ae 100644 --- a/operators/constellation-node-operator/internal/deploy/deploy_test.go +++ b/operators/constellation-node-operator/internal/deploy/deploy_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package deploy @@ -86,7 +86,7 @@ func TestInitialResources(t *testing.T) { }, } scalingGroupGetter := newScalingGroupGetter(tc.items, tc.imageErr, tc.nameErr, tc.listErr) - err := InitialResources(t.Context(), k8sClient, &stubImageInfo{}, scalingGroupGetter, "uid") + err := InitialResources(context.Background(), k8sClient, &stubImageInfo{}, scalingGroupGetter, "uid") if tc.wantErr { assert.Error(err) return @@ -156,7 +156,7 @@ func TestCreateAutoscalingStrategy(t *testing.T) { require := require.New(t) k8sClient := &fakeK8sClient{createErr: tc.createErr} - err := createAutoscalingStrategy(t.Context(), k8sClient, "stub") + err := createAutoscalingStrategy(context.Background(), k8sClient, "stub") if tc.wantErr { assert.Error(err) return @@ -246,7 +246,7 @@ func TestCreateNodeVersion(t *testing.T) { if tc.existingNodeVersion != nil { k8sClient.createdObjects = append(k8sClient.createdObjects, tc.existingNodeVersion) } - err := createNodeVersion(t.Context(), k8sClient, "image-reference", "image-version") + err := createNodeVersion(context.Background(), k8sClient, "image-reference", "image-version") if tc.wantErr { assert.Error(err) return diff --git a/operators/constellation-node-operator/internal/deploy/imageinfo.go b/operators/constellation-node-operator/internal/deploy/imageinfo.go index 36b14fa95..bcfd5e912 100644 --- a/operators/constellation-node-operator/internal/deploy/imageinfo.go +++ b/operators/constellation-node-operator/internal/deploy/imageinfo.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package deploy diff --git a/operators/constellation-node-operator/internal/deploy/imageinfo_test.go b/operators/constellation-node-operator/internal/deploy/imageinfo_test.go index 949d9d9a6..e5a98b292 100644 --- a/operators/constellation-node-operator/internal/deploy/imageinfo_test.go +++ b/operators/constellation-node-operator/internal/deploy/imageinfo_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package deploy diff --git a/operators/constellation-node-operator/internal/etcd/etcd.go b/operators/constellation-node-operator/internal/etcd/etcd.go index 706e6039e..5f4e33c89 100644 --- a/operators/constellation-node-operator/internal/etcd/etcd.go +++ b/operators/constellation-node-operator/internal/etcd/etcd.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package etcd @@ -127,7 +127,7 @@ func getInitialEndpoints(k8sClient client.Client) ([]string, error) { } type etcdClient interface { - MemberList(ctx context.Context, opts ...clientv3.OpOption) (*clientv3.MemberListResponse, error) + MemberList(ctx context.Context) (*clientv3.MemberListResponse, error) MemberRemove(ctx context.Context, memberID uint64) (*clientv3.MemberRemoveResponse, error) Sync(ctx context.Context) error Close() error diff --git a/operators/constellation-node-operator/internal/etcd/etcd_test.go 
b/operators/constellation-node-operator/internal/etcd/etcd_test.go index 5b9938079..5775140cb 100644 --- a/operators/constellation-node-operator/internal/etcd/etcd_test.go +++ b/operators/constellation-node-operator/internal/etcd/etcd_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package etcd @@ -54,7 +54,7 @@ func TestRemoveEtcdMemberFromCluster(t *testing.T) { }, listErr: tc.memberListErr, }} - err := client.RemoveEtcdMemberFromCluster(t.Context(), tc.vpcIP) + err := client.RemoveEtcdMemberFromCluster(context.Background(), tc.vpcIP) if tc.wantErr { assert.Error(err) return @@ -98,7 +98,7 @@ func TestGetMemberID(t *testing.T) { members: tc.members, listErr: tc.memberListErr, }} - gotMemberID, err := client.getMemberID(t.Context(), "192.0.2.1") + gotMemberID, err := client.getMemberID(context.Background(), "192.0.2.1") if tc.wantErr { assert.Error(err) return @@ -185,7 +185,7 @@ type stubEtcdClient struct { closeErr error } -func (c *stubEtcdClient) MemberList(_ context.Context, _ ...clientv3.OpOption) (*clientv3.MemberListResponse, error) { +func (c *stubEtcdClient) MemberList(_ context.Context) (*clientv3.MemberListResponse, error) { return &clientv3.MemberListResponse{ Members: c.members, }, c.listErr diff --git a/operators/constellation-node-operator/internal/executor/executor.go b/operators/constellation-node-operator/internal/executor/executor.go index ae8f54883..152dc3a40 100644 --- a/operators/constellation-node-operator/internal/executor/executor.go +++ b/operators/constellation-node-operator/internal/executor/executor.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package executor contains a task executor / scheduler for the constellation node operator. 
@@ -205,7 +205,7 @@ func (c *Config) applyDefaults() { c.PollingFrequency = defaultPollingFrequency } if c.RateLimiter == nil { - c.RateLimiter = workqueue.DefaultTypedControllerRateLimiter[any]() + c.RateLimiter = workqueue.DefaultControllerRateLimiter() } } diff --git a/operators/constellation-node-operator/internal/executor/executor_test.go b/operators/constellation-node-operator/internal/executor/executor_test.go index 945adfdc1..a18d249fd 100644 --- a/operators/constellation-node-operator/internal/executor/executor_test.go +++ b/operators/constellation-node-operator/internal/executor/executor_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package executor @@ -29,7 +29,7 @@ func TestStartTriggersImmediateReconciliation(t *testing.T) { } exec := New(ctrl, cfg) // on start, the executor should trigger a reconciliation - stopAndWait := exec.Start(t.Context()) + stopAndWait := exec.Start(context.Background()) <-ctrl.waitUntilReconciled // makes sure to wait until reconcile was called ctrl.stop <- struct{}{} @@ -48,10 +48,10 @@ func TestStartMultipleTimesIsCoalesced(t *testing.T) { } exec := New(ctrl, cfg) // start once - stopAndWait := exec.Start(t.Context()) + stopAndWait := exec.Start(context.Background()) // start again multiple times for i := 0; i < 10; i++ { - _ = exec.Start(t.Context()) + _ = exec.Start(context.Background()) } <-ctrl.waitUntilReconciled // makes sure to wait until reconcile was called @@ -72,7 +72,7 @@ func TestErrorTriggersImmediateReconciliation(t *testing.T) { RateLimiter: &stubRateLimiter{}, // no rate limiting } exec := New(ctrl, cfg) - stopAndWait := exec.Start(t.Context()) + stopAndWait := exec.Start(context.Background()) for i := 0; i < 10; i++ { <-ctrl.waitUntilReconciled // makes sure to wait until reconcile was called } @@ -96,7 +96,7 @@ func TestErrorTriggersRateLimiting(t *testing.T) { }, } exec := New(ctrl, cfg) - stopAndWait := exec.Start(t.Context()) + stopAndWait := exec.Start(context.Background()) <-ctrl.waitUntilReconciled // makes sure to wait until reconcile was called once to trigger rate limiting ctrl.stop <- struct{}{} @@ -120,7 +120,7 @@ func TestRequeueAfterResultRequeueInterval(t *testing.T) { }, } exec := New(ctrl, cfg) - stopAndWait := exec.Start(t.Context()) + stopAndWait := exec.Start(context.Background()) for i := 0; i < 10; i++ { <-ctrl.waitUntilReconciled // makes sure to wait until reconcile was called } @@ -143,7 +143,7 @@ func TestExternalTrigger(t *testing.T) { }, } exec := New(ctrl, cfg) - stopAndWait := exec.Start(t.Context()) + stopAndWait := exec.Start(context.Background()) <-ctrl.waitUntilReconciled // initial trigger for i := 0; i < 10; i++ { exec.Trigger() @@ -167,7 +167,7 @@ func TestSimultaneousExternalTriggers(t *testing.T) { }, } exec := New(ctrl, cfg) - stopAndWait := exec.Start(t.Context()) + stopAndWait := exec.Start(context.Background()) <-ctrl.waitUntilReconciled // initial trigger for i := 0; i < 100; i++ { exec.Trigger() // extra trigger calls are coalesced @@ -184,7 +184,7 @@ func TestSimultaneousExternalTriggers(t *testing.T) { func TestContextCancel(t *testing.T) { assert := assert.New(t) - ctx, cancel := context.WithCancel(t.Context()) + ctx, cancel := context.WithCancel(context.Background()) ctrl := newStubController(Result{}, nil) cfg := Config{ PollingFrequency: time.Hour * 24 * 365, // 1 year. Should be high enough to not trigger the timer in the test. 
@@ -219,7 +219,7 @@ func TestRequeueAfterPollingFrequency(t *testing.T) { }, } exec := New(ctrl, cfg) - stopAndWait := exec.Start(t.Context()) + stopAndWait := exec.Start(context.Background()) for i := 0; i < 10; i++ { <-ctrl.waitUntilReconciled // makes sure to wait until reconcile was called } @@ -274,7 +274,7 @@ func (s *stubRateLimiter) When(_ any) time.Duration { func (s *stubRateLimiter) Forget(_ any) {} -func assertBetween(assert *assert.Assertions, minimum, maximum, actual int) { - assert.GreaterOrEqual(actual, minimum) - assert.LessOrEqual(actual, maximum) +func assertBetween(assert *assert.Assertions, min, max, actual int) { + assert.GreaterOrEqual(actual, min) + assert.LessOrEqual(actual, max) } diff --git a/operators/constellation-node-operator/internal/node/node.go b/operators/constellation-node-operator/internal/node/node.go index b94241c5b..854b49906 100644 --- a/operators/constellation-node-operator/internal/node/node.go +++ b/operators/constellation-node-operator/internal/node/node.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package node diff --git a/operators/constellation-node-operator/internal/node/node_test.go b/operators/constellation-node-operator/internal/node/node_test.go index bd746b976..8a11a19aa 100644 --- a/operators/constellation-node-operator/internal/node/node_test.go +++ b/operators/constellation-node-operator/internal/node/node_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package node diff --git a/operators/constellation-node-operator/internal/patch/annotations.go b/operators/constellation-node-operator/internal/patch/annotations.go index 685759350..3f57b1ad1 100644 --- a/operators/constellation-node-operator/internal/patch/annotations.go +++ b/operators/constellation-node-operator/internal/patch/annotations.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package patch diff --git a/operators/constellation-node-operator/internal/patch/annotations_test.go b/operators/constellation-node-operator/internal/patch/annotations_test.go index f2571e264..cb8dae133 100644 --- a/operators/constellation-node-operator/internal/patch/annotations_test.go +++ b/operators/constellation-node-operator/internal/patch/annotations_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package patch diff --git a/operators/constellation-node-operator/internal/patch/labels.go b/operators/constellation-node-operator/internal/patch/labels.go index 14970fb00..7aca3a44c 100644 --- a/operators/constellation-node-operator/internal/patch/labels.go +++ b/operators/constellation-node-operator/internal/patch/labels.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package patch diff --git a/operators/constellation-node-operator/internal/patch/labels_test.go b/operators/constellation-node-operator/internal/patch/labels_test.go index b4547259d..b6913f5da 100644 --- a/operators/constellation-node-operator/internal/patch/labels_test.go +++ b/operators/constellation-node-operator/internal/patch/labels_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package patch diff 
--git a/operators/constellation-node-operator/internal/poller/poller.go b/operators/constellation-node-operator/internal/poller/poller.go index 6c3328e09..26d2e2757 100644 --- a/operators/constellation-node-operator/internal/poller/poller.go +++ b/operators/constellation-node-operator/internal/poller/poller.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package poller implements a poller that can be used to wait for a condition to be met. diff --git a/operators/constellation-node-operator/internal/poller/poller_test.go b/operators/constellation-node-operator/internal/poller/poller_test.go index fac1421f4..e60564150 100644 --- a/operators/constellation-node-operator/internal/poller/poller_test.go +++ b/operators/constellation-node-operator/internal/poller/poller_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package poller @@ -49,17 +49,17 @@ func TestResult(t *testing.T) { pollErr: tc.pollErr, resultErr: tc.resultErr, }) - _, firstErr := poller.Result(t.Context()) + _, firstErr := poller.Result(context.Background()) if tc.wantErr { assert.Error(firstErr) // calling Result again should return the same error - _, secondErr := poller.Result(t.Context()) + _, secondErr := poller.Result(context.Background()) assert.Equal(firstErr, secondErr) return } assert.NoError(firstErr) // calling Result again should still not return an error - _, secondErr := poller.Result(t.Context()) + _, secondErr := poller.Result(context.Background()) assert.NoError(secondErr) }) } @@ -136,7 +136,7 @@ func TestPollUntilDone(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - gotResult, gotErr = poller.PollUntilDone(t.Context(), &PollUntilDoneOptions{ + gotResult, gotErr = poller.PollUntilDone(context.Background(), &PollUntilDoneOptions{ MaxBackoff: tc.maxBackoff, Clock: clock, }) diff --git a/operators/constellation-node-operator/internal/upgrade/BUILD.bazel b/operators/constellation-node-operator/internal/upgrade/BUILD.bazel index cf1588a80..546ffaf7d 100644 --- a/operators/constellation-node-operator/internal/upgrade/BUILD.bazel +++ b/operators/constellation-node-operator/internal/upgrade/BUILD.bazel @@ -1,5 +1,4 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") -load("//bazel/go:go_test.bzl", "go_test") go_library( name = "upgrade", @@ -10,19 +9,7 @@ go_library( "//internal/constants", "//internal/versions/components", "//upgrade-agent/upgradeproto", - "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//:go_default_library", "@org_golang_google_grpc//credentials/insecure", ], ) - -go_test( - name = "upgrade_test", - srcs = ["upgrade_test.go"], - embed = [":upgrade"], - deps = [ - "//internal/versions/components", - "//upgrade-agent/upgradeproto", - "@com_github_stretchr_testify//require", - "@org_golang_google_grpc//:grpc", - ], -) diff --git a/operators/constellation-node-operator/internal/upgrade/upgrade.go b/operators/constellation-node-operator/internal/upgrade/upgrade.go index ba6a081b0..18813557c 100644 --- a/operators/constellation-node-operator/internal/upgrade/upgrade.go +++ b/operators/constellation-node-operator/internal/upgrade/upgrade.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package upgrade @@ -21,25 +21,24 @@ import ( // Client is a client for the upgrade agent. 
type Client struct { - addr string dialer Dialer } -// NewClient creates a new upgrade agent client connecting to the default upgrade-agent Unix socket. +// NewClient creates a new upgrade agent client. func NewClient() *Client { - return newClientWithAddress(mainconstants.UpgradeAgentMountPath) -} - -func newClientWithAddress(addr string) *Client { return &Client{ - addr: "unix:" + addr, dialer: &net.Dialer{}, } } // Upgrade upgrades the Constellation node to the given Kubernetes version. func (c *Client) Upgrade(ctx context.Context, kubernetesComponents components.Components, WantedKubernetesVersion string) error { - conn, err := grpc.NewClient(c.addr, grpc.WithTransportCredentials(insecure.NewCredentials())) + conn, err := grpc.DialContext(ctx, mainconstants.UpgradeAgentMountPath, grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithContextDialer( + func(ctx context.Context, addr string) (net.Conn, error) { + return c.dialer.DialContext(ctx, "unix", addr) + }, + )) if err != nil { return fmt.Errorf("failed to dial: %w", err) } diff --git a/operators/constellation-node-operator/internal/upgrade/upgrade_test.go b/operators/constellation-node-operator/internal/upgrade/upgrade_test.go deleted file mode 100644 index acb725d61..000000000 --- a/operators/constellation-node-operator/internal/upgrade/upgrade_test.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright (c) Edgeless Systems GmbH - -SPDX-License-Identifier: BUSL-1.1 -*/ - -package upgrade - -import ( - "context" - "net" - "os" - "path/filepath" - "testing" - - "github.com/edgelesssys/constellation/v2/internal/versions/components" - "github.com/edgelesssys/constellation/v2/upgrade-agent/upgradeproto" - "github.com/stretchr/testify/require" - "google.golang.org/grpc" -) - -// TestGRPCDialer is a regression test to ensure the upgrade client can connect to a UDS. 
-func TestGRPCDialer(t *testing.T) { - require := require.New(t) - - dir := t.TempDir() - sockAddr := filepath.Join(dir, "test.socket") - - upgradeAgent := &fakeUpgradeAgent{} - grpcServer := grpc.NewServer() - upgradeproto.RegisterUpdateServer(grpcServer, upgradeAgent) - - listener, err := net.Listen("unix", sockAddr) - require.NoError(err) - go grpcServer.Serve(listener) - t.Cleanup(grpcServer.Stop) - - fileInfo, err := os.Stat(sockAddr) - require.NoError(err) - require.Equal(os.ModeSocket, fileInfo.Mode()&os.ModeType) - - upgradeClient := newClientWithAddress(sockAddr) - require.NoError(upgradeClient.Upgrade(t.Context(), []*components.Component{}, "v1.29.6")) -} - -type fakeUpgradeAgent struct { - upgradeproto.UnimplementedUpdateServer -} - -func (s *fakeUpgradeAgent) ExecuteUpdate(_ context.Context, _ *upgradeproto.ExecuteUpdateRequest) (*upgradeproto.ExecuteUpdateResponse, error) { - return &upgradeproto.ExecuteUpdateResponse{}, nil -} diff --git a/operators/constellation-node-operator/main.go b/operators/constellation-node-operator/main.go index 1422541d4..07e3b303b 100644 --- a/operators/constellation-node-operator/main.go +++ b/operators/constellation-node-operator/main.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package main diff --git a/operators/constellation-node-operator/sgreconciler/nodejoin_watcher.go b/operators/constellation-node-operator/sgreconciler/nodejoin_watcher.go index 29b01f2bb..2b31aef50 100644 --- a/operators/constellation-node-operator/sgreconciler/nodejoin_watcher.go +++ b/operators/constellation-node-operator/sgreconciler/nodejoin_watcher.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package sgreconciler diff --git a/operators/constellation-node-operator/sgreconciler/scalinggroup_controller.go b/operators/constellation-node-operator/sgreconciler/scalinggroup_controller.go index 3a8529827..ceb5805f4 100644 --- a/operators/constellation-node-operator/sgreconciler/scalinggroup_controller.go +++ b/operators/constellation-node-operator/sgreconciler/scalinggroup_controller.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package sgreconciler diff --git a/operators/constellation-node-operator/sgreconciler/scalinggroup_controller_env_test.go b/operators/constellation-node-operator/sgreconciler/scalinggroup_controller_env_test.go index 85748be5c..bbb20a71e 100644 --- a/operators/constellation-node-operator/sgreconciler/scalinggroup_controller_env_test.go +++ b/operators/constellation-node-operator/sgreconciler/scalinggroup_controller_env_test.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package sgreconciler diff --git a/operators/constellation-node-operator/sgreconciler/scalinggroup_controller_test.go b/operators/constellation-node-operator/sgreconciler/scalinggroup_controller_test.go index 488f87923..5f312d97b 100644 --- a/operators/constellation-node-operator/sgreconciler/scalinggroup_controller_test.go +++ b/operators/constellation-node-operator/sgreconciler/scalinggroup_controller_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package sgreconciler @@ -88,7 +88,7 @@ func TestCreateScalingGroupIfNotExists(t *testing.T) { 
autoscalingGroupName: "autoscaling-group-name", role: updatev1alpha1.WorkerRole, } - err := createScalingGroupIfNotExists(t.Context(), newScalingGroupConfig) + err := createScalingGroupIfNotExists(context.Background(), newScalingGroupConfig) if tc.wantErr { assert.Error(err) return @@ -184,7 +184,7 @@ func TestPatchNodeGroupName(t *testing.T) { getErr: tc.getErr, updateErr: tc.updateErr, } - gotExists, gotErr := patchNodeGroupName(t.Context(), k8sClient, "resource-name", "node-group-name") + gotExists, gotErr := patchNodeGroupName(context.Background(), k8sClient, "resource-name", "node-group-name") if tc.wantErr { assert.Error(gotErr) return diff --git a/operators/constellation-node-operator/sgreconciler/sgreconciler.go b/operators/constellation-node-operator/sgreconciler/sgreconciler.go index 8cc4c2ada..57abc1cff 100644 --- a/operators/constellation-node-operator/sgreconciler/sgreconciler.go +++ b/operators/constellation-node-operator/sgreconciler/sgreconciler.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // Package sgreconciler contains a reconciler that reconciles on cloud provider infrastructure. diff --git a/operators/constellation-node-operator/sgreconciler/suite_test.go b/operators/constellation-node-operator/sgreconciler/suite_test.go index ca1e1211c..ffb11a630 100644 --- a/operators/constellation-node-operator/sgreconciler/suite_test.go +++ b/operators/constellation-node-operator/sgreconciler/suite_test.go @@ -3,7 +3,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package sgreconciler diff --git a/renovate.json5 b/renovate.json5 index 79a9f7806..130de4a95 100644 --- a/renovate.json5 +++ b/renovate.json5 @@ -1,419 +1,281 @@ { - $schema: 'https://docs.renovatebot.com/renovate-schema.json', - extends: [ - 'config:recommended', - ':preserveSemverRanges', - 'helpers:pinGitHubActionDigests', - ':separateMajorReleases', - ':semanticCommitsDisabled', + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "extends": [ + "config:recommended", + ":preserveSemverRanges", + "helpers:pinGitHubActionDigests", + ":separateMajorReleases", + ":semanticCommitsDisabled", ], - commitMessagePrefix: 'deps:', - commitMessageAction: 'update', - addLabels: [ - 'dependencies' - ], - postUpdateOptions: [ - 'gomodTidy', - 'gomodUpdateImportPaths', - 'gomodMassage' - ], - ignorePaths: [ - 'internal/constellation/helm/charts/aws-load-balancer-controller/**', - 'internal/constellation/helm/charts/cilium/**', - 'internal/constellation/helm/charts/coredns/**', - 'internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/**', - 'internal/constellation/helm/charts/edgeless/csi/charts/azuredisk-csi-driver/**', - 'internal/constellation/helm/charts/edgeless/csi/charts/gcp-compute-persistent-disk-csi-driver/**', - 'internal/constellation/helm/charts/edgeless/csi/charts/openstack-cinder-csi/**', - 'operators/constellation-node-operator/config/manager/kustomization.yaml', - ], - ignoreDeps: [ - 'github.com/edgelesssys/constellation/v2', - 'github.com/daniel-weisse/go-cryptsetup', - 'github.com/daniel-weisse/go-sev-guest', - // Only update once they fixed dependency violations on their side. 
- 'github.com/google/go-tpm-tools', + "commitMessagePrefix": "deps:", + "commitMessageAction": "update", + "addLabels": ["dependencies"], + "postUpdateOptions": ["gomodTidy", "gomodUpdateImportPaths"], + "prConcurrentLimit": 4, + "ignorePaths": [ + "internal/constellation/helm/charts/cilium/**", + "internal/constellation/helm/charts/edgeless/csi/charts/aws-csi-driver/**", + "internal/constellation/helm/charts/edgeless/csi/charts/azuredisk-csi-driver/**", + "internal/constellation/helm/charts/edgeless/csi/charts/gcp-compute-persistent-disk-csi-driver/**", + "internal/constellation/helm/charts/edgeless/csi/charts/openstack-cinder-csi/**", + "operators/constellation-node-operator/config/manager/kustomization.yaml", ], + "ignoreDeps": ["github.com/edgelesssys/constellation/v2"], // Rules for changing renovates behaviour for different packages. // The documentation for configuration options can be found here: // https://docs.renovatebot.com/configuration-options/ - packageRules: [ + "packageRules": [ { - matchManagers: [ - 'gomod' + "matchManagers": ["gomod"], + "matchDepTypes": ["indirect"], + "matchUpdateTypes": [ + "minor", + "patch", + "pin", + "pinDigest", + "digest", + "lockFileMaintenance", + "rollback", + "bump", ], - matchDepTypes: [ - 'indirect' - ], - matchUpdateTypes: [ - 'minor', - 'patch', - 'pin', - 'pinDigest', - 'digest', - 'lockFileMaintenance', - 'rollback', - 'bump', - ], - enabled: true, - prPriority: -30, + "enabled": true, + "prPriority": -30, }, { - // Group update of direct Go dependencies. - groupName: 'Go dependencies', - matchManagers: [ - 'gomod' + "matchPackagePatterns": ["^k8s.io", "^sigs.k8s.io"], + "groupName": "K8s dependencies", + }, + { + "matchPackagePatterns": ["^go.etcd.io/etcd"], + "groupName": "etcd dependencies", + }, + { + "matchPackagePatterns": ["^github.com/hashicorp/go-kms-wrapping"], + "groupName": "github.com/hashicorp/go-kms-wrapping", + }, + { + "matchPackagePatterns": ["^github.com/aws/aws-sdk-go-v2"], + "groupName": "AWS SDK", + "prPriority": -10, + }, + { + "matchPackagePatterns": [ + "^github.com/Azure/", + "^github.com/AzureAD/microsoft-authentication-library-for-go", ], - matchDepTypes: [ - 'require', - 'replace' - ], - matchUpdateTypes: [ - 'bump', - 'digest', - 'lockFileMaintenance', - 'minor', - 'patch', - 'pin', - 'pinDigest', - 'rollback', - ], - matchDepNames: [ - '!github.com/bazelbuild/rules_go', - ], - schedule: [ - 'before 8am on monday', + "groupName": "Azure SDK", + }, + { + "matchPackagePatterns": ["^cloud.google.com/go"], + "groupName": "Google SDK", + }, + { + "matchPackagePatterns": ["^google.golang.org/genproto"], + "prPriority": -10, + }, + { + "matchPackagePatterns": ["^libvirt.org/go"], + "groupName": "libvirt.org/go", + }, + { + "matchManagers": ["bazelisk", "bazel", "bazel-module"], + "matchPackageNames": ["bazel", "io_bazel_rules_go", "bazel_gazelle"], + "groupName": "bazel (core)", + }, + { + "matchDatasources": ["golang-version"], + "allowedVersions": "1.19", + }, + { + "matchManagers": ["pip_requirements"], + "groupName": "Python dependencies", + }, + { + "matchManagers": ["github-actions"], + "groupName": "GitHub action dependencies", + "matchUpdateTypes": [ + "minor", + "patch", + "pin", + "pinDigest", + "digest", + "lockFileMaintenance", + "rollback", + "bump", ], }, { - // Group update of rules_go dependencies. 
- groupName: 'rules_go dependencies', - matchManagers: [ - 'gomod', - 'bazel', - 'bazel-module', - ], - matchDepNames: [ - 'github.com/bazelbuild/rules_go', - 'io_bazel_rules_go', - 'rules_go', - ], - schedule: [ - 'before 8am on tuesday', - ], + "matchPackageNames": ["kubernetes/kubernetes"], + // example match: v1.2.3 (1.2 -> compatibility, 3 -> patch) + "versioning": "regex:^(?v?\\d+\\.\\d+\\.)(?\\d+)$", + "groupName": "Kubernetes versions", + "prPriority": 15, }, { - // Group update of Terraform dependencies. - groupName: 'Terraform dependencies', - matchManagers: [ - 'terraform' - ], - matchUpdateTypes: [ - 'bump', - 'digest', - 'lockFileMaintenance', - 'minor', - 'patch', - 'pin', - 'pinDigest', - 'rollback', - ], - schedule: [ - 'before 8am on wednesday', - ], - }, - { - matchManagers: [ - 'bazelisk', - 'bazel', - 'bazel-module', - ], - matchDepNames: [ - 'bazel', - 'bazel_gazelle', - 'gazelle', - ], - groupName: 'bazel (core)', - }, - { - matchManagers: [ - 'bazel', - ], - matchDepNames: [ - '!bazel', - '!io_bazel_rules_go', - '!bazel_gazelle', - '!rules_go', - '!gazelle', - ], - groupName: 'bazel (plugins)', - }, - { - matchManagers: [ - 'bazel-module' - ], - matchDepNames: [ - '!bazel', - '!io_bazel_rules_go', - '!bazel_gazelle', - '!rules_go', - '!gazelle', - ], - groupName: 'bazel (modules)', - }, - { - matchDatasources: [ - 'golang-version' - ], - allowedVersions: '1.23', - }, - { - matchManagers: [ - 'pip_requirements' - ], - groupName: 'Python dependencies', - }, - { - matchManagers: [ - 'github-actions' - ], - groupName: 'GitHub action dependencies', - matchUpdateTypes: [ - 'major', - 'minor', - 'patch', - 'pin', - 'pinDigest', - 'digest', - 'lockFileMaintenance', - 'rollback', - 'bump', - ], - schedule: [ - 'before 8am on tuesday' - ], - }, - { - matchDepNames: [ - 'kubernetes/kubernetes' + "matchPackageNames": [ + "registry.k8s.io/provider-aws/cloud-controller-manager", ], // example match: v1.2.3 (1.2 -> compatibility, 3 -> patch) - versioning: 'regex:^(?v?\\d+\\.\\d+\\.)(?\\d+)$', - groupName: 'Kubernetes versions', - prPriority: 15, + "versioning": "regex:^(?v?\\d+\\.\\d+\\.)(?\\d+)$", + "groupName": "K8s constrained AWS versions", + "prPriority": 15, }, { - matchDepNames: [ - 'registry.k8s.io/provider-aws/cloud-controller-manager', + "matchPackageNames": [ + "mcr.microsoft.com/oss/kubernetes/azure-cloud-controller-manager", + "mcr.microsoft.com/oss/kubernetes/azure-cloud-node-manager", ], // example match: v1.2.3 (1.2 -> compatibility, 3 -> patch) - versioning: 'regex:^(?v?\\d+\\.\\d+\\.)(?\\d+)$', - groupName: 'K8s constrained AWS versions', - prPriority: 15, + "versioning": "regex:^(?v?\\d+\\.\\d+\\.)(?\\d+)$", + "groupName": "K8s constrained Azure versions", + "prPriority": 15, }, { - matchDepNames: [ - 'mcr.microsoft.com/oss/kubernetes/azure-cloud-controller-manager', - 'mcr.microsoft.com/oss/kubernetes/azure-cloud-node-manager', + "matchPackageNames": [ + "docker.io/k8scloudprovider/openstack-cloud-controller-manager", ], // example match: v1.2.3 (1.2 -> compatibility, 3 -> patch) - versioning: 'regex:^(?v?\\d+\\.\\d+\\.)(?\\d+)$', - groupName: 'K8s constrained Azure versions', - prPriority: 15, + "versioning": "regex:^(?v?\\d+\\.\\d+\\.)(?\\d+)$", + "groupName": "K8s constrained OpenStack versions", + "prPriority": 15, }, { - matchDepNames: [ - 'docker.io/k8scloudprovider/openstack-cloud-controller-manager', - ], + "matchPackageNames": ["registry.k8s.io/autoscaling/cluster-autoscaler"], // example match: v1.2.3 (1.2 -> compatibility, 3 -> patch) - versioning: 
'regex:^(?v?\\d+\\.\\d+\\.)(?\\d+)$', - groupName: 'K8s constrained OpenStack versions', - prPriority: 15, + "versioning": "regex:^(?v?\\d+\\.\\d+\\.)(?\\d+)$", + "groupName": "K8s constrained GCP versions", + "prPriority": 15, }, { - matchDepNames: [ - 'registry.k8s.io/autoscaling/cluster-autoscaler' - ], - // example match: v1.2.3 (1.2 -> compatibility, 3 -> patch) - versioning: 'regex:^(?v?\\d+\\.\\d+\\.)(?\\d+)$', - groupName: 'K8s constrained GCP versions', - prPriority: 15, - }, - { - matchDepNames: [ - 'ghcr.io/edgelesssys/cloud-provider-gcp' - ], + "matchPackageNames": ["ghcr.io/edgelesssys/cloud-provider-gcp"], // example match: v1.2.3 (1. -> compatibility, 2 -> minor, 3 -> patch) - versioning: 'regex:^(?v\\d+\\.)(?\\d+)\\.(?\\d+)$', - groupName: 'cloud-provider-gcp (K8s version constrained)', - prPriority: 15, + "versioning": "regex:^(?v\\d+\\.)(?\\d+)\\.(?\\d+)$", + "groupName": "cloud-provider-gcp (K8s version constrained)", + "prPriority": 15, }, { - matchPackageNames: [ - 'ghcr.io/edgelesssys/{/,}**', - '!ghcr.io/edgelesssys/cloud-provider-gcp', - '!ghcr.io/edgelesssys/constellation/s3proxy', - ], - versioning: 'semver', + "matchPackagePrefixes": ["ghcr.io/edgelesssys/"], + "excludePackageNames": ["ghcr.io/edgelesssys/cloud-provider-gcp"], + "versioning": "semver", // Allow packages of ghcr.io/edgelesssys to update to unstable prereleases. // This is necessary because renovate will not update minor versions of // containers that are already tagged as a prerelease in the code // if this is not set. - ignoreUnstable: false, - groupName: 'Constellation containers', - prPriority: 20, - schedule: [ - 'before 8am on thursday' - ], + "ignoreUnstable": false, + "groupName": "Constellation containers", + "prPriority": 20, }, { - matchDepNames: [ - 'registry.k8s.io/kas-network-proxy/proxy-agent', - 'registry.k8s.io/kas-network-proxy/proxy-server', + "matchPackageNames": [ + "registry.k8s.io/kas-network-proxy/proxy-agent", + "registry.k8s.io/kas-network-proxy/proxy-server", ], - versioning: 'semver', - groupName: 'K8s version independent containers', - prPriority: 15, + "versioning": "semver", + "groupName": "K8s version independent containers", + "prPriority": 15, }, { - matchDepNames: [ - '^k8s.io/client-go' - ], - matchUpdateTypes: [ - 'major' - ], - enabled: false, + "matchPackageNames": ["^k8s.io/client-go"], + "matchUpdateTypes": ["major"], + "enabled": false, }, { - matchCategories: [ - 'python', - 'js', - 'node' - ], - prPriority: -20, + "matchCategories": ["python", "js", "node"], + "prPriority": -20, }, { - matchManagers: [ - 'github-actions' - ], - matchDepNames: [ - 'slsa-framework/slsa-github-generator' - ], - pinDigests: false, + "matchManagers": ["github-actions"], + "matchPackageNames": ["slsa-framework/slsa-github-generator"], + "pinDigests": false, }, { - additionalBranchPrefix: '{{packageName}}-', - groupName: '{{packageName}}', - matchDepNames: [ - '/_(darwin|linux)_(arm64|amd64)$/', - ], + "matchPackagePatterns": ["_(darwin|linux)_(arm64|amd64)$"], + "additionalBranchPrefix": "{{packageName}}-", + "groupName": "{{packageName}}", }, ], // Regex Managers allow detection of other versions in files that renovate // cannot parse by default. For more information, look at // https://docs.renovatebot.com/modules/manager/regex/ . - customManagers: [ + "regexManagers": [ { - customType: 'regex', - fileMatch: [ - '(^|\\/)versions.go$' - ], - matchStrings: [ + "fileMatch": ["(^|\\/)versions.go$"], + "matchStrings": [ // Match all container packages. 
// example match:' "registry.io/owner/foo/bar:v1.2.3@sha256:somehash" // renovate:container' // (registry.io/owner/foo/bar -> depName, v1.2.3 -> currentValue, sha256:somehash -> currentDigest) - ' \"(?[^\"]*?):(?[^\"]*?)@(?sha256:[a-f0-9]+)\"[^\\n]+\\/\\/ renovate:container', + " \"(?[^\"]*?):(?[^\"]*?)@(?sha256:[a-f0-9]+)\"[^\\n]+\\/\\/ renovate:container", ], - datasourceTemplate: 'docker', + "datasourceTemplate": "docker", }, { - customType: 'regex', - fileMatch: [ - '(^|\\/)versions.go$' - ], - matchStrings: [ + "fileMatch": ["(^|\\/)versions.go$"], + "matchStrings": [ // Match kubernetes releases. - // example match:' "https://dl.k8s.io/v1.2.3/foo" // renovate:kubernetes-release' + // example match:' "https://storage.googleapis.com/kubernetes-release/release/v1.2.3/foo" // renovate:kubernetes-release' // (v1.2.3 -> currentValue) - ' \"https:\\/\\/dl\\.k8s\\.io\\/(?[^\\/\\s\"]+)\\/[^\"]+\"[^\\n]+\\/\\/ renovate:kubernetes-release', + " \"https:\\/\\/storage\\.googleapis\\.com\\/kubernetes-release\\/release\\/(?[^\\/\\s\"]+)\\/[^\"]+\"[^\\n]+\\/\\/ renovate:kubernetes-release", // Match kubernetes releases. // example match:' " "v1.2.3" // renovate:kubernetes-release"' // (v1.2.3 -> currentValue) - ' \"(?v\\d+\\.\\d+\\.\\d+)\"[^\\n]+\\/\\/ renovate:kubernetes-release', + " \"(?v\\d+\\.\\d+\\.\\d+)\"[^\\n]+\\/\\/ renovate:kubernetes-release", ], - depNameTemplate: 'kubernetes/kubernetes', - datasourceTemplate: 'github-releases', + "depNameTemplate": "kubernetes/kubernetes", + "datasourceTemplate": "github-releases", }, { - customType: 'regex', - fileMatch: [ - '(^|\\/)versions.go$', - '[.]github\\/(actions|workflows)\\/.*[.]ya?ml' - ], - matchStrings: [ + "fileMatch": ["(^|\\/)versions.go$"], + "matchStrings": [ // Match github releases. // example match:' "https://github.com/foo/bar/releases/download/v1.2.3/foo.bin" // renovate:github-release' // (foo/bar -> depName, v1.2.3 -> currentValue) - 'https:\\/\\/github\\.com\\/(?[^\\/\\s\"]+\\/[^\\/\\s\"]+)\\/releases\\/download\\/(?[^\\/\\s\"]+).*renovate:github-release', + " \"https:\\/\\/github\\.com\\/(?[^\\/\\s\"]+\\/[^\\/\\s\"]+)\\/releases\\/download\\/(?[^\\/\\s\"]+)\\/[^\"]+\"[^\\n]+\\/\\/ renovate:github-release", ], - datasourceTemplate: 'github-releases', + "datasourceTemplate": "github-releases", }, { - customType: 'regex', - fileMatch: [ - '(^|\\/)versions.go$' - ], - matchStrings: [ + "fileMatch": ["(^|\\/)versions.go$"], + "matchStrings": [ // Match kubernetes cri-tools releases (https://github.com/kubernetes-sigs/cri-tools). 
// example Match:' "https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.2.3/foo"' // (v1.2.3 -> currentValue) - ' \"https:\\/\\/github\\.com\\/kubernetes-sigs\\/cri-tools\\/releases\\/download\\/(?[^\\/\\s\"]+)\\/[^\"]+\"', + " \"https:\\/\\/github\\.com\\/kubernetes-sigs\\/cri-tools\\/releases\\/download\\/(?[^\\/\\s\"]+)\\/[^\"]+\"", ], - depNameTemplate: 'kubernetes-sigs/cri-tools', - datasourceTemplate: 'github-releases', - autoReplaceStringTemplate: ' \"https://github.com/kubernetes-sigs/cri-tools/releases/download/{{{newValue}}}/crictl-{{{newValue}}}-linux-amd64.tar.gz\"', + "depNameTemplate": "kubernetes-sigs/cri-tools", + "datasourceTemplate": "github-releases", + "autoReplaceStringTemplate": " \"https://github.com/kubernetes-sigs/cri-tools/releases/download/{{{newValue}}}/crictl-{{{newValue}}}-linux-amd64.tar.gz\"", }, { - customType: 'regex', - fileMatch: [ - 'versions.go$' - ], - matchStrings: [ + "fileMatch": ["versions.go$"], + "matchStrings": [ // Match containernetworking plugin releases (https://github.com/containernetworking/plugins). // example Match:' "https://github.com/containernetworking/plugins/releases/download/v1.2.3/foo"' // (v1.2.3 -> currentValue) - ' \"https:\\/\\/github\\.com\\/containernetworking\\/plugins\\/releases\\/download\\/(?[^\\/\\s\"]+)\\/[^\"]+\"', + " \"https:\\/\\/github\\.com\\/containernetworking\\/plugins\\/releases\\/download\\/(?[^\\/\\s\"]+)\\/[^\"]+\"", ], - depNameTemplate: 'containernetworking/plugins', - datasourceTemplate: 'github-releases', - autoReplaceStringTemplate: ' \"https://github.com/containernetworking/plugins/releases/download/{{{newValue}}}/cni-plugins-linux-amd64-{{{newValue}}}.tgz\"', + "depNameTemplate": "containernetworking/plugins", + "datasourceTemplate": "github-releases", + "autoReplaceStringTemplate": " \"https://github.com/containernetworking/plugins/releases/download/{{{newValue}}}/cni-plugins-linux-amd64-{{{newValue}}}.tgz\"", }, { - customType: 'regex', - fileMatch: [ - '\\.yaml$', - '\\.yml$' - ], - matchStrings: [ + "fileMatch": ["\\.yaml$", "\\.yml$"], + "matchStrings": [ // Match `go install` commands. - // example Match: 'go install foo.bar@0000000000000000000000000000000000000000' + // example Match: "go install foo.bar@0000000000000000000000000000000000000000" // (foo.bar -> depName, 0000000000000000000000000000000000000000 -> currentValue) - 'go install (?[^@]+?)@(?[0-9a-f]{40})', + "go install (?[^@]+?)@(?[0-9a-f]{40})", ], - datasourceTemplate: 'go', + "datasourceTemplate": "go", }, { - customType: 'regex', - fileMatch: [ - '(^|\\/)e2e_s3proxy/action.yml$' - ], - matchStrings: [ + "fileMatch": ["(^|\\/)e2e_s3proxy/action.yml$"], + "matchStrings": [ // Match mint tags (ghcr.io/edgelesssys/mint). // example Match:' "ghcr.io/edgelesssys/mint:v1.2.3"' // (ghcr.io/edgelesssys/mint -> depName, v1.2.3 -> currentValue) - 'IMAGE: \"(?[^\"]*?):(?[^\"]*?)@(?sha256:[a-f0-9]+)\"[^\\n]+# renovate:mint-fork', + "IMAGE: \"(?[^\"]*?):(?[^\"]*?)@(?sha256:[a-f0-9]+)\"[^\\n]+# renovate:mint-fork", ], - datasourceTemplate: 'docker', + "datasourceTemplate": "docker", }, ], } diff --git a/rfc/006-reproducible-builds.md b/rfc/006-reproducible-builds.md index 3746d31e9..fd1849c20 100644 --- a/rfc/006-reproducible-builds.md +++ b/rfc/006-reproducible-builds.md @@ -31,7 +31,7 @@ Problems arise when the default images do not satisfy our dependency needs (as t To solve this issue we have three options: 1. Build our own base images independent from or inspired by distroless -2. 
Fork [distroless](https://github.com/GoogleContainerTools/distroless), edit underlying Bazel dependencies, build the image +2. Fork distroless, edit underlying [bazel dependencies](https://github.com/GoogleContainerTools/distroless/blob/main/debian_archives.bzl), build the image 3. Use `apko` to build minimal Alpine images. These images can be configured via `apko` and a declarative `*.yaml` config file. Option `1.`: Results in a similar maintenance work as using `buildah`. diff --git a/rfc/016-node-access.md b/rfc/016-node-access.md deleted file mode 100644 index abc495400..000000000 --- a/rfc/016-node-access.md +++ /dev/null @@ -1,184 +0,0 @@ -# RFC 016: Node Access - -## Background - -A production Constellation cluster is currently configured not to allow any kind of remote administrative access. -This choice is deliberate: any mechanism for remote accesss can potentially be exploited, or may leak sensitive data. - -However, some operations on a Kubernetes cluster require some form of access to the nodes. -A good class of examples are etcd cluster maintenance tasks, like backup and recovery, or emergency operations like removing a permanently failed member. -Some kubeadm operations, like certificate rotation, also require some form of cluster access. - -While some tasks can be accomplished by DaemonSets, CronJobs and the like, relying on Kubernetes objects is insufficient. -Executing commands in a Kubernetes pod may fail because Kubernetes is not healthy, etcd is bricked or the network is down. -Administrative access to the nodes through a side channel would greatly help remediate, or at least debug, those situations. - -## Requirements - -Constellation admins can log into Constellation nodes for maintenance, subject to the following restrictions: - -* Access must be encrypted end-to-end to protect from CSP snooping. -* Access must be possible even if the Kubernetes API server is down. - -Nice-to-have: - -* The method of access should not require long-term storage of a second secret. -* The method of access should be time-limited. - -## Proposed Design - -Core to the proposal is [certificate-based authentication for OpenSSH](https://en.wikibooks.org/wiki/OpenSSH/Cookbook/Certificate-based_Authentication). -We can derive a valid SSH key from the Constellation master secret. -An OpenSSH server on the node accepts certificates issued by this CA key. -Admins can derive the CA key from the master secret on demand, and issue certificates for arbitrary public keys. -An example program is in the [Appendix](#appendix). - -### Key Details - -We use an HKDF to derive an ed25519 private key from the master secret. -This private key acts as an SSH certificate authority, whose signed certs allow access to cluster nodes. -Since the master secret is available to both the cluster owner and the nodes, no communication with the cluster is needed to mint valid certificates. -The choice of curve allows to directly use the derived secret bytes as key. -This makes the implementation deterministic, and thus the CA key recoverable. - -### Server-side Details - -An OpenSSH server is added to the node image software stack. -It's configured with a `TrustedUserCAKeys` file and a `RevokedKeys` file, both being empty on startup. -All other means of authentication are disabled. - -After initialization, the bootstrapper fills the `TrustedUserCAKeys` file with the derived CA's public key. 
-Joining nodes send their public host key as part of the `IssueJoinTokenRequest` and receive the CA certificate and an indefinitely valid certificate as response. - -The `RevokedKeys` KRL is an option for the cluster administrator to revoke keys, but it's not managed by Constellation. - -### Client-side Details - -A new `ssh` subcommand is added to the CLI. -The exact name is TBD, but it should fit in with other key-related activity, like generating volume keys. -It takes the master secret file and an SSH pub key file as arguments, and writes a certificate to stdout. -Optional arguments may include principals or vailidity period. -The implementation could roughly follow the PoC in the [Appendix](#appendix). - -As an extension, the subcommand could allow generating a key pair and a matching certificate in a temp dir, and `exec` the ssh program directly. -This would encourage use of very short-lived certificates. - -## Security Considerations - -Exposing an additional service to the outside world increases the attack surface. -We propose the following mitigations: - -1. The SSH port is only exposed to the VPC. - This restricts the attackers to malicious co-tenants and the CSP. - In an emergency, admins need to add a load balancer to be able to reach the nodes. -2. A hardened OpenSSH config only allows the options strictly necessary for the scheme proposed here. - Authorized keys and passwords must be disabled. - Cipher suites should be restricted. etc. - -## Alternatives Considered - -### Enable Serial Console - -Serial consoles for cloud VMs are tunnelled through the CSP in the clear. -To make this solution secure, an encrypted channel would need to be established on top of the serial connection. -The author is not aware of any software providing such a channel. - -### SSH with Authorized Keys - -We could ask users to add a public key to their `constellation-conf.yaml` and add that to `/root/.ssh/authorized_keys` after joining. -This would require the cluster owner to permanently manage a second secret, and there would be no built-in way to revoke access. - -### Debug Pod - -Some node administration tasks can be performed with a [debug pod]. -If privileged access is required, it's usually necessary to schedule a custom pod. -This only works if the Kubernetes API server is still processing requests, pods can be scheduled on the target node and the network allows connecting to it. - -[debug pod]: https://kubernetes.io/docs/tasks/debug/debug-cluster/kubectl-node-debug/ - -### Host an Admin API Server - -There are alternatives to SSH that allow fine-grained authorization of node operations. -An example would be [SansShell], which verifies node access requests with a policy. -Setting up such a tool requires a detailed understanding of the use cases, of which some might be hard to foresee. -This may be better suited as an extension of the low-level emergency access mechanisms. - -[SansShell]: https://github.com/Snowflake-Labs/sansshell - -## Appendix - -A proof-of-concept implementation of the certificate generation. -Constellation nodes would stop after deriving the CA public key. 
- -```golang -package main - -import ( - "crypto/ed25519" - "crypto/rand" - "crypto/sha256" - "encoding/json" - "flag" - "fmt" - "log" - "os" - "time" - - "golang.org/x/crypto/hkdf" - "golang.org/x/crypto/ssh" -) - -type secret struct { - Key []byte `json:"key,omitempty"` - Salt []byte `json:"salt,omitempty"` -} - -var permissions = ssh.Permissions{ - Extensions: map[string]string{ - "permit-port-forwarding": "yes", - "permit-pty": "yes", - }, -} - -func main() { - masterSecret := flag.String("secret", "", "") - flag.Parse() - - secretJSON, err := os.ReadFile(*masterSecret) - must(err) - var secret secret - must(json.Unmarshal(secretJSON, &secret)) - - hkdf := hkdf.New(sha256.New, secret.Key, secret.Salt, []byte("ssh-ca")) - - _, priv, err := ed25519.GenerateKey(hkdf) - must(err) - - ca, err := ssh.NewSignerFromSigner(priv) - must(err) - - log.Printf("CA KEY: %s", string(ssh.MarshalAuthorizedKey(ca.PublicKey()))) - - buf, err := os.ReadFile(flag.Arg(0)) - must(err) - pub, _, _, _, err := ssh.ParseAuthorizedKey(buf) - must(err) - certificate := ssh.Certificate{ - Key: pub, - CertType: ssh.UserCert, - ValidAfter: uint64(time.Now().Unix()), - ValidBefore: uint64(time.Now().Add(24 * time.Hour).Unix()), - ValidPrincipals: []string{"root"}, - Permissions: permissions, - } - must(certificate.SignCert(rand.Reader, ca)) - - fmt.Printf("%s\n", string(ssh.MarshalAuthorizedKey(&certificate))) -} - -func must(err error) { - if err != nil { - log.Fatal(err) - } -} -``` diff --git a/s3proxy/cmd/main.go b/s3proxy/cmd/main.go index 9db11dea8..b0a017856 100644 --- a/s3proxy/cmd/main.go +++ b/s3proxy/cmd/main.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* @@ -40,6 +40,13 @@ func main() { panic(err) } + // logLevel can be made a public variable so logging level can be changed dynamically. + // TODO (derpsteb): enable once we are on go 1.21. + // logLevel := new(slog.LevelVar) + // handler := slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: logLevel}) + // logger := slog.New(handler) + // logLevel.Set(flags.logLevel) + logger := logger.NewJSONLogger(logger.VerbosityFromInt(flags.logLevel)) if flags.forwardMultipartReqs { @@ -103,6 +110,12 @@ func parseFlags() (cmdFlags, error) { return cmdFlags{}, fmt.Errorf("not a valid IPv4 address: %s", *ip) } + // TODO(derpsteb): enable once we are on go 1.21. + // logLevel := new(slog.Level) + // if err := logLevel.UnmarshalText([]byte(*level)); err != nil { + // return cmdFlags{}, fmt.Errorf("parsing log level: %w", err) + // } + return cmdFlags{ noTLS: *noTLS, ip: netIP.String(), @@ -121,5 +134,7 @@ type cmdFlags struct { certLocation string kmsEndpoint string forwardMultipartReqs bool - logLevel int + // TODO(derpsteb): enable once we are on go 1.21. 
+ // logLevel slog.Level + logLevel int } diff --git a/s3proxy/deploy/deployment-s3proxy.yaml b/s3proxy/deploy/deployment-s3proxy.yaml index 73c7bf098..aa8e4b1f0 100644 --- a/s3proxy/deploy/deployment-s3proxy.yaml +++ b/s3proxy/deploy/deployment-s3proxy.yaml @@ -72,7 +72,7 @@ spec: spec: containers: - name: s3proxy - image: ghcr.io/edgelesssys/constellation/s3proxy:v2.23.0 + image: ghcr.io/edgelesssys/constellation/s3proxy:v2.13.0-pre args: - "--level=-1" ports: diff --git a/s3proxy/deploy/s3proxy/Chart.yaml b/s3proxy/deploy/s3proxy/Chart.yaml index f07afba51..f0e1f34bd 100644 --- a/s3proxy/deploy/s3proxy/Chart.yaml +++ b/s3proxy/deploy/s3proxy/Chart.yaml @@ -2,4 +2,4 @@ apiVersion: v2 name: s3proxy description: Helm chart to deploy s3proxy. type: application -version: 0.0.0 +version: 2.16.3 diff --git a/s3proxy/deploy/s3proxy/values.yaml b/s3proxy/deploy/s3proxy/values.yaml index d70f349e3..eb3155e23 100644 --- a/s3proxy/deploy/s3proxy/values.yaml +++ b/s3proxy/deploy/s3proxy/values.yaml @@ -3,7 +3,7 @@ awsAccessKeyID: "replaceme" awsSecretAccessKey: "replaceme" # Pod image to deploy. -image: "ghcr.io/edgelesssys/constellation/s3proxy:v2.23.1" +image: "ghcr.io/edgelesssys/constellation/s3proxy:v2.16.0-pre.0.20240221184016-522f2858c6ef" # Control if multipart uploads are blocked. allowMultipart: false diff --git a/s3proxy/internal/crypto/crypto.go b/s3proxy/internal/crypto/crypto.go index 6ab7b6837..bdc117a7b 100644 --- a/s3proxy/internal/crypto/crypto.go +++ b/s3proxy/internal/crypto/crypto.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* diff --git a/s3proxy/internal/crypto/crypto_test.go b/s3proxy/internal/crypto/crypto_test.go index 7b55ba54e..4fb17e87d 100644 --- a/s3proxy/internal/crypto/crypto_test.go +++ b/s3proxy/internal/crypto/crypto_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package crypto diff --git a/s3proxy/internal/kms/BUILD.bazel b/s3proxy/internal/kms/BUILD.bazel index 630b13818..bb7bc1cd4 100644 --- a/s3proxy/internal/kms/BUILD.bazel +++ b/s3proxy/internal/kms/BUILD.bazel @@ -8,7 +8,7 @@ go_library( visibility = ["//s3proxy:__subpackages__"], deps = [ "//keyservice/keyserviceproto", - "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//:go_default_library", "@org_golang_google_grpc//credentials/insecure", ], ) @@ -21,7 +21,7 @@ go_test( "//internal/logger", "//keyservice/keyserviceproto", "@com_github_stretchr_testify//assert", - "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//:go_default_library", "@org_golang_google_grpc//test/bufconn", "@org_uber_go_goleak//:goleak", ], diff --git a/s3proxy/internal/kms/kms.go b/s3proxy/internal/kms/kms.go index f935c3f2c..d6c100b20 100644 --- a/s3proxy/internal/kms/kms.go +++ b/s3proxy/internal/kms/kms.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* @@ -42,7 +42,7 @@ func (c Client) GetDataKey(ctx context.Context, keyID string, length int) ([]byt // the KMS does not use aTLS since traffic is only routed through the Constellation cluster // cluster internal connections are considered trustworthy log.Info("Connecting to KMS") - conn, err := grpc.NewClient(c.endpoint, grpc.WithTransportCredentials(insecure.NewCredentials())) + conn, err := grpc.DialContext(ctx, c.endpoint, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != 
nil { return nil, err } diff --git a/s3proxy/internal/kms/kms_test.go b/s3proxy/internal/kms/kms_test.go index 974bce382..cd831ddc4 100644 --- a/s3proxy/internal/kms/kms_test.go +++ b/s3proxy/internal/kms/kms_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package kms @@ -60,7 +60,7 @@ func TestGetDataKey(t *testing.T) { client.grpc = tc.client - res, err := client.GetDataKey(t.Context(), "disk-uuid", 32) + res, err := client.GetDataKey(context.Background(), "disk-uuid", 32) if tc.wantErr { assert.Error(err) } else { diff --git a/s3proxy/internal/router/handler.go b/s3proxy/internal/router/handler.go index c0005fb74..a85b97a1a 100644 --- a/s3proxy/internal/router/handler.go +++ b/s3proxy/internal/router/handler.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package router diff --git a/s3proxy/internal/router/object.go b/s3proxy/internal/router/object.go index 78b69ee7c..d7f6779f1 100644 --- a/s3proxy/internal/router/object.go +++ b/s3proxy/internal/router/object.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package router diff --git a/s3proxy/internal/router/router.go b/s3proxy/internal/router/router.go index c8ab7ebf7..0efa4302f 100644 --- a/s3proxy/internal/router/router.go +++ b/s3proxy/internal/router/router.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* diff --git a/s3proxy/internal/router/router_test.go b/s3proxy/internal/router/router_test.go index f3cc1b959..a690ce669 100644 --- a/s3proxy/internal/router/router_test.go +++ b/s3proxy/internal/router/router_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package router diff --git a/s3proxy/internal/s3/s3.go b/s3proxy/internal/s3/s3.go index f02ca4aa1..462530be7 100644 --- a/s3proxy/internal/s3/s3.go +++ b/s3proxy/internal/s3/s3.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ /* diff --git a/terraform-provider-constellation/docs/data-sources/attestation.md b/terraform-provider-constellation/docs/data-sources/attestation.md index 0dbf32a3d..ec4118c0f 100644 --- a/terraform-provider-constellation/docs/data-sources/attestation.md +++ b/terraform-provider-constellation/docs/data-sources/attestation.md @@ -32,7 +32,6 @@ data "constellation_attestation" "test" { * `aws-nitro-tpm` * `azure-sev-snp` * `azure-tdx` - * `gcp-sev-snp` * `gcp-sev-es` * `qemu-vtpm` - `csp` (String) CSP (Cloud Service Provider) to use. (e.g. `azure`) @@ -42,8 +41,7 @@ See the [full list of CSPs](https://docs.edgeless.systems/constellation/overview ### Optional - `insecure` (Boolean) DON'T USE IN PRODUCTION Skip the signature verification when fetching measurements for the image. -- `maa_url` (String) For Azure only, the URL of the Microsoft Azure Attestation service. The MAA's policy needs to be patched manually to work with Constellation OS images. -See the [Constellation documentation](https://docs.edgeless.systems/constellation/workflows/terraform-provider#quick-setup) for more information. 
+- `maa_url` (String) For Azure only, the URL of the Microsoft Azure Attestation service ### Read-Only @@ -84,7 +82,6 @@ Read-Only: * `aws-nitro-tpm` * `azure-sev-snp` * `azure-tdx` - * `gcp-sev-snp` * `gcp-sev-es` * `qemu-vtpm` diff --git a/terraform-provider-constellation/docs/data-sources/image.md b/terraform-provider-constellation/docs/data-sources/image.md index 7933e93fc..7f7186b56 100644 --- a/terraform-provider-constellation/docs/data-sources/image.md +++ b/terraform-provider-constellation/docs/data-sources/image.md @@ -31,7 +31,6 @@ data "constellation_image" "example" { * `aws-nitro-tpm` * `azure-sev-snp` * `azure-tdx` - * `gcp-sev-snp` * `gcp-sev-es` * `qemu-vtpm` - `csp` (String) CSP (Cloud Service Provider) to use. (e.g. `azure`) diff --git a/terraform-provider-constellation/docs/resources/cluster.md b/terraform-provider-constellation/docs/resources/cluster.md index 5d9ebca32..7b6d1ca21 100644 --- a/terraform-provider-constellation/docs/resources/cluster.md +++ b/terraform-provider-constellation/docs/resources/cluster.md @@ -69,7 +69,7 @@ resource "constellation_cluster" "azure_example" { See the [full list of CSPs](https://docs.edgeless.systems/constellation/overview/clouds) that Constellation supports. - `image` (Attributes) Constellation OS Image to use on the nodes. (see [below for nested schema](#nestedatt--image)) - `init_secret` (String) Secret used for initialization of the cluster. -- `kubernetes_version` (String) The Kubernetes version to use for the cluster. The supported versions are [v1.30.14 v1.31.11 v1.32.7]. +- `kubernetes_version` (String) The Kubernetes version to use for the cluster. The supported versions are [v1.27.9 v1.28.5 v1.29.0]. - `master_secret` (String) Hex-encoded 32-byte master secret for the cluster. - `master_secret_salt` (String) Hex-encoded 32-byte master secret salt for the cluster. - `measurement_salt` (String) Hex-encoded 32-byte measurement salt for the cluster. @@ -90,12 +90,8 @@ See the [full list of CSPs](https://docs.edgeless.systems/constellation/overview ### Read-Only -- `client_certificate` (String) The client certificate of the cluster. -- `client_key` (String, Sensitive) The client key of the cluster. -- `cluster_ca_certificate` (String) The cluster CA certificate of the cluster. - `cluster_id` (String) The cluster ID of the cluster. -- `host` (String) The host of the cluster. -- `kubeconfig` (String, Sensitive) The kubeconfig (file) of the cluster. +- `kubeconfig` (String, Sensitive) The kubeconfig of the cluster. - `owner_id` (String) The owner ID of the cluster. 
@@ -114,7 +110,6 @@ Required: * `aws-nitro-tpm` * `azure-sev-snp` * `azure-tdx` - * `gcp-sev-snp` * `gcp-sev-es` * `qemu-vtpm` diff --git a/terraform-provider-constellation/examples/full/aws/main.tf b/terraform-provider-constellation/examples/full/aws/main.tf index 03856e81b..4a467d5e4 100644 --- a/terraform-provider-constellation/examples/full/aws/main.tf +++ b/terraform-provider-constellation/examples/full/aws/main.tf @@ -2,11 +2,11 @@ terraform { required_providers { constellation = { source = "edgelesssys/constellation" - version = "2.23.1" // replace with the version you want to use + version = "0.0.0" // replace with the version you want to use } random = { source = "hashicorp/random" - version = "3.7.2" + version = "3.6.0" } } } diff --git a/terraform-provider-constellation/examples/full/azure/main.tf b/terraform-provider-constellation/examples/full/azure/main.tf index 761406f61..46a5f8f9b 100644 --- a/terraform-provider-constellation/examples/full/azure/main.tf +++ b/terraform-provider-constellation/examples/full/azure/main.tf @@ -2,11 +2,11 @@ terraform { required_providers { constellation = { source = "edgelesssys/constellation" - version = "2.23.1" // replace with the version you want to use + version = "0.0.0" // replace with the version you want to use } random = { source = "hashicorp/random" - version = "3.7.2" + version = "3.6.0" } } } @@ -21,8 +21,7 @@ locals { location = "northeurope" control_plane_count = 3 worker_count = 2 - instance_type = "Standard_DC4as_v5" // Adjust if using TDX - subscription_id = "00000000-0000-0000-0000-000000000000" + instance_type = "Standard_DC4as_v5" master_secret = random_bytes.master_secret.hex master_secret_salt = random_bytes.master_secret_salt.hex @@ -44,7 +43,6 @@ resource "random_bytes" "measurement_salt" { module "azure_iam" { // replace $VERSION with the Constellation version you want to use, e.g., v2.14.0 source = "https://github.com/edgelesssys/constellation/releases/download/$VERSION/terraform-module.zip//terraform-module/iam/azure" - subscription_id = local.subscription_id location = local.location service_principal_name = "${local.name}-sp" resource_group_name = "${local.name}-rg" @@ -53,7 +51,6 @@ module "azure_iam" { module "azure_infrastructure" { // replace $VERSION with the Constellation version you want to use, e.g., v2.14.0 source = "https://github.com/edgelesssys/constellation/releases/download/$VERSION/terraform-module.zip//terraform-module/azure" - subscription_id = local.subscription_id name = local.name user_assigned_identity = module.azure_iam.uami_id node_groups = { @@ -83,9 +80,7 @@ data "constellation_attestation" "foo" { csp = local.csp attestation_variant = local.attestation_variant image = data.constellation_image.bar.image - # Needs to be patched manually, see: - # https://docs.edgeless.systems/constellation/workflows/terraform-provider#quick-setup - maa_url = module.azure_infrastructure.attestation_url + maa_url = module.azure_infrastructure.attestation_url } data "constellation_image" "bar" { diff --git a/terraform-provider-constellation/examples/full/gcp/main.tf b/terraform-provider-constellation/examples/full/gcp/main.tf index c16906785..f7ac80b04 100644 --- a/terraform-provider-constellation/examples/full/gcp/main.tf +++ b/terraform-provider-constellation/examples/full/gcp/main.tf @@ -2,11 +2,11 @@ terraform { required_providers { constellation = { source = "edgelesssys/constellation" - version = "2.23.1" // replace with the version you want to use + version = "0.0.0" // replace with the version you want 
to use } random = { source = "hashicorp/random" - version = "3.7.2" + version = "3.6.0" } } } @@ -17,14 +17,13 @@ locals { kubernetes_version = "vX.Y.Z" microservice_version = "vX.Y.Z" csp = "gcp" - attestation_variant = "gcp-sev-snp" + attestation_variant = "gcp-sev-es" region = "europe-west3" zone = "europe-west3-b" project_id = "constellation-331613" control_plane_count = 3 worker_count = 2 instance_type = "n2d-standard-4" - cc_technology = "SEV_SNP" master_secret = random_bytes.master_secret.hex master_secret_salt = random_bytes.master_secret_salt.hex @@ -45,11 +44,11 @@ resource "random_bytes" "measurement_salt" { module "gcp_iam" { // replace $VERSION with the Constellation version you want to use, e.g., v2.14.0 - source = "https://github.com/edgelesssys/constellation/releases/download/$VERSION/terraform-module.zip//terraform-module/iam/gcp" - project_id = local.project_id - name_prefix = local.name - zone = local.zone - region = local.region + source = "https://github.com/edgelesssys/constellation/releases/download/$VERSION/terraform-module.zip//terraform-module/iam/gcp" + project_id = local.project_id + service_account_id = "${local.name}-sa" + zone = local.zone + region = local.region } module "gcp_infrastructure" { @@ -80,8 +79,6 @@ module "gcp_infrastructure" { region = local.region project = local.project_id internal_load_balancer = false - cc_technology = local.cc_technology - iam_service_account_vm = module.gcp_iam.service_account_mail_vm } data "constellation_attestation" "foo" { diff --git a/terraform-provider-constellation/examples/full/stackit/main.tf b/terraform-provider-constellation/examples/full/stackit/main.tf index 403e57e02..22ef92451 100644 --- a/terraform-provider-constellation/examples/full/stackit/main.tf +++ b/terraform-provider-constellation/examples/full/stackit/main.tf @@ -2,11 +2,11 @@ terraform { required_providers { constellation = { source = "edgelesssys/constellation" - version = "2.23.1" // replace with the version you want to use + version = "0.0.0" // replace with the version you want to use } random = { source = "hashicorp/random" - version = "3.7.2" + version = "3.6.0" } } } diff --git a/terraform-provider-constellation/internal/data/data.go b/terraform-provider-constellation/internal/data/data.go index 077ae745d..733f76703 100644 --- a/terraform-provider-constellation/internal/data/data.go +++ b/terraform-provider-constellation/internal/data/data.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // The data package implements the structures used to pass data between different resources. 
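# ---------------------------------------------------------------------------
# Editor's note, illustrative only (not part of the patch above): the example
# configurations leave two placeholders for the reader -- the provider
# version = "0.0.0" // replace with the version you want to use, and $VERSION
# in the terraform-module.zip source URLs. A minimal sketch of a filled-in
# Azure setup follows, assuming the v2.14.0 release that the example comments
# mention; the version numbers and names here are placeholders, substitute the
# release and names you actually use.
terraform {
  required_providers {
    constellation = {
      source  = "edgelesssys/constellation"
      version = "2.14.0" # assumed: pin to the deployed Constellation release
    }
  }
}

module "azure_iam" {
  # $VERSION in the example is replaced by the matching release tag.
  source                 = "https://github.com/edgelesssys/constellation/releases/download/v2.14.0/terraform-module.zip//terraform-module/iam/azure"
  location               = "northeurope"
  service_principal_name = "example-sp" # hypothetical name
  resource_group_name    = "example-rg" # hypothetical name
}
# ---------------------------------------------------------------------------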
diff --git a/terraform-provider-constellation/internal/data/providerdata.go b/terraform-provider-constellation/internal/data/providerdata.go index 1e9bb6f1f..118272082 100644 --- a/terraform-provider-constellation/internal/data/providerdata.go +++ b/terraform-provider-constellation/internal/data/providerdata.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package data diff --git a/terraform-provider-constellation/internal/provider/BUILD.bazel b/terraform-provider-constellation/internal/provider/BUILD.bazel index e60064603..1fac7618a 100644 --- a/terraform-provider-constellation/internal/provider/BUILD.bazel +++ b/terraform-provider-constellation/internal/provider/BUILD.bazel @@ -32,7 +32,6 @@ go_library( "//internal/constellation/helm", "//internal/constellation/kubecmd", "//internal/constellation/state", - "//internal/encoding", "//internal/file", "//internal/grpc/dialer", "//internal/imagefetcher", @@ -58,7 +57,6 @@ go_library( "@com_github_hashicorp_terraform_plugin_framework_validators//stringvalidator", "@com_github_hashicorp_terraform_plugin_log//tflog", "@com_github_spf13_afero//:afero", - "@io_k8s_client_go//tools/clientcmd", ], ) @@ -110,6 +108,6 @@ go_test( "@com_github_hashicorp_terraform_plugin_testing//terraform", "@com_github_stretchr_testify//assert", "@com_github_stretchr_testify//require", - "@io_bazel_rules_go//go/runfiles", + "@io_bazel_rules_go//go/runfiles:go_default_library", ], ) diff --git a/terraform-provider-constellation/internal/provider/attestation_data_source.go b/terraform-provider-constellation/internal/provider/attestation_data_source.go index 4f59504f0..56815ae22 100644 --- a/terraform-provider-constellation/internal/provider/attestation_data_source.go +++ b/terraform-provider-constellation/internal/provider/attestation_data_source.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package provider @@ -98,9 +98,8 @@ func (d *AttestationDataSource) Schema(_ context.Context, _ datasource.SchemaReq "attestation_variant": newAttestationVariantAttributeSchema(attributeInput), "image": newImageAttributeSchema(attributeInput), "maa_url": schema.StringAttribute{ - MarkdownDescription: `For Azure only, the URL of the Microsoft Azure Attestation service. The MAA's policy needs to be patched manually to work with Constellation OS images. -See the [Constellation documentation](https://docs.edgeless.systems/constellation/workflows/terraform-provider#quick-setup) for more information.`, - Optional: true, + MarkdownDescription: "For Azure only, the URL of the Microsoft Azure Attestation service", + Optional: true, }, "insecure": schema.BoolAttribute{ MarkdownDescription: "DON'T USE IN PRODUCTION Skip the signature verification when fetching measurements for the image.", @@ -126,15 +125,6 @@ func (d *AttestationDataSource) ValidateConfig(ctx context.Context, req datasour ) return } - - if !data.MaaURL.IsNull() { - resp.Diagnostics.AddAttributeWarning( - path.Root("maa_url"), - "Ensure that the MAA's policy is patched", "When MAA is used, please ensure the MAA's policy is patche properly for use within Constellation. 
See https://docs.edgeless.systems/constellation/workflows/terraform-provider#quick-setup for more information.", - ) - return - } - if data.AttestationVariant.Equal(types.StringValue("azure-sev-snp")) && data.MaaURL.IsNull() { tflog.Info(ctx, "MAA URL not set, MAA fallback will be unavailable") } @@ -172,18 +162,15 @@ func (d *AttestationDataSource) Read(ctx context.Context, req datasource.ReadReq insecureFetch := data.Insecure.ValueBool() - latestVersions := attestationconfigapi.Entry{} - if attestationVariant.Equal(variant.AWSSEVSNP{}) || - attestationVariant.Equal(variant.AzureSEVSNP{}) || - attestationVariant.Equal(variant.AzureTDX{}) || - attestationVariant.Equal(variant.GCPSEVSNP{}) { - latestVersions, err = d.fetcher.FetchLatestVersion(ctx, attestationVariant) + snpVersions := attestationconfigapi.SEVSNPVersionAPI{} + if attestationVariant.Equal(variant.AzureSEVSNP{}) || attestationVariant.Equal(variant.AWSSEVSNP{}) { + snpVersions, err = d.fetcher.FetchSEVSNPVersionLatest(ctx, attestationVariant) if err != nil { resp.Diagnostics.AddError("Fetching SNP Version numbers", err.Error()) return } } - tfAttestation, err := convertToTfAttestation(attestationVariant, latestVersions) + tfAttestation, err := convertToTfAttestation(attestationVariant, snpVersions) if err != nil { resp.Diagnostics.AddError("Converting attestation", err.Error()) } diff --git a/terraform-provider-constellation/internal/provider/attestation_data_source_test.go b/terraform-provider-constellation/internal/provider/attestation_data_source_test.go index 3a0b9c578..4fed9fbe3 100644 --- a/terraform-provider-constellation/internal/provider/attestation_data_source_test.go +++ b/terraform-provider-constellation/internal/provider/attestation_data_source_test.go @@ -1,14 +1,12 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package provider import ( - "errors" - "strconv" "testing" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -18,30 +16,6 @@ func TestAccAttestationSource(t *testing.T) { // Set the path to the Terraform binary for acceptance testing when running under Bazel. 
bazelPreCheck := func() { bazelSetTerraformBinaryPath(t) } - assertNonZeroValue := func(attr string) resource.TestCheckFunc { - return resource.TestCheckResourceAttrWith( - "data.constellation_attestation.test", - attr, - func(value string) error { - parsedValue, err := strconv.ParseUint(value, 10, 8) - if err == nil && parsedValue == 0 { - return errors.New("expected non-zero value") - } - return err - }, - ) - } - assertUint8Value := func(attr string) resource.TestCheckFunc { - return resource.TestCheckResourceAttrWith( - "data.constellation_attestation.test", - attr, - func(value string) error { - _, err := strconv.ParseUint(value, 10, 8) - return err - }, - ) - } - testCases := map[string]resource.TestCase{ "azure sev-snp success": { ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, @@ -62,10 +36,12 @@ func TestAccAttestationSource(t *testing.T) { `, Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr("data.constellation_attestation.test", "attestation.variant", "azure-sev-snp"), - assertNonZeroValue("attestation.bootloader_version"), - assertNonZeroValue("attestation.microcode_version"), - assertNonZeroValue("attestation.snp_version"), - assertUint8Value("attestation.tee_version"), // the valid value is 0 at the moment + + resource.TestCheckResourceAttr("data.constellation_attestation.test", "attestation.bootloader_version", "3"), + resource.TestCheckResourceAttr("data.constellation_attestation.test", "attestation.microcode_version", "115"), + resource.TestCheckResourceAttr("data.constellation_attestation.test", "attestation.snp_version", "8"), + resource.TestCheckResourceAttr("data.constellation_attestation.test", "attestation.tee_version", "0"), + resource.TestCheckResourceAttr("data.constellation_attestation.test", "attestation.azure_firmware_signer_config.accepted_key_digests.0", "0356215882a825279a85b300b0b742931d113bf7e32dde2e50ffde7ec743ca491ecdd7f336dc28a6e0b2bb57af7a44a3"), resource.TestCheckResourceAttr("data.constellation_attestation.test", "attestation.azure_firmware_signer_config.enforcement_policy", "MAAFallback"), @@ -108,7 +84,7 @@ func TestAccAttestationSource(t *testing.T) { }, }, }, - "gcp sev-es succcess": { + "gcp sev-snp succcess": { ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, PreCheck: bazelPreCheck, Steps: []resource.TestStep{ @@ -134,34 +110,6 @@ func TestAccAttestationSource(t *testing.T) { }, }, }, - "gcp sev-snp succcess": { - ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, - PreCheck: bazelPreCheck, - Steps: []resource.TestStep{ - { - Config: testingConfig + ` - data "constellation_attestation" "test" { - csp = "gcp" - attestation_variant = "gcp-sev-snp" - image = { - version = "v2.17.0" - reference = "v2.17.0" - short_path = "v2.17.0" - } - } - `, - Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr("data.constellation_attestation.test", "attestation.variant", "gcp-sev-snp"), - assertNonZeroValue("attestation.bootloader_version"), - assertNonZeroValue("attestation.microcode_version"), - assertNonZeroValue("attestation.snp_version"), - assertUint8Value("attestation.tee_version"), // the valid value is 0 at the moment - resource.TestCheckResourceAttr("data.constellation_attestation.test", "attestation.measurements.1.expected", "3695dcc55e3aa34027c27793c85c723c697d708c42d1f73bd6fa4f26608a5b24"), - resource.TestCheckResourceAttr("data.constellation_attestation.test", "attestation.measurements.1.warn_only", "true"), - ), - }, - }, - }, "STACKIT qemu-vtpm success": { 
ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, PreCheck: bazelPreCheck, diff --git a/terraform-provider-constellation/internal/provider/cluster_resource.go b/terraform-provider-constellation/internal/provider/cluster_resource.go index ba7be3d43..a12fe38da 100644 --- a/terraform-provider-constellation/internal/provider/cluster_resource.go +++ b/terraform-provider-constellation/internal/provider/cluster_resource.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package provider @@ -15,6 +15,7 @@ import ( "errors" "fmt" "io" + "net" "net/url" "regexp" "strings" @@ -53,7 +54,6 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types/basetypes" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/spf13/afero" - "k8s.io/client-go/tools/clientcmd" ) var ( @@ -102,13 +102,9 @@ type ClusterResourceModel struct { Azure types.Object `tfsdk:"azure"` OpenStack types.Object `tfsdk:"openstack"` - OwnerID types.String `tfsdk:"owner_id"` - ClusterID types.String `tfsdk:"cluster_id"` - KubeConfig types.String `tfsdk:"kubeconfig"` - Host types.String `tfsdk:"host"` - ClientCertificate types.String `tfsdk:"client_certificate"` - ClientKey types.String `tfsdk:"client_key"` - ClusterCACertificate types.String `tfsdk:"cluster_ca_certificate"` + OwnerID types.String `tfsdk:"owner_id"` + ClusterID types.String `tfsdk:"cluster_id"` + KubeConfig types.String `tfsdk:"kubeconfig"` } // networkConfigAttribute is the network config attribute's data model. @@ -421,8 +417,8 @@ func (r *ClusterResource) Schema(_ context.Context, _ resource.SchemaRequest, re }, }, "kubeconfig": schema.StringAttribute{ - MarkdownDescription: "The kubeconfig (file) of the cluster.", - Description: "The kubeconfig (file) of the cluster.", + MarkdownDescription: "The kubeconfig of the cluster.", + Description: "The kubeconfig of the cluster.", Computed: true, Sensitive: true, PlanModifiers: []planmodifier.String{ @@ -430,43 +426,6 @@ func (r *ClusterResource) Schema(_ context.Context, _ resource.SchemaRequest, re stringplanmodifier.UseStateForUnknown(), }, }, - "host": schema.StringAttribute{ - MarkdownDescription: "The host of the cluster.", - Description: "The host of the cluster.", - Computed: true, - PlanModifiers: []planmodifier.String{ - // We know that this value will never change after creation, so we can use the state value for upgrades. - stringplanmodifier.UseStateForUnknown(), - }, - }, - "client_certificate": schema.StringAttribute{ - MarkdownDescription: "The client certificate of the cluster.", - Description: "The client certificate of the cluster.", - Computed: true, - PlanModifiers: []planmodifier.String{ - // We know that this value will never change after creation, so we can use the state value for upgrades. - stringplanmodifier.UseStateForUnknown(), - }, - }, - "client_key": schema.StringAttribute{ - MarkdownDescription: "The client key of the cluster.", - Description: "The client key of the cluster.", - Computed: true, - Sensitive: true, - PlanModifiers: []planmodifier.String{ - // We know that this value will never change after creation, so we can use the state value for upgrades. 
- stringplanmodifier.UseStateForUnknown(), - }, - }, - "cluster_ca_certificate": schema.StringAttribute{ - MarkdownDescription: "The cluster CA certificate of the cluster.", - Description: "The cluster CA certificate of the cluster.", - Computed: true, - PlanModifiers: []planmodifier.String{ - // We know that this value will never change after creation, so we can use the state value for upgrades. - stringplanmodifier.UseStateForUnknown(), - }, - }, }, } } @@ -549,7 +508,7 @@ func (r *ClusterResource) Configure(_ context.Context, req resource.ConfigureReq } newDialer := func(validator atls.Validator) *dialer.Dialer { - return dialer.New(nil, validator, nil) + return dialer.New(nil, validator, &net.Dialer{}) } r.newApplier = func(ctx context.Context, _ atls.Validator) *constellation.Applier { @@ -1093,7 +1052,6 @@ func (r *ClusterResource) apply(ctx context.Context, data *ClusterResourceModel, DeployCSIDriver: microserviceCfg.CSIDriver, masterSecret: secrets.masterSecret, serviceAccURI: serviceAccURI, - serviceCIDR: networkCfg.IPCidrService.ValueString(), } if csp == cloudprovider.OpenStack { payload.openStackHelmValues = &helm.OpenStackValues{ @@ -1213,45 +1171,7 @@ func (r *ClusterResource) runInitRPC(ctx context.Context, applier *constellation } // Save data from init response into the Terraform state - - // Save the raw kubeconfig file. data.KubeConfig = types.StringValue(string(initOutput.Kubeconfig)) - - // Unmarshal the kubeconfig to get the fine-grained values. - kubeconfig, err := clientcmd.Load(initOutput.Kubeconfig) - if err != nil { - diags.AddError("Unmarshalling kubeconfig", err.Error()) - return diags - } - - clusterContext, ok := kubeconfig.Contexts[kubeconfig.CurrentContext] - if !ok { - diags.AddError("Getting cluster context", - fmt.Sprintf("Context %s not found in kubeconfig", kubeconfig.CurrentContext)) - return diags - } - - cluster, ok := kubeconfig.Clusters[clusterContext.Cluster] - if !ok { - diags.AddError("Getting cluster", - fmt.Sprintf("Cluster %s not found in kubeconfig", clusterContext.Cluster)) - return diags - } - - data.Host = types.StringValue(cluster.Server) - data.ClusterCACertificate = types.StringValue(string(cluster.CertificateAuthorityData)) - - authInfo, ok := kubeconfig.AuthInfos[clusterContext.AuthInfo] - if !ok { - diags.AddError("Getting auth info", - fmt.Sprintf("Auth info %s not found in kubeconfig", clusterContext.AuthInfo)) - return diags - } - - data.ClientCertificate = types.StringValue(string(authInfo.ClientCertificateData)) - data.ClientKey = types.StringValue(string(authInfo.ClientKeyData)) - - // Save other values from the init response. data.ClusterID = types.StringValue(initOutput.ClusterID) data.OwnerID = types.StringValue(initOutput.OwnerID) @@ -1268,7 +1188,6 @@ type applyHelmChartsPayload struct { masterSecret uri.MasterSecret // master secret of the cluster. serviceAccURI string // URI of the service account used within the cluster. openStackHelmValues *helm.OpenStackValues // OpenStack-specific Helm values. - serviceCIDR string // CIDR used for k8s services - needed for CoreDNS chart. } // applyHelmCharts applies the Helm charts to the cluster. @@ -1290,12 +1209,6 @@ func (r *ClusterResource) applyHelmCharts(ctx context.Context, applier *constell // The user has previously been warned about this when planning a microservice version change. 
AllowDestructive: helm.AllowDestructive, OpenStackValues: payload.openStackHelmValues, - ServiceCIDR: payload.serviceCIDR, - } - - if err := applier.AnnotateCoreDNSResources(ctx); err != nil { - diags.AddError("Annotating CoreDNS resources", err.Error()) - return diags } executor, _, err := applier.PrepareHelmCharts(options, state, @@ -1313,11 +1226,6 @@ func (r *ClusterResource) applyHelmCharts(ctx context.Context, applier *constell diags.AddError("Applying Helm charts", err.Error()) return diags } - - if err := applier.CleanupCoreDNSResources(ctx); err != nil { - diags.AddError("Cleaning up CoreDNS resources", err.Error()) - return diags - } return diags } diff --git a/terraform-provider-constellation/internal/provider/cluster_resource_test.go b/terraform-provider-constellation/internal/provider/cluster_resource_test.go index 7831c4c7e..fb1b5c4fc 100644 --- a/terraform-provider-constellation/internal/provider/cluster_resource_test.go +++ b/terraform-provider-constellation/internal/provider/cluster_resource_test.go @@ -1,12 +1,13 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package provider import ( + "context" "fmt" "regexp" "testing" @@ -95,14 +96,14 @@ func TestViolatedImageConstraint(t *testing.T) { Version: tc.version, } - input, diags := basetypes.NewObjectValueFrom(t.Context(), map[string]attr.Type{ + input, diags := basetypes.NewObjectValueFrom(context.Background(), map[string]attr.Type{ "version": basetypes.StringType{}, "reference": basetypes.StringType{}, "short_path": basetypes.StringType{}, "marketplace_image": basetypes.BoolType{}, }, img) require.Equal(t, 0, diags.ErrorsCount()) - _, _, diags2 := sut.getImageVersion(t.Context(), &ClusterResourceModel{ + _, _, diags2 := sut.getImageVersion(context.Background(), &ClusterResourceModel{ Image: input, }) require.Equal(t, tc.expectedErrorCount, diags2.ErrorsCount()) diff --git a/terraform-provider-constellation/internal/provider/convert.go b/terraform-provider-constellation/internal/provider/convert.go index 09161c776..087728168 100644 --- a/terraform-provider-constellation/internal/provider/convert.go +++ b/terraform-provider-constellation/internal/provider/convert.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package provider @@ -17,7 +17,6 @@ import ( "github.com/edgelesssys/constellation/v2/internal/attestation/measurements" "github.com/edgelesssys/constellation/v2/internal/attestation/variant" "github.com/edgelesssys/constellation/v2/internal/config" - "github.com/edgelesssys/constellation/v2/internal/encoding" ) // naming schema: @@ -111,22 +110,18 @@ func convertFromTfAttestationCfg(tfAttestation attestationAttribute, attestation attestationConfig = &config.AzureTDX{ Measurements: c11nMeasurements, - QESVN: newVersion(tfAttestation.TDX.QESVN), - PCESVN: newVersion(tfAttestation.TDX.PCESVN), - TEETCBSVN: newVersion(encoding.HexBytes(teeTCBSVN)), - QEVendorID: newVersion(encoding.HexBytes(qeVendorID)), + QESVN: tfAttestation.TDX.QESVN, + PCESVN: tfAttestation.TDX.PCESVN, + TEETCBSVN: teeTCBSVN, + QEVendorID: qeVendorID, MRSeam: mrSeam, - XFAM: newVersion(encoding.HexBytes(xfam)), + XFAM: xfam, IntelRootKey: rootKey, } case variant.GCPSEVES{}: attestationConfig = &config.GCPSEVES{ Measurements: c11nMeasurements, } - case variant.GCPSEVSNP{}: - attestationConfig = &config.GCPSEVSNP{ - Measurements: c11nMeasurements, - } case variant.QEMUVTPM{}: attestationConfig = 
&config.QEMUVTPM{ Measurements: c11nMeasurements, @@ -138,9 +133,13 @@ func convertFromTfAttestationCfg(tfAttestation attestationAttribute, attestation } // convertToTfAttestationCfg converts the constellation attestation config to the related terraform structs. -func convertToTfAttestation(attVar variant.Variant, latestVersions attestationconfigapi.Entry) (tfAttestation attestationAttribute, err error) { +func convertToTfAttestation(attVar variant.Variant, snpVersions attestationconfigapi.SEVSNPVersionAPI) (tfAttestation attestationAttribute, err error) { tfAttestation = attestationAttribute{ - Variant: attVar.String(), + Variant: attVar.String(), + BootloaderVersion: snpVersions.Bootloader, + TEEVersion: snpVersions.TEE, + SNPVersion: snpVersions.SNP, + MicrocodeVersion: snpVersions.Microcode, } switch attVar { @@ -150,21 +149,6 @@ func convertToTfAttestation(attVar variant.Variant, latestVersions attestationco return tfAttestation, err } tfAttestation.AMDRootKey = certStr - tfAttestation.BootloaderVersion = latestVersions.Bootloader - tfAttestation.TEEVersion = latestVersions.TEE - tfAttestation.SNPVersion = latestVersions.SNP - tfAttestation.MicrocodeVersion = latestVersions.Microcode - - case variant.GCPSEVSNP{}: - certStr, err := certAsString(config.DefaultForGCPSEVSNP().AMDRootKey) - if err != nil { - return tfAttestation, err - } - tfAttestation.AMDRootKey = certStr - tfAttestation.BootloaderVersion = latestVersions.Bootloader - tfAttestation.TEEVersion = latestVersions.TEE - tfAttestation.SNPVersion = latestVersions.SNP - tfAttestation.MicrocodeVersion = latestVersions.Microcode case variant.AzureSEVSNP{}: certStr, err := certAsString(config.DefaultForAzureSEVSNP().AMDRootKey) @@ -172,10 +156,6 @@ func convertToTfAttestation(attVar variant.Variant, latestVersions attestationco return tfAttestation, err } tfAttestation.AMDRootKey = certStr - tfAttestation.BootloaderVersion = latestVersions.Bootloader - tfAttestation.TEEVersion = latestVersions.TEE - tfAttestation.SNPVersion = latestVersions.SNP - tfAttestation.MicrocodeVersion = latestVersions.Microcode firmwareCfg := config.DefaultForAzureSEVSNP().FirmwareSignerConfig tfFirmwareCfg, err := convertToTfFirmwareCfg(firmwareCfg) @@ -183,19 +163,24 @@ func convertToTfAttestation(attVar variant.Variant, latestVersions attestationco return tfAttestation, err } tfAttestation.AzureSNPFirmwareSignerConfig = tfFirmwareCfg - case variant.AzureTDX{}: - certStr, err := certAsString(config.DefaultForAzureTDX().IntelRootKey) + tdxCfg := config.DefaultForAzureTDX() + certStr, err := certAsString(tdxCfg.IntelRootKey) if err != nil { return tfAttestation, err } - tfAttestation.TDX.IntelRootKey = certStr - tfAttestation.TDX.PCESVN = latestVersions.PCESVN - tfAttestation.TDX.QESVN = latestVersions.QESVN - tfAttestation.TDX.TEETCBSVN = hex.EncodeToString(latestVersions.TEETCBSVN[:]) - tfAttestation.TDX.QEVendorID = hex.EncodeToString(latestVersions.QEVendorID[:]) - tfAttestation.TDX.XFAM = hex.EncodeToString(latestVersions.XFAM[:]) + tfTdxCfg := tdxConfigAttribute{ + IntelRootKey: certStr, + // TODO(AB#3798): Load these values dynamically from our attestation API + QESVN: tdxCfg.QESVN, + PCESVN: tdxCfg.PCESVN, + TEETCBSVN: hex.EncodeToString(tdxCfg.TEETCBSVN), + QEVendorID: hex.EncodeToString(tdxCfg.QEVendorID), + MRSeam: hex.EncodeToString(tdxCfg.MRSeam), + XFAM: hex.EncodeToString(tdxCfg.XFAM), + } + tfAttestation.TDX = tfTdxCfg case variant.GCPSEVES{}, variant.QEMUVTPM{}: // no additional fields default: @@ -255,8 +240,8 @@ func 
convertToTfMeasurements(m measurements.M) map[string]measurementAttribute { return tfMeasurements } -func newVersion[T uint8 | uint16 | encoding.HexBytes](v T) config.AttestationVersion[T] { - return config.AttestationVersion[T]{ +func newVersion(v uint8) config.AttestationVersion { + return config.AttestationVersion{ Value: v, } } diff --git a/terraform-provider-constellation/internal/provider/convert_test.go b/terraform-provider-constellation/internal/provider/convert_test.go index 9efa14f38..2e29378a3 100644 --- a/terraform-provider-constellation/internal/provider/convert_test.go +++ b/terraform-provider-constellation/internal/provider/convert_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package provider diff --git a/terraform-provider-constellation/internal/provider/image_data_source.go b/terraform-provider-constellation/internal/provider/image_data_source.go index 33288d6a8..6ed11c363 100644 --- a/terraform-provider-constellation/internal/provider/image_data_source.go +++ b/terraform-provider-constellation/internal/provider/image_data_source.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package provider diff --git a/terraform-provider-constellation/internal/provider/image_data_source_test.go b/terraform-provider-constellation/internal/provider/image_data_source_test.go index 789d169e0..669899e39 100644 --- a/terraform-provider-constellation/internal/provider/image_data_source_test.go +++ b/terraform-provider-constellation/internal/provider/image_data_source_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package provider @@ -125,7 +125,7 @@ func TestAccImageDataSource(t *testing.T) { }, }, }, - "gcp sev-es success": { + "gcp success": { ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, PreCheck: bazelPreCheck, Steps: []resource.TestStep{ @@ -141,23 +141,6 @@ func TestAccImageDataSource(t *testing.T) { }, }, }, - // TODO(msanft): Enable once v2.17.0 is available - // "gcp sev-snp success": { - // ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, - // PreCheck: bazelPreCheck, - // Steps: []resource.TestStep{ - // { - // Config: testingConfig + ` - // data "constellation_image" "test" { - // version = "v2.17.0" - // attestation_variant = "gcp-sev-snp" - // csp = "gcp" - // } - // `, - // Check: resource.TestCheckResourceAttr("data.constellation_image.test", "image.reference", "projects/constellation-images/global/images/v2-13-0-gcp-sev-es-stable"), // should be immutable, - // }, - // }, - // }, "stackit success": { ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, PreCheck: bazelPreCheck, diff --git a/terraform-provider-constellation/internal/provider/provider.go b/terraform-provider-constellation/internal/provider/provider.go index 1605d1d60..b0eb86c39 100644 --- a/terraform-provider-constellation/internal/provider/provider.go +++ b/terraform-provider-constellation/internal/provider/provider.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ // The provider package implements the Constellation Terraform provider's diff --git a/terraform-provider-constellation/internal/provider/provider_test.go b/terraform-provider-constellation/internal/provider/provider_test.go index a96fdcc37..75c7ade73 100644 --- 
a/terraform-provider-constellation/internal/provider/provider_test.go +++ b/terraform-provider-constellation/internal/provider/provider_test.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package provider diff --git a/terraform-provider-constellation/internal/provider/shared_attributes.go b/terraform-provider-constellation/internal/provider/shared_attributes.go index f3938914d..b6f96cd17 100644 --- a/terraform-provider-constellation/internal/provider/shared_attributes.go +++ b/terraform-provider-constellation/internal/provider/shared_attributes.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package provider @@ -31,13 +31,12 @@ func newAttestationVariantAttributeSchema(t attributeType) schema.Attribute { " * `aws-nitro-tpm`\n" + " * `azure-sev-snp`\n" + " * `azure-tdx`\n" + - " * `gcp-sev-snp`\n" + " * `gcp-sev-es`\n" + " * `qemu-vtpm`\n", Required: isInput, Computed: !isInput, Validators: []validator.String{ - stringvalidator.OneOf("aws-sev-snp", "aws-nitro-tpm", "azure-sev-snp", "azure-tdx", "gcp-sev-es", "gcp-sev-snp", "qemu-vtpm"), + stringvalidator.OneOf("aws-sev-snp", "aws-nitro-tpm", "azure-sev-snp", "azure-tdx", "gcp-sev-es", "qemu-vtpm"), }, } } diff --git a/terraform-provider-constellation/main.go b/terraform-provider-constellation/main.go index cf0824343..447ecc4e6 100644 --- a/terraform-provider-constellation/main.go +++ b/terraform-provider-constellation/main.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package main diff --git a/terraform/BUILD.bazel b/terraform/BUILD.bazel index 88e71216d..f4f99f242 100644 --- a/terraform/BUILD.bazel +++ b/terraform/BUILD.bazel @@ -77,7 +77,6 @@ go_library( "infrastructure/aws/modules/public_private_subnet/output.tf", "infrastructure/openstack/modules/stackit_loadbalancer/main.tf", "infrastructure/openstack/modules/stackit_loadbalancer/variables.tf", - "infrastructure/iam/aws/alb_policy.json", ], importpath = "github.com/edgelesssys/constellation/v2/terraform", visibility = ["//visibility:public"], diff --git a/terraform/assets.go b/terraform/assets.go index 61ada6f1e..9e0d71842 100644 --- a/terraform/assets.go +++ b/terraform/assets.go @@ -1,7 +1,7 @@ /* Copyright (c) Edgeless Systems GmbH -SPDX-License-Identifier: BUSL-1.1 +SPDX-License-Identifier: AGPL-3.0-only */ package terraform diff --git a/terraform/infrastructure/aws/.terraform.lock.hcl b/terraform/infrastructure/aws/.terraform.lock.hcl index 4914aaf10..32ca13156 100644 --- a/terraform/infrastructure/aws/.terraform.lock.hcl +++ b/terraform/infrastructure/aws/.terraform.lock.hcl @@ -2,52 +2,52 @@ # Manual edits may be lost in future updates. 
provider "registry.terraform.io/hashicorp/aws" { - version = "5.98.0" - constraints = "5.98.0" + version = "5.37.0" + constraints = "5.37.0" hashes = [ - "h1:/RMObGCrfJlVoQCf9h88hFkSyLafDXnw6r0yi4gpO80=", - "h1:KgOCdSG6euSc2lquuFlISJU/CzQTRhAO7WoaASxLZRc=", - "h1:neMFK/kP1KT6cTGID+Tkkt8L7PsN9XqwrPDGXVw3WVY=", - "h1:tSqQC0adIJ0VWRrbChyEGjGuWKwibrz+/YJ2Q1ZOs2Y=", - "h1:tfWnOmzoWOvwOGlUx0HrxCfUZq3YHhlkeEbMccAYiec=", - "zh:23377bd90204b6203b904f48f53edcae3294eb072d8fc18a4531c0cde531a3a1", - "zh:2e55a6ea14cc43b08cf82d43063e96c5c2f58ee953c2628523d0ee918fe3b609", - "zh:4885a817c16fdaaeddc5031edc9594c1f300db0e5b23be7cd76a473e7dcc7b4f", - "zh:6ca7177ad4e5c9d93dee4be1ac0792b37107df04657fddfe0c976f36abdd18b5", - "zh:78bf8eb0a67bae5dede09666676c7a38c9fb8d1b80a90ba06cf36ae268257d6f", - "zh:874b5a99457a3f88e2915df8773120846b63d820868a8f43082193f3dc84adcb", - "zh:95e1e4cf587cde4537ac9dfee9e94270652c812ab31fce3a431778c053abf354", + "h1:6qJfvyWObjLPoUrEC8kNVAJ1ZFFrIgzC1xprMkkoSjo=", + "h1:CQeYyWigNz838zjXKYH9VDkpjqlGB0phcM742YXiNh4=", + "h1:WcdVLFBrCN1lP44ZzCSTR8e8p/4C9BQLAqdszE+jh4M=", + "h1:jy1tY8vUGirfcC9GwSS2Uf01GXcxnNnotvIK/WjI2WI=", + "h1:o6f/hNLxrONTw/QlRbBVctdXNI2aSfrght4wtng6rOU=", + "zh:00f40a3d9593476693a7a72d993fd289f7be374fe3f2799776c6296eb6ff890a", + "zh:1010a9fbf55852a8da3473de4ec0f1fcf29efa85d66f61cbe2b086dbbd7747ae", + "zh:103a5674d1eb1cff05fe35e9baa9875afd18d740868b63f9c0c25eadb5eb4eb7", + "zh:270ac1b7a1327c1456a43df44c0b5cc3e26ed6d8861a709adeea1da684a563f5", + "zh:424362c02c8917c0586f3dd49aca27b7e0c21f5a23374b7045e9be3b5646c028", + "zh:549fa2ea187964ab9a0c354310947ead30e09b3199db1ff377c21d7547d78299", + "zh:6492d2ccc7f7d60e83cd8b7244adc53f30efc17d84b1ffc1b8fd6c385f8255fd", + "zh:66fb7b3b8a357071d26c5996c16d426edf07502a05ac86f4a6f73646ee7d1bbb", + "zh:6ecc05fb466d06ea8945564d2cdb8c2a8827d8cfca1550e9fb7eac0e95920196", + "zh:7932360b627b211dad937d278a8692a6c52bd6c0a71e4ec9e94ccbe825053822", + "zh:97ed1b4a18842c4d56a735329e87b4ef91a47e820e5a5c3c2dd64e293408bfc8", "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", - "zh:a75145b58b241d64570803e6565c72467cd664633df32678755b51871f553e50", - "zh:aa31b13d0b0e8432940d6892a48b6268721fa54a02ed62ee42745186ee32f58d", - "zh:ae4565770f76672ce8e96528cbb66afdade1f91383123c079c7fdeafcb3d2877", - "zh:b99f042c45bf6aa69dd73f3f6d9cbe0b495b30442c526e0b3810089c059ba724", - "zh:bbb38e86d926ef101cefafe8fe090c57f2b1356eac9fc5ec81af310c50375897", - "zh:d03c89988ba4a0bd3cfc8659f951183ae7027aa8018a7ca1e53a300944af59cb", - "zh:d179ef28843fe663fc63169291a211898199009f0d3f63f0a6f65349e77727ec", + "zh:d5e022052011e1984b9c2f8bc5a6b05c909e3b5bf40c3baddf191bf90e3169c2", + "zh:d7e9488b2ce5904efb91c8577b3fe9b0cd599c4cd508f1f163f292930f54fdf0", + "zh:e57cd93d5cd81dd0f446076af6e47a53ce83df2947ec64ed39a1090d4bdf8f0b", ] } provider "registry.terraform.io/hashicorp/random" { - version = "3.7.2" - constraints = "3.7.2" + version = "3.6.0" + constraints = "3.6.0" hashes = [ - "h1:0hcNr59VEJbhZYwuDE/ysmyTS0evkfcLarlni+zATPM=", - "h1:356j/3XnXEKr9nyicLUufzoF4Yr6hRy481KIxRVpK0c=", - "h1:KG4NuIBl1mRWU0KD/BGfCi1YN/j3F7H4YgeeM7iSdNs=", - "h1:Lmv2TxyKKm9Vt4uxcPZHw1uf0Ax/yYizJlilbLSZN8E=", - "h1:hkKSY5xI4R1H4Yrg10HHbtOoxZif2dXa9HFPSbaVg5o=", - "zh:14829603a32e4bc4d05062f059e545a91e27ff033756b48afbae6b3c835f508f", - "zh:1527fb07d9fea400d70e9e6eb4a2b918d5060d604749b6f1c361518e7da546dc", - "zh:1e86bcd7ebec85ba336b423ba1db046aeaa3c0e5f921039b3f1a6fc2f978feab", - "zh:24536dec8bde66753f4b4030b8f3ef43c196d69cccbea1c382d01b222478c7a3", - 
"zh:29f1786486759fad9b0ce4fdfbbfece9343ad47cd50119045075e05afe49d212", - "zh:4d701e978c2dd8604ba1ce962b047607701e65c078cb22e97171513e9e57491f", + "h1:5KeoKSVKVHJW308uiTgslxFbjQAdWzBGUFK68vgMRWY=", + "h1:I8MBeauYA8J8yheLJ8oSMWqB0kovn16dF/wKZ1QTdkk=", + "h1:R5Ucn26riKIEijcsiOMBR3uOAjuOMfI1x7XvH4P6B1w=", + "h1:p6WG1IPHnqx1fnJVKNjv733FBaArIugqy58HRZnpPCk=", + "h1:t0mRdJzegohRKhfdoQEJnv3JRISSezJRblN0HIe67vo=", + "zh:03360ed3ecd31e8c5dac9c95fe0858be50f3e9a0d0c654b5e504109c2159287d", + "zh:1c67ac51254ba2a2bb53a25e8ae7e4d076103483f55f39b426ec55e47d1fe211", + "zh:24a17bba7f6d679538ff51b3a2f378cedadede97af8a1db7dad4fd8d6d50f829", + "zh:30ffb297ffd1633175d6545d37c2217e2cef9545a6e03946e514c59c0859b77d", + "zh:454ce4b3dbc73e6775f2f6605d45cee6e16c3872a2e66a2c97993d6e5cbd7055", "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", - "zh:7b8434212eef0f8c83f5a90c6d76feaf850f6502b61b53c329e85b3b281cba34", - "zh:ac8a23c212258b7976e1621275e3af7099e7e4a3d4478cf8d5d2a27f3bc3e967", - "zh:b516ca74431f3df4c6cf90ddcdb4042c626e026317a33c53f0b445a3d93b720d", - "zh:dc76e4326aec2490c1600d6871a95e78f9050f9ce427c71707ea412a2f2f1a62", - "zh:eac7b63e86c749c7d48f527671c7aee5b4e26c10be6ad7232d6860167f99dbb0", + "zh:91df0a9fab329aff2ff4cf26797592eb7a3a90b4a0c04d64ce186654e0cc6e17", + "zh:aa57384b85622a9f7bfb5d4512ca88e61f22a9cea9f30febaa4c98c68ff0dc21", + "zh:c4a3e329ba786ffb6f2b694e1fd41d413a7010f3a53c20b432325a94fa71e839", + "zh:e2699bc9116447f96c53d55f2a00570f982e6f9935038c3810603572693712d0", + "zh:e747c0fd5d7684e5bfad8aa0ca441903f15ae7a98a737ff6aca24ba223207e2c", + "zh:f1ca75f417ce490368f047b63ec09fd003711ae48487fba90b4aba2ccf71920e", ] } diff --git a/terraform/infrastructure/aws/main.tf b/terraform/infrastructure/aws/main.tf index 6c85d2817..9204e638e 100644 --- a/terraform/infrastructure/aws/main.tf +++ b/terraform/infrastructure/aws/main.tf @@ -2,11 +2,11 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "5.98.0" + version = "5.37.0" } random = { source = "hashicorp/random" - version = "3.7.2" + version = "3.6.0" } } } @@ -29,7 +29,6 @@ locals { { name = "recovery", port = "9999", health_check = "TCP" }, { name = "join", port = "30090", health_check = "TCP" }, var.debug ? [{ name = "debugd", port = "4000", health_check = "TCP" }] : [], - var.emergency_ssh ? [{ name = "ssh", port = "22", health_check = "TCP" }] : [], ]) target_group_arns = { control-plane : [ @@ -49,20 +48,12 @@ locals { // example: given "name-1234567890.region.elb.amazonaws.com" it will return "*.region.elb.amazonaws.com" wildcard_lb_dns_name = replace(aws_lb.front_end.dns_name, "/^[^.]*\\./", "*.") - tags = merge( - var.additional_tags, - { constellation-uid = local.uid } - ) + tags = { + constellation-uid = local.uid, + } in_cluster_endpoint = aws_lb.front_end.dns_name out_of_cluster_endpoint = var.internal_load_balancer && var.debug ? 
module.jump_host[0].ip : local.in_cluster_endpoint - revision = 1 -} - -# A way to force replacement of resources if the provider does not want to replace them -# see: https://developer.hashicorp.com/terraform/language/resources/terraform-data#example-usage-data-for-replace_triggered_by -resource "terraform_data" "replacement" { - input = local.revision } resource "random_id" "uid" { @@ -221,5 +212,4 @@ module "jump_host" { ports = [for port in local.load_balancer_ports : port.port] security_groups = [aws_security_group.security_group.id] iam_instance_profile = var.iam_instance_profile_name_worker_nodes - additional_tags = var.additional_tags } diff --git a/terraform/infrastructure/aws/modules/instance_group/main.tf b/terraform/infrastructure/aws/modules/instance_group/main.tf index f2ae997ea..3a2e92fff 100644 --- a/terraform/infrastructure/aws/modules/instance_group/main.tf +++ b/terraform/infrastructure/aws/modules/instance_group/main.tf @@ -2,11 +2,11 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "5.98.0" + version = "5.37.0" } random = { source = "hashicorp/random" - version = "3.7.2" + version = "3.6.0" } } } @@ -51,6 +51,15 @@ resource "aws_launch_template" "launch_template" { # use "disabled" to disable SEV-SNP (but still require SNP-capable hardware) # use null to leave the setting unset (allows non-SNP-capable hardware to be used) amd_sev_snp = var.enable_snp ? "enabled" : null + # Disable SMT. We are already disabling it inside the image. + # Disabling SMT only in the image, not in the Hypervisor creates problems. + # Thus, also disable it in the Hypervisor. + # TODO(derpsteb): reenable once AWS confirms it's safe to do so. + # threads_per_core = 1 + # When setting threads_per_core we also have to set core_count. + # For the currently supported SNP instance families (C6a, M6a, R6a) default_cores + # equals the maximum number of available cores. 
+ # core_count = data.aws_ec2_instance_type.instance_data.default_cores } lifecycle { diff --git a/terraform/infrastructure/aws/modules/jump_host/main.tf b/terraform/infrastructure/aws/modules/jump_host/main.tf index f2a34429e..dc3df3e2d 100644 --- a/terraform/infrastructure/aws/modules/jump_host/main.tf +++ b/terraform/infrastructure/aws/modules/jump_host/main.tf @@ -2,7 +2,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "5.98.0" + version = "5.37.0" } } } @@ -26,9 +26,9 @@ resource "aws_instance" "jump_host" { subnet_id = var.subnet_id vpc_security_group_ids = var.security_groups - tags = merge(var.additional_tags, { - "Name" = "${var.base_name}-jump-host", - }) + tags = { + "Name" = "${var.base_name}-jump-host" + } user_data = < o + dynamic "security_rule" { + for_each = concat( + local.ports, + [{ name = "nodeports", port = local.ports_node_range, priority = 200 }] + ) + content { + name = security_rule.value.name + priority = security_rule.value.priority + direction = "Inbound" + access = "Allow" + protocol = "Tcp" + source_port_range = "*" + destination_port_range = security_rule.value.port + source_address_prefix = "*" + destination_address_prefix = "*" + } } - name = "${each.value.name}-new" - priority = each.value.priority + 10 # offset to not overlap with old rules - direction = "Inbound" - access = "Allow" - protocol = "Tcp" - source_port_range = "*" - destination_port_range = each.value.port - source_address_prefix = "*" - destination_address_prefix = "*" - resource_group_name = var.resource_group - network_security_group_name = azurerm_network_security_group.security_group.name } module "scale_set_group" { @@ -270,8 +253,14 @@ module "scale_set_group" { image_id = var.image_id network_security_group_id = azurerm_network_security_group.security_group.id subnet_id = azurerm_subnet.node_subnet.id - backend_address_pool_ids = each.value.role == "control-plane" ? [module.loadbalancer_backend_control_plane.backendpool_id] : [] - marketplace_image = var.marketplace_image + backend_address_pool_ids = each.value.role == "control-plane" ? [ + azurerm_lb_backend_address_pool.all.id, + module.loadbalancer_backend_control_plane.backendpool_id + ] : [ + azurerm_lb_backend_address_pool.all.id, + module.loadbalancer_backend_worker.backendpool_id + ] + marketplace_image = var.marketplace_image } module "jump_host" { @@ -283,7 +272,6 @@ module "jump_host" { subnet_id = azurerm_subnet.loadbalancer_subnet[0].id ports = [for port in local.ports : port.port] lb_internal_ip = azurerm_lb.loadbalancer.frontend_ip_configuration[0].private_ip_address - tags = var.additional_tags } data "azurerm_subscription" "current" { diff --git a/terraform/infrastructure/azure/modules/jump_host/main.tf b/terraform/infrastructure/azure/modules/jump_host/main.tf index ba7d5f726..74a540588 100644 --- a/terraform/infrastructure/azure/modules/jump_host/main.tf +++ b/terraform/infrastructure/azure/modules/jump_host/main.tf @@ -3,7 +3,6 @@ resource "azurerm_linux_virtual_machine" "jump_host" { resource_group_name = var.resource_group location = var.location size = "Standard_D2as_v5" - tags = var.tags network_interface_ids = [ azurerm_network_interface.jump_host.id, @@ -29,7 +28,7 @@ resource "azurerm_linux_virtual_machine" "jump_host" { } boot_diagnostics { - storage_account_uri = null + } user_data = base64encode(< name... @@ -50,13 +48,6 @@ locals { ] in_cluster_endpoint = var.internal_load_balancer ? 
google_compute_address.loadbalancer_ip_internal[0].address : google_compute_global_address.loadbalancer_ip[0].address out_of_cluster_endpoint = var.debug && var.internal_load_balancer ? module.jump_host[0].ip : local.in_cluster_endpoint - revision = 1 -} - -# A way to force replacement of resources if the provider does not want to replace them -# see: https://developer.hashicorp.com/terraform/language/resources/terraform-data#example-usage-data-for-replace_triggered_by -resource "terraform_data" "replacement" { - input = local.revision } resource "random_id" "uid" { @@ -81,10 +72,12 @@ resource "google_compute_subnetwork" "vpc_subnetwork" { description = "Constellation VPC subnetwork" network = google_compute_network.vpc_network.id ip_cidr_range = local.cidr_vpc_subnet_nodes - secondary_ip_range { - range_name = local.name - ip_cidr_range = local.cidr_vpc_subnet_pods - } + secondary_ip_range = [ + { + range_name = local.name, + ip_cidr_range = local.cidr_vpc_subnet_pods, + } + ] } resource "google_compute_subnetwork" "proxy_subnet" { @@ -161,29 +154,27 @@ resource "google_compute_firewall" "firewall_internal_pods" { } module "instance_group" { - source = "./modules/instance_group" - for_each = var.node_groups - base_name = local.name - node_group_name = each.key - role = each.value.role - zone = each.value.zone - uid = local.uid - instance_type = each.value.instance_type - initial_count = each.value.initial_count - image_id = var.image_id - disk_size = each.value.disk_size - disk_type = each.value.disk_type - network = google_compute_network.vpc_network.id - subnetwork = google_compute_subnetwork.vpc_subnetwork.id - alias_ip_range_name = google_compute_subnetwork.vpc_subnetwork.secondary_ip_range[0].range_name - kube_env = local.kube_env - debug = var.debug - named_ports = each.value.role == "control-plane" ? local.control_plane_named_ports : [] - labels = local.labels - init_secret_hash = local.init_secret_hash - custom_endpoint = var.custom_endpoint - cc_technology = var.cc_technology - iam_service_account_vm = var.iam_service_account_vm + source = "./modules/instance_group" + for_each = var.node_groups + base_name = local.name + node_group_name = each.key + role = each.value.role + zone = each.value.zone + uid = local.uid + instance_type = each.value.instance_type + initial_count = each.value.initial_count + image_id = var.image_id + disk_size = each.value.disk_size + disk_type = each.value.disk_type + network = google_compute_network.vpc_network.id + subnetwork = google_compute_subnetwork.vpc_subnetwork.id + alias_ip_range_name = google_compute_subnetwork.vpc_subnetwork.secondary_ip_range[0].range_name + kube_env = local.kube_env + debug = var.debug + named_ports = each.value.role == "control-plane" ? 
local.control_plane_named_ports : [] + labels = local.labels + init_secret_hash = local.init_secret_hash + custom_endpoint = var.custom_endpoint } resource "google_compute_address" "loadbalancer_ip_internal" { @@ -193,7 +184,6 @@ resource "google_compute_address" "loadbalancer_ip_internal" { subnetwork = google_compute_subnetwork.ilb_subnet[0].id purpose = "SHARED_LOADBALANCER_VIP" address_type = "INTERNAL" - labels = local.labels } resource "google_compute_global_address" "loadbalancer_ip" { @@ -236,7 +226,7 @@ module "jump_host" { base_name = local.name zone = var.zone subnetwork = google_compute_subnetwork.vpc_subnetwork.id - labels = var.additional_labels + labels = local.labels lb_internal_ip = google_compute_address.loadbalancer_ip_internal[0].address ports = [for port in local.control_plane_named_ports : port.port] } diff --git a/terraform/infrastructure/gcp/modules/instance_group/main.tf b/terraform/infrastructure/gcp/modules/instance_group/main.tf index b24f5d61f..2681c4d47 100644 --- a/terraform/infrastructure/gcp/modules/instance_group/main.tf +++ b/terraform/infrastructure/gcp/modules/instance_group/main.tf @@ -2,12 +2,12 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = "6.36.0" + version = "5.17.0" } random = { source = "hashicorp/random" - version = "3.7.2" + version = "3.6.0" } } } @@ -33,13 +33,8 @@ resource "google_compute_instance_template" "template" { confidential_instance_config { enable_confidential_compute = true - confidential_instance_type = var.cc_technology == "SEV_SNP" ? "SEV_SNP" : null } - # If SEV-SNP is used, we have to explicitly select a Milan processor, as per - # https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/compute_instance_template#confidential_instance_type - min_cpu_platform = var.cc_technology == "SEV_SNP" ? "AMD Milan" : null - disk { disk_size_gb = 10 source_image = var.image_id @@ -61,7 +56,7 @@ resource "google_compute_instance_template" "template" { metadata = { kube-env = var.kube_env constellation-init-secret-hash = var.init_secret_hash - serial-port-enable = "TRUE" + serial-port-enable = var.debug ? "TRUE" : "FALSE" } network_interface { @@ -77,11 +72,17 @@ resource "google_compute_instance_template" "template" { on_host_maintenance = "TERMINATE" } - # Define all IAM access via the service account and not via scopes: - # See: https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/compute_instance_template#nested_service_account service_account { - email = var.iam_service_account_vm - scopes = ["cloud-platform"] + scopes = [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/servicecontrol", + "https://www.googleapis.com/auth/service.management.readonly", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring.write", + "https://www.googleapis.com/auth/trace.append", + "https://www.googleapis.com/auth/cloud-platform", + ] } shielded_instance_config { diff --git a/terraform/infrastructure/gcp/modules/instance_group/variables.tf b/terraform/infrastructure/gcp/modules/instance_group/variables.tf index e4d2cbe5c..f4b9a7cdb 100644 --- a/terraform/infrastructure/gcp/modules/instance_group/variables.tf +++ b/terraform/infrastructure/gcp/modules/instance_group/variables.tf @@ -99,18 +99,3 @@ variable "custom_endpoint" { type = string description = "Custom endpoint to use for the Kubernetes API server. 
If not set, the default endpoint will be used." } - -variable "cc_technology" { - type = string - description = "The confidential computing technology to use for the nodes. One of `SEV`, `SEV_SNP`." - validation { - condition = contains(["SEV", "SEV_SNP"], var.cc_technology) - error_message = "The confidential computing technology has to be 'SEV' or 'SEV_SNP'." - } -} - -variable "iam_service_account_vm" { - type = string - default = "" - description = "IAM service account used for the VMs" -} diff --git a/terraform/infrastructure/gcp/modules/internal_load_balancer/main.tf b/terraform/infrastructure/gcp/modules/internal_load_balancer/main.tf index 7c73433ce..2589ba1be 100644 --- a/terraform/infrastructure/gcp/modules/internal_load_balancer/main.tf +++ b/terraform/infrastructure/gcp/modules/internal_load_balancer/main.tf @@ -2,7 +2,7 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = "6.36.0" + version = "5.17.0" } } } diff --git a/terraform/infrastructure/gcp/modules/jump_host/main.tf b/terraform/infrastructure/gcp/modules/jump_host/main.tf index 3b5682526..a0a2e4c4f 100644 --- a/terraform/infrastructure/gcp/modules/jump_host/main.tf +++ b/terraform/infrastructure/gcp/modules/jump_host/main.tf @@ -2,7 +2,7 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = "6.36.0" + version = "5.17.0" } } } diff --git a/terraform/infrastructure/gcp/modules/loadbalancer/main.tf b/terraform/infrastructure/gcp/modules/loadbalancer/main.tf index 0ee6bc477..0a5074f53 100644 --- a/terraform/infrastructure/gcp/modules/loadbalancer/main.tf +++ b/terraform/infrastructure/gcp/modules/loadbalancer/main.tf @@ -2,7 +2,7 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = "6.36.0" + version = "5.17.0" } } } diff --git a/terraform/infrastructure/gcp/outputs.tf b/terraform/infrastructure/gcp/outputs.tf index 8525bdb13..cbdf9164a 100644 --- a/terraform/infrastructure/gcp/outputs.tf +++ b/terraform/infrastructure/gcp/outputs.tf @@ -45,11 +45,6 @@ output "ip_cidr_node" { description = "CIDR block of the node network." } -output "loadbalancer_address" { - value = var.internal_load_balancer ? google_compute_address.loadbalancer_ip_internal[0].address : google_compute_global_address.loadbalancer_ip[0].address - description = "Public loadbalancer address." -} - # GCP-specific outputs output "project" { diff --git a/terraform/infrastructure/gcp/variables.tf b/terraform/infrastructure/gcp/variables.tf index c29c24391..add9eeffa 100644 --- a/terraform/infrastructure/gcp/variables.tf +++ b/terraform/infrastructure/gcp/variables.tf @@ -60,29 +60,3 @@ variable "zone" { type = string description = "GCP zone to deploy the cluster in." } - -variable "cc_technology" { - type = string - description = "The confidential computing technology to use for the nodes. One of `SEV`, `SEV_SNP`." - validation { - condition = contains(["SEV", "SEV_SNP"], var.cc_technology) - error_message = "The confidential computing technology has to be 'SEV' or 'SEV_SNP'." - } -} - -variable "additional_labels" { - type = map(any) - default = {} - description = "Additional labels that should be given to created recources." -} - -variable "iam_service_account_vm" { - type = string - default = "" - description = "IAM service account used for the VMs" -} -variable "emergency_ssh" { - type = bool - default = false - description = "Wether to expose the SSH port through the public load balancer." 
-} diff --git a/terraform/infrastructure/iam/aws/.terraform.lock.hcl b/terraform/infrastructure/iam/aws/.terraform.lock.hcl index 4914aaf10..afd0d6215 100644 --- a/terraform/infrastructure/iam/aws/.terraform.lock.hcl +++ b/terraform/infrastructure/iam/aws/.terraform.lock.hcl @@ -2,52 +2,51 @@ # Manual edits may be lost in future updates. provider "registry.terraform.io/hashicorp/aws" { - version = "5.98.0" - constraints = "5.98.0" + version = "5.37.0" + constraints = "5.37.0" hashes = [ - "h1:/RMObGCrfJlVoQCf9h88hFkSyLafDXnw6r0yi4gpO80=", - "h1:KgOCdSG6euSc2lquuFlISJU/CzQTRhAO7WoaASxLZRc=", - "h1:neMFK/kP1KT6cTGID+Tkkt8L7PsN9XqwrPDGXVw3WVY=", - "h1:tSqQC0adIJ0VWRrbChyEGjGuWKwibrz+/YJ2Q1ZOs2Y=", - "h1:tfWnOmzoWOvwOGlUx0HrxCfUZq3YHhlkeEbMccAYiec=", - "zh:23377bd90204b6203b904f48f53edcae3294eb072d8fc18a4531c0cde531a3a1", - "zh:2e55a6ea14cc43b08cf82d43063e96c5c2f58ee953c2628523d0ee918fe3b609", - "zh:4885a817c16fdaaeddc5031edc9594c1f300db0e5b23be7cd76a473e7dcc7b4f", - "zh:6ca7177ad4e5c9d93dee4be1ac0792b37107df04657fddfe0c976f36abdd18b5", - "zh:78bf8eb0a67bae5dede09666676c7a38c9fb8d1b80a90ba06cf36ae268257d6f", - "zh:874b5a99457a3f88e2915df8773120846b63d820868a8f43082193f3dc84adcb", - "zh:95e1e4cf587cde4537ac9dfee9e94270652c812ab31fce3a431778c053abf354", + "h1:6qJfvyWObjLPoUrEC8kNVAJ1ZFFrIgzC1xprMkkoSjo=", + "h1:CQeYyWigNz838zjXKYH9VDkpjqlGB0phcM742YXiNh4=", + "h1:WcdVLFBrCN1lP44ZzCSTR8e8p/4C9BQLAqdszE+jh4M=", + "h1:jy1tY8vUGirfcC9GwSS2Uf01GXcxnNnotvIK/WjI2WI=", + "h1:o6f/hNLxrONTw/QlRbBVctdXNI2aSfrght4wtng6rOU=", + "zh:00f40a3d9593476693a7a72d993fd289f7be374fe3f2799776c6296eb6ff890a", + "zh:1010a9fbf55852a8da3473de4ec0f1fcf29efa85d66f61cbe2b086dbbd7747ae", + "zh:103a5674d1eb1cff05fe35e9baa9875afd18d740868b63f9c0c25eadb5eb4eb7", + "zh:270ac1b7a1327c1456a43df44c0b5cc3e26ed6d8861a709adeea1da684a563f5", + "zh:424362c02c8917c0586f3dd49aca27b7e0c21f5a23374b7045e9be3b5646c028", + "zh:549fa2ea187964ab9a0c354310947ead30e09b3199db1ff377c21d7547d78299", + "zh:6492d2ccc7f7d60e83cd8b7244adc53f30efc17d84b1ffc1b8fd6c385f8255fd", + "zh:66fb7b3b8a357071d26c5996c16d426edf07502a05ac86f4a6f73646ee7d1bbb", + "zh:6ecc05fb466d06ea8945564d2cdb8c2a8827d8cfca1550e9fb7eac0e95920196", + "zh:7932360b627b211dad937d278a8692a6c52bd6c0a71e4ec9e94ccbe825053822", + "zh:97ed1b4a18842c4d56a735329e87b4ef91a47e820e5a5c3c2dd64e293408bfc8", "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", - "zh:a75145b58b241d64570803e6565c72467cd664633df32678755b51871f553e50", - "zh:aa31b13d0b0e8432940d6892a48b6268721fa54a02ed62ee42745186ee32f58d", - "zh:ae4565770f76672ce8e96528cbb66afdade1f91383123c079c7fdeafcb3d2877", - "zh:b99f042c45bf6aa69dd73f3f6d9cbe0b495b30442c526e0b3810089c059ba724", - "zh:bbb38e86d926ef101cefafe8fe090c57f2b1356eac9fc5ec81af310c50375897", - "zh:d03c89988ba4a0bd3cfc8659f951183ae7027aa8018a7ca1e53a300944af59cb", - "zh:d179ef28843fe663fc63169291a211898199009f0d3f63f0a6f65349e77727ec", + "zh:d5e022052011e1984b9c2f8bc5a6b05c909e3b5bf40c3baddf191bf90e3169c2", + "zh:d7e9488b2ce5904efb91c8577b3fe9b0cd599c4cd508f1f163f292930f54fdf0", + "zh:e57cd93d5cd81dd0f446076af6e47a53ce83df2947ec64ed39a1090d4bdf8f0b", ] } provider "registry.terraform.io/hashicorp/random" { - version = "3.7.2" - constraints = "3.7.2" + version = "3.6.0" hashes = [ - "h1:0hcNr59VEJbhZYwuDE/ysmyTS0evkfcLarlni+zATPM=", - "h1:356j/3XnXEKr9nyicLUufzoF4Yr6hRy481KIxRVpK0c=", - "h1:KG4NuIBl1mRWU0KD/BGfCi1YN/j3F7H4YgeeM7iSdNs=", - "h1:Lmv2TxyKKm9Vt4uxcPZHw1uf0Ax/yYizJlilbLSZN8E=", - "h1:hkKSY5xI4R1H4Yrg10HHbtOoxZif2dXa9HFPSbaVg5o=", - 
"zh:14829603a32e4bc4d05062f059e545a91e27ff033756b48afbae6b3c835f508f", - "zh:1527fb07d9fea400d70e9e6eb4a2b918d5060d604749b6f1c361518e7da546dc", - "zh:1e86bcd7ebec85ba336b423ba1db046aeaa3c0e5f921039b3f1a6fc2f978feab", - "zh:24536dec8bde66753f4b4030b8f3ef43c196d69cccbea1c382d01b222478c7a3", - "zh:29f1786486759fad9b0ce4fdfbbfece9343ad47cd50119045075e05afe49d212", - "zh:4d701e978c2dd8604ba1ce962b047607701e65c078cb22e97171513e9e57491f", + "h1:5KeoKSVKVHJW308uiTgslxFbjQAdWzBGUFK68vgMRWY=", + "h1:I8MBeauYA8J8yheLJ8oSMWqB0kovn16dF/wKZ1QTdkk=", + "h1:R5Ucn26riKIEijcsiOMBR3uOAjuOMfI1x7XvH4P6B1w=", + "h1:p6WG1IPHnqx1fnJVKNjv733FBaArIugqy58HRZnpPCk=", + "h1:t0mRdJzegohRKhfdoQEJnv3JRISSezJRblN0HIe67vo=", + "zh:03360ed3ecd31e8c5dac9c95fe0858be50f3e9a0d0c654b5e504109c2159287d", + "zh:1c67ac51254ba2a2bb53a25e8ae7e4d076103483f55f39b426ec55e47d1fe211", + "zh:24a17bba7f6d679538ff51b3a2f378cedadede97af8a1db7dad4fd8d6d50f829", + "zh:30ffb297ffd1633175d6545d37c2217e2cef9545a6e03946e514c59c0859b77d", + "zh:454ce4b3dbc73e6775f2f6605d45cee6e16c3872a2e66a2c97993d6e5cbd7055", "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", - "zh:7b8434212eef0f8c83f5a90c6d76feaf850f6502b61b53c329e85b3b281cba34", - "zh:ac8a23c212258b7976e1621275e3af7099e7e4a3d4478cf8d5d2a27f3bc3e967", - "zh:b516ca74431f3df4c6cf90ddcdb4042c626e026317a33c53f0b445a3d93b720d", - "zh:dc76e4326aec2490c1600d6871a95e78f9050f9ce427c71707ea412a2f2f1a62", - "zh:eac7b63e86c749c7d48f527671c7aee5b4e26c10be6ad7232d6860167f99dbb0", + "zh:91df0a9fab329aff2ff4cf26797592eb7a3a90b4a0c04d64ce186654e0cc6e17", + "zh:aa57384b85622a9f7bfb5d4512ca88e61f22a9cea9f30febaa4c98c68ff0dc21", + "zh:c4a3e329ba786ffb6f2b694e1fd41d413a7010f3a53c20b432325a94fa71e839", + "zh:e2699bc9116447f96c53d55f2a00570f982e6f9935038c3810603572693712d0", + "zh:e747c0fd5d7684e5bfad8aa0ca441903f15ae7a98a737ff6aca24ba223207e2c", + "zh:f1ca75f417ce490368f047b63ec09fd003711ae48487fba90b4aba2ccf71920e", ] } diff --git a/terraform/infrastructure/iam/aws/alb_policy.json b/terraform/infrastructure/iam/aws/alb_policy.json deleted file mode 100644 index fe1976170..000000000 --- a/terraform/infrastructure/iam/aws/alb_policy.json +++ /dev/null @@ -1,250 +0,0 @@ -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "iam:CreateServiceLinkedRole" - ], - "Resource": "*", - "Condition": { - "StringEquals": { - "iam:AWSServiceName": "elasticloadbalancing.amazonaws.com" - } - } - }, - { - "Effect": "Allow", - "Action": [ - "ec2:DescribeAccountAttributes", - "ec2:DescribeAddresses", - "ec2:DescribeAvailabilityZones", - "ec2:DescribeInternetGateways", - "ec2:DescribeVpcs", - "ec2:DescribeVpcPeeringConnections", - "ec2:DescribeSubnets", - "ec2:DescribeSecurityGroups", - "ec2:DescribeInstances", - "ec2:DescribeNetworkInterfaces", - "ec2:DescribeTags", - "ec2:GetCoipPoolUsage", - "ec2:DescribeCoipPools", - "ec2:GetSecurityGroupsForVpc", - "ec2:DescribeIpamPools", - "elasticloadbalancing:DescribeLoadBalancers", - "elasticloadbalancing:DescribeLoadBalancerAttributes", - "elasticloadbalancing:DescribeListeners", - "elasticloadbalancing:DescribeListenerCertificates", - "elasticloadbalancing:DescribeSSLPolicies", - "elasticloadbalancing:DescribeRules", - "elasticloadbalancing:DescribeTargetGroups", - "elasticloadbalancing:DescribeTargetGroupAttributes", - "elasticloadbalancing:DescribeTargetHealth", - "elasticloadbalancing:DescribeTags", - "elasticloadbalancing:DescribeTrustStores", - "elasticloadbalancing:DescribeListenerAttributes", - 
"elasticloadbalancing:DescribeCapacityReservation" - ], - "Resource": "*" - }, - { - "Effect": "Allow", - "Action": [ - "cognito-idp:DescribeUserPoolClient", - "acm:ListCertificates", - "acm:DescribeCertificate", - "iam:ListServerCertificates", - "iam:GetServerCertificate", - "waf-regional:GetWebACL", - "waf-regional:GetWebACLForResource", - "waf-regional:AssociateWebACL", - "waf-regional:DisassociateWebACL", - "wafv2:GetWebACL", - "wafv2:GetWebACLForResource", - "wafv2:AssociateWebACL", - "wafv2:DisassociateWebACL", - "shield:GetSubscriptionState", - "shield:DescribeProtection", - "shield:CreateProtection", - "shield:DeleteProtection" - ], - "Resource": "*" - }, - { - "Effect": "Allow", - "Action": [ - "ec2:AuthorizeSecurityGroupIngress", - "ec2:RevokeSecurityGroupIngress" - ], - "Resource": "*" - }, - { - "Effect": "Allow", - "Action": [ - "ec2:CreateSecurityGroup" - ], - "Resource": "*" - }, - { - "Effect": "Allow", - "Action": [ - "ec2:CreateTags" - ], - "Resource": "arn:aws:ec2:*:*:security-group/*", - "Condition": { - "StringEquals": { - "ec2:CreateAction": "CreateSecurityGroup" - }, - "Null": { - "aws:RequestTag/elbv2.k8s.aws/cluster": "false" - } - } - }, - { - "Effect": "Allow", - "Action": [ - "ec2:CreateTags", - "ec2:DeleteTags" - ], - "Resource": "arn:aws:ec2:*:*:security-group/*", - "Condition": { - "Null": { - "aws:RequestTag/elbv2.k8s.aws/cluster": "true", - "aws:ResourceTag/elbv2.k8s.aws/cluster": "false" - } - } - }, - { - "Effect": "Allow", - "Action": [ - "ec2:AuthorizeSecurityGroupIngress", - "ec2:RevokeSecurityGroupIngress", - "ec2:DeleteSecurityGroup" - ], - "Resource": "*", - "Condition": { - "Null": { - "aws:ResourceTag/elbv2.k8s.aws/cluster": "false" - } - } - }, - { - "Effect": "Allow", - "Action": [ - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:CreateTargetGroup" - ], - "Resource": "*", - "Condition": { - "Null": { - "aws:RequestTag/elbv2.k8s.aws/cluster": "false" - } - } - }, - { - "Effect": "Allow", - "Action": [ - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:CreateRule", - "elasticloadbalancing:DeleteRule" - ], - "Resource": "*" - }, - { - "Effect": "Allow", - "Action": [ - "elasticloadbalancing:AddTags", - "elasticloadbalancing:RemoveTags" - ], - "Resource": [ - "arn:aws:elasticloadbalancing:*:*:targetgroup/*/*", - "arn:aws:elasticloadbalancing:*:*:loadbalancer/net/*/*", - "arn:aws:elasticloadbalancing:*:*:loadbalancer/app/*/*" - ], - "Condition": { - "Null": { - "aws:RequestTag/elbv2.k8s.aws/cluster": "true", - "aws:ResourceTag/elbv2.k8s.aws/cluster": "false" - } - } - }, - { - "Effect": "Allow", - "Action": [ - "elasticloadbalancing:AddTags", - "elasticloadbalancing:RemoveTags" - ], - "Resource": [ - "arn:aws:elasticloadbalancing:*:*:listener/net/*/*/*", - "arn:aws:elasticloadbalancing:*:*:listener/app/*/*/*", - "arn:aws:elasticloadbalancing:*:*:listener-rule/net/*/*/*", - "arn:aws:elasticloadbalancing:*:*:listener-rule/app/*/*/*" - ] - }, - { - "Effect": "Allow", - "Action": [ - "elasticloadbalancing:ModifyLoadBalancerAttributes", - "elasticloadbalancing:SetIpAddressType", - "elasticloadbalancing:SetSecurityGroups", - "elasticloadbalancing:SetSubnets", - "elasticloadbalancing:DeleteLoadBalancer", - "elasticloadbalancing:ModifyTargetGroup", - "elasticloadbalancing:ModifyTargetGroupAttributes", - "elasticloadbalancing:DeleteTargetGroup", - "elasticloadbalancing:ModifyListenerAttributes", - "elasticloadbalancing:ModifyCapacityReservation", - "elasticloadbalancing:ModifyIpPools" 
- ], - "Resource": "*", - "Condition": { - "Null": { - "aws:ResourceTag/elbv2.k8s.aws/cluster": "false" - } - } - }, - { - "Effect": "Allow", - "Action": [ - "elasticloadbalancing:AddTags" - ], - "Resource": [ - "arn:aws:elasticloadbalancing:*:*:targetgroup/*/*", - "arn:aws:elasticloadbalancing:*:*:loadbalancer/net/*/*", - "arn:aws:elasticloadbalancing:*:*:loadbalancer/app/*/*" - ], - "Condition": { - "StringEquals": { - "elasticloadbalancing:CreateAction": [ - "CreateTargetGroup", - "CreateLoadBalancer" - ] - }, - "Null": { - "aws:RequestTag/elbv2.k8s.aws/cluster": "false" - } - } - }, - { - "Effect": "Allow", - "Action": [ - "elasticloadbalancing:RegisterTargets", - "elasticloadbalancing:DeregisterTargets" - ], - "Resource": "arn:aws:elasticloadbalancing:*:*:targetgroup/*/*" - }, - { - "Effect": "Allow", - "Action": [ - "elasticloadbalancing:SetWebAcl", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:AddListenerCertificates", - "elasticloadbalancing:RemoveListenerCertificates", - "elasticloadbalancing:ModifyRule", - "elasticloadbalancing:SetRulePriorities" - ], - "Resource": "*" - } - ] -} diff --git a/terraform/infrastructure/iam/aws/main.tf b/terraform/infrastructure/iam/aws/main.tf index c1298689e..2394841b3 100644 --- a/terraform/infrastructure/iam/aws/main.tf +++ b/terraform/infrastructure/iam/aws/main.tf @@ -2,11 +2,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "5.98.0" - } - random = { - source = "hashicorp/random" - version = "3.7.2" + version = "5.37.0" } } } @@ -246,20 +242,3 @@ resource "aws_iam_role_policy_attachment" "csi_driver_policy_control_plane" { role = aws_iam_role.control_plane_role.name policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy" } - -// This policy is required by the AWS load balancer controller and can be found at -// https://github.com/kubernetes-sigs/aws-load-balancer-controller/blob/b44633a/docs/install/iam_policy.json. -resource "aws_iam_policy" "lb_policy" { - name = "${var.name_prefix}_lb_policy" - policy = file("${path.module}/alb_policy.json") -} - -resource "aws_iam_role_policy_attachment" "attach_lb_policy_worker" { - role = aws_iam_role.worker_node_role.name - policy_arn = aws_iam_policy.lb_policy.arn -} - -resource "aws_iam_role_policy_attachment" "attach_lb_policy_control_plane" { - role = aws_iam_role.control_plane_role.name - policy_arn = aws_iam_policy.lb_policy.arn -} diff --git a/terraform/infrastructure/iam/azure/.terraform.lock.hcl b/terraform/infrastructure/iam/azure/.terraform.lock.hcl index 931a0a0fb..65ca4053f 100644 --- a/terraform/infrastructure/iam/azure/.terraform.lock.hcl +++ b/terraform/infrastructure/iam/azure/.terraform.lock.hcl @@ -2,61 +2,49 @@ # Manual edits may be lost in future updates. 
provider "registry.terraform.io/hashicorp/azuread" { - version = "3.4.0" - constraints = "3.4.0" + version = "2.43.0" + constraints = "2.43.0" hashes = [ - "h1:/+HxxFGciTGycNTDUPkcbALGsV+qcCPit8UyYX1Beho=", - "h1:2rAM1pT8sXeViTLMU7Tvd5sDSYuZeavFPVbe5xWgJ3A=", - "h1:CkN8DpmNYWU9mvlrmOpzFdPofGjKgrnL8leImrSL9Uk=", - "h1:D4wPDjiMNuWQcB1cYQIbS9M68QwQ2BQ8TdcamU3ig3k=", - "h1:ELINo/Jm4IDo5uZp1deObsOmcx3Lco1IEEowVVDzgtw=", - "h1:KeA9a60dssTVEFWkAuJ2lxztHyYB9bKmUfYanW2POSo=", - "h1:Rxzm2bElqWCmoNZvs/kjr4vPocw4v9Bw4HQHOtYWjzg=", - "h1:k4Gpl4uNxQgm8hDS3jjnzHEmp72Vrw6+bxNn4m4rxu0=", - "h1:mpZ2ZiLutwDNqOLoZwNKZgHSGn5vMvtA+00GKbaWb5U=", - "h1:qQNUA7kQIX30KkARDCpiZy1MFL/j9LiuSCzbOTHSsjo=", - "h1:rfO7hSYJLdpff/s2iuooHtxNacwKq5n03IwvVQ+xbSI=", - "zh:035a6d6e6aa7f117969702873c27344ec4ddd88f676cebc1088316fb26d5c95a", - "zh:11f86935174d8223699cae00b3a705ded1d75a4efb6d4723d3788f5446e1eaa5", - "zh:16d52b5bf8eefa98cd2793122be0c5a7b41767caedbd8a08786aeefb3d0c6856", + "h1:/wPCaaEC7By9zWMxYmPMiLNu+zuYuFUyl5mkhCwwi8w=", + "h1:83hGDTWccRJXsKGg1VeYJkBeHwE2cCTRKFCZud4iWQo=", + "h1:bp9HeofaEJDiWtyLMwIEYVgxP5yoMs/dQhjCYsbXU34=", + "h1:jmvCGhwc+jUip0Hy4PI1ZiO/11vdQ3TTp3YaBTKFGiQ=", + "h1:tU/kGFohqNia+uVFT1ujYKZRH2lvEP73LUhQDJtO1w4=", "zh:1c3e89cf19118fc07d7b04257251fc9897e722c16e0a0df7b07fcd261f8c12e7", - "zh:2fe201c7a1c17279f7674c160861296015d9b9d120de598999d169398ce285c9", - "zh:37bb91dff5b751f0c86a02a12980bdb5935d2ca6cdd249d9eef7eca619f628c0", - "zh:7533a35300e411893a024f858e722e50107dfd7212236d396ebf2ca2b13b7bcc", - "zh:88a95b2cb606439ae2f60ebe63a800580e232e94bc1b02ac7d25d25be10cb511", - "zh:c7b138b6bc34d8a1eff91742b38bce1718d9c50c343393fdfc918bef022ed74f", - "zh:cab09fda45b8a9a9896aedb22f5829745b7e9a01abb8077696bccb170fb01b5f", - "zh:cc4a29f074f1cc25f3abd3a41444f68307f3eb08c4d5f79f60a012b632c1ea05", - "zh:e30e9fe8e04271431cb730a1a888b6da5afeae385e2e53ff7b4114066c1250db", + "zh:2225e2e97ccba4ed1d84f1d430f1ebd837943fe187e57f24f1763172dda61556", + "zh:24708cb09411a766ff397e05cae49058ca38edc718db303a7faef9823402737d", + "zh:3a61167ff58d585abd56233731a8fd649c7c04272bd5b878f963883496e19192", + "zh:433f557634b5e663caaeb68c504c7771c186eba7ecf5d4030437956bc6599ecb", + "zh:5e8cc3b3bcc22d217cf588c821ce091c7d40f0815aecc1addde5355c17cb381d", + "zh:7b008c376097cd60259d43f58fcb33fee56fe9aebb4a94ed7958868ee501d7d0", + "zh:908907fd38537583ea60dccbf73055ae1a2963acc399be4f8e9a6616a9a537db", + "zh:966586cfd850606bab7dd2242c5b9e35d3a7178f64eaac0b44dea54c104c8169", + "zh:a624286401913d3ec44b4825e2c5ae38ac94fb4950aeed8f4b91d09c898f8cce", + "zh:b5171a4463fd0d9b0ce2a08605499b6d99fe93d6fc3f4143e9a26201065cc90a", + "zh:cdcfeeb9db4dbdc6f1fb5644453b37dbd0025b4f3127e9ff348f1e62d66b493e", ] } provider "registry.terraform.io/hashicorp/azurerm" { - version = "4.29.0" - constraints = "4.29.0" + version = "3.92.0" + constraints = "3.92.0" hashes = [ - "h1:Bde/KCh2xGVCBx/JnixC3I2fmoRTwHXgsapfQ5QG8eg=", - "h1:IyINmgNiLfWx3Istkt5Mz+IJrDhSMhj3/qQeJlC4qS4=", - "h1:KEJAt0mJAACyIKUB5mCk/wqtxKMhivdeW8w6byz5Ll4=", - "h1:Y4gTSs+ZE5YSJVXG2qmsbXmv9Daq5aGM8Ip/GE6nev8=", - "h1:YtcHvTVfVBKbMCp9esoj527R1UK/hU0Zmo3pyQb8YhQ=", - "h1:atJdgnuqk+w3v4Zzhw2B1FZeYYA4su9JfanwNsx+c8o=", - "h1:c9tmtEdVTb9siGa3hVxPrMVl9ij5zijnD02JMHcHjrE=", - "h1:eN0KhMGVepEPpSA+YN5Kaz/v9PFKCafbkqqBzpLJf+g=", - "h1:hNVKlXTx2duXnR6SNKtyQMx7zSIlrxBu66Z0gbyfv3c=", - "h1:jC2GJo4VzTKnKociUDLVv8/+u9Mz+4scZrqbEasV+Y0=", - "h1:m3xYvc9X0pec0Zd1dpn82ALQ+6vwz56RnF/3CbkI2Eg=", - "zh:16590eea68c7c8aedb7af19f690eb34ab6636ef417b3fa9e06ca038fdb4c44b8", - 
"zh:1c907dfe44d00a54aa63d443004add90188f9a53ef3e919aff8aba92f715f42b", - "zh:258a0ff4198d80cae33c89091cd556d84c1b522c4416458484f23719a0cdf4cc", - "zh:587f5e9b2b33e51b18fb0f372025c961c3f57f2958b388459dd8432412650bda", - "zh:6318ca03bd9dbac272a75bb193688c7d4c4b45c7460289820528f31bcd6c3fe9", - "zh:63e4e8128e26e4c3e0c3b6582ef527245eb35eb5c80ad278dc675ebdf71edeaf", - "zh:845c898a27a84a15ba26e95ee66ac9563f81bc672b5ca216af82d87fe09bd5f8", - "zh:8fa6434fa212d5501185f0adc985d3a3c1e0f449c78f040a4ca640cb1e809cac", - "zh:9b49c0d72ab19aab43b2b48d23c5dddbbe29afae1569a987e6f20ed4c80ddf4c", - "zh:b14cc1ee5e3acf52490de7dd9791cce7953c0ee4bcccf0306aafd256568bd69f", - "zh:cd444836b2579fa42bfca2ae6145d394c41b6438b1ae01078c060bfaf803bb4d", + "h1:+bZPRgjpUA6LivvMIS1UdwRWUgzoYBp/nhEpbL4aXHM=", + "h1:D5lngW1uKlPM2EUCdNG1f2FvPGHYRklDFN8b2jPCIpM=", + "h1:nTP2ZYfuEpMP+PkkgRdhQphNmWWJuKdE9Z4TzeC7ydo=", + "h1:sqVZftg9rJGDiiPiY9l1V/a+5CWkxNcj22sBu8HsJBY=", + "h1:swoRk2drVrD8v7GrW/2OJSk06v2I2zGk3XPAgBDbw9A=", + "zh:04292e149676ba956d738e85faeb6d6ebd3759e8310f1c4155e67402eb5ae0f7", + "zh:0963b4528f25d01d5c733e17de31e2c0b94790fd02931b2a47cd051b20dd0d96", + "zh:133563e16e8a4a7139ac11d94e68de8d1d5e3a62a532e64ac936735d7b1e04db", + "zh:2b219f1b40881d3bdd89257c916f255a7e36904ddc65dbbafee80763661b4636", + "zh:4b4e11a4e3716b290b3b173dfd15b06814b2f6f148f663e3c67a677c95526339", + "zh:5607c7bff3019c3b31488be1a8a9d77a96d27b199a1d8b789e4c2d4c90805674", + "zh:6469aef7728947dacb47785e6082d2d95ebd336a8798f3be6cece5a13145108c", + "zh:69e563f4e6397e1ebaef6f554d296238ec1d9dadc4b865c36743bd8366a888da", + "zh:887a223b7a9ec4e66634dbb65d9dcc53f0be06d058d9a209927ad49702ae790c", + "zh:b03c273367885c5489a24c31859af81ea58cb169431c0da97a175945ec968f53", + "zh:dd7b704ceaf98ce591e111a9c5085465c946f4f8f357089c0e27e990a669ba39", "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", ] } diff --git a/terraform/infrastructure/iam/azure/main.tf b/terraform/infrastructure/iam/azure/main.tf index fe1672165..c58eb12b5 100644 --- a/terraform/infrastructure/iam/azure/main.tf +++ b/terraform/infrastructure/iam/azure/main.tf @@ -2,11 +2,11 @@ terraform { required_providers { azurerm = { source = "hashicorp/azurerm" - version = "4.29.0" + version = "3.92.0" } azuread = { source = "hashicorp/azuread" - version = "3.4.0" + version = "2.43.0" } } } @@ -18,10 +18,6 @@ provider "azurerm" { prevent_deletion_if_contains_resources = false } } - subscription_id = var.subscription_id - # This enables all resource providers. - # In the future, we might want to use `resource_providers_to_register` to registers just the ones we need. - resource_provider_registrations = "all" } # Configure Azure active directory provider diff --git a/terraform/infrastructure/iam/azure/variables.tf b/terraform/infrastructure/iam/azure/variables.tf index 28c75e840..4a63ba609 100644 --- a/terraform/infrastructure/iam/azure/variables.tf +++ b/terraform/infrastructure/iam/azure/variables.tf @@ -1,9 +1,3 @@ -variable "subscription_id" { - type = string - description = "Azure subscription ID. This can also be sourced from the ARM_SUBSCRIPTION_ID environment variable: https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs#subscription_id" - default = "" -} - variable "resource_group_name" { type = string description = "Name for the resource group the cluster should reside in." 
diff --git a/terraform/infrastructure/iam/gcp/.terraform.lock.hcl b/terraform/infrastructure/iam/gcp/.terraform.lock.hcl index 2fbbe32c2..2fcd905b4 100644 --- a/terraform/infrastructure/iam/gcp/.terraform.lock.hcl +++ b/terraform/infrastructure/iam/gcp/.terraform.lock.hcl @@ -2,54 +2,48 @@ # Manual edits may be lost in future updates. provider "registry.terraform.io/hashicorp/google" { - version = "6.36.0" - constraints = "6.36.0" + version = "5.17.0" + constraints = "5.17.0" hashes = [ - "h1:7UUG6owE+FafOHh0JK8BTM0yOlO7/XbHiEDh087fd0E=", - "h1:80HOG2KFPy07zgflMevKhsKrU8tEFvVncVMZOiffqV0=", - "h1:8aaMc8jT1GL3I15Aiz9Lr6k8+u79Fo3D6KA4Np+JUI8=", - "h1:aQ8E+XveXDO6zbqO1upcKqkrWr+5dwxowmQgMZ/zgrM=", - "h1:e/3n+WKR/ikY8m/MIAHOvH1HFY1fng76SEpnvGStxMU=", - "h1:fcjI1cXeKYJDWcgUXL+UPD23HIN4IDJjlc7XVD3i3Bc=", - "h1:iQx5xJnQvBN2aUdQbWZQPmtuT0xjfaNpFJD3qzOvC7U=", - "h1:kRbYOLtROkOjc3XMgptzFWKdgRCjyf2FH0UTlCJXkg8=", - "h1:lxi5P8TBMfiKk88FI4xM2Lnwlk2+Q3r9oa4yrg83x1w=", - "h1:m085EpkXRmg1TqRbPP6RxdVmlwJJujvyfMtznvURS04=", - "h1:vvvbnteZLIMosgsxO6YXDgxGYdU5BuSxPD1w4f4LV6c=", - "zh:0a67432c04d4c74829632cc5669b8c5988f837259333307e07c2915a0529b3fb", - "zh:4559afe21bb59e8fb9e3e7414ea65b6be233e8f217e8683dd0e9c347ecadf910", - "zh:58a34fe3b28271deba9a44db1e704c2844f2e30e252ded5f200a8f9af170d52d", - "zh:6b07f388b4fb2189d9fe3a058831e5c755092f7bd5f7388a1c0d9583f8c43ef0", - "zh:6fae25f93bf4a6fd59ce0f9d05e8551b65b4be7084f5e6e5f528ab011dbbef6a", - "zh:73be19906c569f1d46b8f88d3e846bd8dabd6ed65d8ba9a91f67da5365b534dc", - "zh:7b047330342f600e92c02e248f72eefae1a2e01c16ef45cc533942eb73c49c06", - "zh:9fecf5cdf1a16b9b9c1e83dc6129a40bfbc9e640252d2afddd34dc61de213330", - "zh:c557ea7357880615af290452ff06bc23bd3821132783240bd7cd75aadf16a04a", - "zh:dc11ad9a0b595b70490326563ea5f77de9c69aba0ee959971392e46b10a3c246", - "zh:ed8dc5d5ce3d8e3c1648c3fc6f48a19da9ed38d0157c33019c74b7790480c29f", + "h1:9DKCaGp9EFKDLWIOWI3yA/RgWTMh0EMD6+iggVXC9l0=", + "h1:JEfDiodirnMqwNaub/anXoOtWt68aEN80QtPJxg3jsc=", + "h1:TANQI64JuScQ2LTITQqz7eh1RjhYDItdbI5p1aBOtXY=", + "h1:dT3UftIyARC7YjS4yurPlNS7WJAHICDHMXSluAAvavA=", + "h1:lu84RYioCT4OxXbFBdqom4QvSPAjMkEyHPSIAxuS7oo=", + "zh:31b4d485ee66e6ff2eb1d8e476e694904447ce2b7143a2e067e4b80a84958d13", + "zh:32e86a51c4b0b29b7a18dd95616ea2976f08a4a7385e00f2bcab266217ee4320", + "zh:357f352bf04e7bc10d61d49296bf6503f31a3db0500169cb532afde7d318643e", + "zh:4b4637ca397cc771136edf7ec5578b5ab8631a8955a86d4fce3b8c40ca8c26b4", + "zh:4fe198b7427f7bf04270a5491a0352379c2b0a1caf12e206e6e224ceb085f56a", + "zh:7abb8509a61602d5ed4c801e7cd7c8299d109bc07980352251ba79880a99abab", + "zh:b1550fe08c650d8419860da1568d3f77093d269f880cad7d720d843b2a9ec545", + "zh:c91d7079646a3fdbb927085e368a16b221a23c17cf7455d5088f0c8f5da48c9f", + "zh:d367213a5f392852ef0708283df583703b2efd0b44f9e599cd055086c371cf74", + "zh:d5b557f294f4094a865afaa0611dc2e657d485b60903f12795eeedc2e1c3aa87", "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + "zh:fdad54c5e50751cef3f39a8666ff6adbb3bd860d396d5a9a0a3526e204f60454", ] } provider "registry.terraform.io/hashicorp/null" { - version = "3.2.3" + version = "3.2.2" hashes = [ - "h1:+AnORRgFbRO6qqcfaQyeX80W0eX3VmjadjnUFUJTiXo=", - "h1:I0Um8UkrMUb81Fxq/dxbr3HLP2cecTH2WMJiwKSrwQY=", - "h1:nKUqWEza6Lcv3xRlzeiRQrHtqvzX1BhIzjaOVXRYQXQ=", - "h1:obXguGZUWtNAO09f1f9Cb7hsPCOGXuGdN8bn/ohKRBQ=", - "h1:zxoDtu918XPWJ/Y6s4aFrZydn6SfqkRc5Ax1ZLnC6Ew=", - "zh:22d062e5278d872fe7aed834f5577ba0a5afe34a3bdac2b81f828d8d3e6706d2", - "zh:23dead00493ad863729495dc212fd6c29b8293e707b055ce5ba21ee453ce552d", - 
"zh:28299accf21763ca1ca144d8f660688d7c2ad0b105b7202554ca60b02a3856d3", - "zh:55c9e8a9ac25a7652df8c51a8a9a422bd67d784061b1de2dc9fe6c3cb4e77f2f", - "zh:756586535d11698a216291c06b9ed8a5cc6a4ec43eee1ee09ecd5c6a9e297ac1", + "h1:Gef5VGfobY5uokA5nV/zFvWeMNR2Pmq79DH94QnNZPM=", + "h1:IMVAUHKoydFrlPrl9OzasDnw/8ntZFerCC9iXw1rXQY=", + "h1:m467k2tZ9cdFFgHW7LPBK2GLPH43LC6wc3ppxr8yvoE=", + "h1:vWAsYRd7MjYr3adj8BVKRohVfHpWQdvkIwUQ2Jf5FVM=", + "h1:zT1ZbegaAYHwQa+QwIFugArWikRJI9dqohj8xb0GY88=", + "zh:3248aae6a2198f3ec8394218d05bd5e42be59f43a3a7c0b71c66ec0df08b69e7", + "zh:32b1aaa1c3013d33c245493f4a65465eab9436b454d250102729321a44c8ab9a", + "zh:38eff7e470acb48f66380a73a5c7cdd76cc9b9c9ba9a7249c7991488abe22fe3", + "zh:4c2f1faee67af104f5f9e711c4574ff4d298afaa8a420680b0cb55d7bbc65606", + "zh:544b33b757c0b954dbb87db83a5ad921edd61f02f1dc86c6186a5ea86465b546", + "zh:696cf785090e1e8cf1587499516b0494f47413b43cb99877ad97f5d0de3dc539", + "zh:6e301f34757b5d265ae44467d95306d61bef5e41930be1365f5a8dcf80f59452", "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", - "zh:9d5eea62fdb587eeb96a8c4d782459f4e6b73baeece4d04b4a40e44faaee9301", - "zh:a6355f596a3fb8fc85c2fb054ab14e722991533f87f928e7169a486462c74670", - "zh:b5a65a789cff4ada58a5baffc76cb9767dc26ec6b45c00d2ec8b1b027f6db4ed", - "zh:db5ab669cf11d0e9f81dc380a6fdfcac437aea3d69109c7aef1a5426639d2d65", - "zh:de655d251c470197bcbb5ac45d289595295acb8f829f6c781d4a75c8c8b7c7dd", - "zh:f5c68199f2e6076bce92a12230434782bf768103a427e9bb9abee99b116af7b5", + "zh:913a929070c819e59e94bb37a2a253c228f83921136ff4a7aa1a178c7cce5422", + "zh:aa9015926cd152425dbf86d1abdbc74bfe0e1ba3d26b3db35051d7b9ca9f72ae", + "zh:bb04798b016e1e1d49bcc76d62c53b56c88c63d6f2dfe38821afef17c416a0e1", + "zh:c23084e1b23577de22603cff752e59128d83cfecc2e6819edadd8cf7a10af11e", ] } diff --git a/terraform/infrastructure/iam/gcp/main.tf b/terraform/infrastructure/iam/gcp/main.tf index 3c45be09e..899d448c9 100644 --- a/terraform/infrastructure/iam/gcp/main.tf +++ b/terraform/infrastructure/iam/gcp/main.tf @@ -2,7 +2,7 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = "6.36.0" + version = "5.17.0" } } } @@ -13,19 +13,8 @@ provider "google" { zone = var.zone } -locals { - sa_name = var.name_prefix == "" ? var.service_account_id : "${var.name_prefix}-sa" - sa_vm_name = var.name_prefix == "" ? 
"${var.service_account_id}-vm" : "${var.name_prefix}-sa-vm" -} - -resource "google_service_account" "vm" { - account_id = local.sa_vm_name - display_name = "Constellation service account for VMs" - description = "Service account used by the VMs" -} - resource "google_service_account" "service_account" { - account_id = local.sa_name + account_id = var.service_account_id display_name = "Constellation service account" description = "Service account used inside Constellation" } @@ -76,31 +65,6 @@ resource "google_project_iam_member" "iam_service_account_user_role" { depends_on = [null_resource.delay] } -resource "google_project_iam_custom_role" "vm" { - # role_id must not contain dashes - role_id = replace("${local.sa_vm_name}-role", "-", "_") - title = "Constellation IAM role for VMs" - description = "Constellation IAM role for VMs" - permissions = [ - "compute.instances.get", - "compute.instances.list", - "compute.subnetworks.get", - "compute.globalForwardingRules.list", - "compute.zones.list", - "compute.forwardingRules.list", - ] -} - -resource "google_project_iam_binding" "custom_role_vm_to_service_account_vm" { - project = var.project_id - role = "projects/${var.project_id}/roles/${google_project_iam_custom_role.vm.role_id}" - - members = [ - "serviceAccount:${google_service_account.vm.email}", - ] - depends_on = [null_resource.delay] -} - resource "google_service_account_key" "service_account_key" { service_account_id = google_service_account.service_account.name depends_on = [null_resource.delay] diff --git a/terraform/infrastructure/iam/gcp/outputs.tf b/terraform/infrastructure/iam/gcp/outputs.tf index 45d586de6..437261bb8 100644 --- a/terraform/infrastructure/iam/gcp/outputs.tf +++ b/terraform/infrastructure/iam/gcp/outputs.tf @@ -3,9 +3,3 @@ output "service_account_key" { description = "Private key of the service account." sensitive = true } - -output "service_account_mail_vm" { - value = google_service_account.vm.email - description = "Mail address of the service account to be attached to the VMs" - sensitive = false -} diff --git a/terraform/infrastructure/iam/gcp/variables.tf b/terraform/infrastructure/iam/gcp/variables.tf index 37dc35144..19c25d787 100644 --- a/terraform/infrastructure/iam/gcp/variables.tf +++ b/terraform/infrastructure/iam/gcp/variables.tf @@ -5,13 +5,7 @@ variable "project_id" { variable "service_account_id" { type = string - default = null - description = "[DEPRECATED use var.name_prefix] ID for the service account being created. Must match ^[a-z](?:[-a-z0-9]{4,28}[a-z0-9])$." -} - -variable "name_prefix" { - type = string - description = "Prefix to be used for all resources created by this module." + description = "ID for the service account being created. Must match ^[a-z](?:[-a-z0-9]{4,28}[a-z0-9])$." } variable "region" { diff --git a/terraform/infrastructure/openstack/.terraform.lock.hcl b/terraform/infrastructure/openstack/.terraform.lock.hcl index 292f31c77..60c4569da 100644 --- a/terraform/infrastructure/openstack/.terraform.lock.hcl +++ b/terraform/infrastructure/openstack/.terraform.lock.hcl @@ -2,93 +2,78 @@ # Manual edits may be lost in future updates. 
provider "registry.terraform.io/hashicorp/random" { - version = "3.7.2" - constraints = "3.7.2" + version = "3.6.0" + constraints = "3.6.0" hashes = [ - "h1:0hcNr59VEJbhZYwuDE/ysmyTS0evkfcLarlni+zATPM=", - "h1:356j/3XnXEKr9nyicLUufzoF4Yr6hRy481KIxRVpK0c=", - "h1:Def/iHM4HihJCIxQ8AYoxtoVL5lVlYx0V7bX91pxwgM=", - "h1:KG4NuIBl1mRWU0KD/BGfCi1YN/j3F7H4YgeeM7iSdNs=", - "h1:Lmv2TxyKKm9Vt4uxcPZHw1uf0Ax/yYizJlilbLSZN8E=", - "h1:hkKSY5xI4R1H4Yrg10HHbtOoxZif2dXa9HFPSbaVg5o=", - "h1:khu3pu9zeUMd6Ev+yH6cQ1S4+xpzx8wqwwFwADYGeRI=", - "h1:l35vnL76rzaOjhhJQiaWviW0noK2YzHeHN0/vIXJnHk=", - "h1:nWZjMYzp+nsqD3xslcihzq1Enxv33a7iC8/I8CTBcHA=", - "h1:pSMn/cwmyHB6V67lToGmCHfJFfzA711vV+E1cGP0LBg=", - "h1:w+NoF7vNMFS+qrU2XUEm0/wnuIZxPC733qOOfLVOdfk=", - "zh:14829603a32e4bc4d05062f059e545a91e27ff033756b48afbae6b3c835f508f", - "zh:1527fb07d9fea400d70e9e6eb4a2b918d5060d604749b6f1c361518e7da546dc", - "zh:1e86bcd7ebec85ba336b423ba1db046aeaa3c0e5f921039b3f1a6fc2f978feab", - "zh:24536dec8bde66753f4b4030b8f3ef43c196d69cccbea1c382d01b222478c7a3", - "zh:29f1786486759fad9b0ce4fdfbbfece9343ad47cd50119045075e05afe49d212", - "zh:4d701e978c2dd8604ba1ce962b047607701e65c078cb22e97171513e9e57491f", + "h1:5KeoKSVKVHJW308uiTgslxFbjQAdWzBGUFK68vgMRWY=", + "h1:I8MBeauYA8J8yheLJ8oSMWqB0kovn16dF/wKZ1QTdkk=", + "h1:R5Ucn26riKIEijcsiOMBR3uOAjuOMfI1x7XvH4P6B1w=", + "h1:p6WG1IPHnqx1fnJVKNjv733FBaArIugqy58HRZnpPCk=", + "h1:t0mRdJzegohRKhfdoQEJnv3JRISSezJRblN0HIe67vo=", + "zh:03360ed3ecd31e8c5dac9c95fe0858be50f3e9a0d0c654b5e504109c2159287d", + "zh:1c67ac51254ba2a2bb53a25e8ae7e4d076103483f55f39b426ec55e47d1fe211", + "zh:24a17bba7f6d679538ff51b3a2f378cedadede97af8a1db7dad4fd8d6d50f829", + "zh:30ffb297ffd1633175d6545d37c2217e2cef9545a6e03946e514c59c0859b77d", + "zh:454ce4b3dbc73e6775f2f6605d45cee6e16c3872a2e66a2c97993d6e5cbd7055", "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", - "zh:7b8434212eef0f8c83f5a90c6d76feaf850f6502b61b53c329e85b3b281cba34", - "zh:ac8a23c212258b7976e1621275e3af7099e7e4a3d4478cf8d5d2a27f3bc3e967", - "zh:b516ca74431f3df4c6cf90ddcdb4042c626e026317a33c53f0b445a3d93b720d", - "zh:dc76e4326aec2490c1600d6871a95e78f9050f9ce427c71707ea412a2f2f1a62", - "zh:eac7b63e86c749c7d48f527671c7aee5b4e26c10be6ad7232d6860167f99dbb0", + "zh:91df0a9fab329aff2ff4cf26797592eb7a3a90b4a0c04d64ce186654e0cc6e17", + "zh:aa57384b85622a9f7bfb5d4512ca88e61f22a9cea9f30febaa4c98c68ff0dc21", + "zh:c4a3e329ba786ffb6f2b694e1fd41d413a7010f3a53c20b432325a94fa71e839", + "zh:e2699bc9116447f96c53d55f2a00570f982e6f9935038c3810603572693712d0", + "zh:e747c0fd5d7684e5bfad8aa0ca441903f15ae7a98a737ff6aca24ba223207e2c", + "zh:f1ca75f417ce490368f047b63ec09fd003711ae48487fba90b4aba2ccf71920e", ] } provider "registry.terraform.io/stackitcloud/stackit" { - version = "0.53.0" - constraints = "0.53.0" + version = "0.15.1" + constraints = "0.15.1" hashes = [ - "h1:BVPETE48KhoqyKYClDhUBstmFzRiAcuxe27hQZwDK+Q=", - "h1:FcdIzdfUZY6BPCCng9WImmsfJcRH+iAr8DgRI4Fia04=", - "h1:KhLsCQC08QvAiBFb1LhfpfNSTPUes1JJUOuTPmDbn2c=", - "h1:KpyF8wGtsxPKJjDla/r93FftL7qFCe/MtiN+1ug1+No=", - "h1:P6OSZsAasyZIgC2xllDldPv4Bvg6uXPPNzxkicTwuq0=", - "h1:P7uxrCcb0itwa44WmUpAfb/bpg74lT+yDiAcveM0aa8=", - "h1:QPX1BtGEBa51bKpsGPPqV0Ssuhn64xJcEohBtyEY3Pg=", - "h1:RmmMw2xkNYRFyuI3qqL1JpnIWSXLV51DSSZeil8Ec6w=", - "h1:RwiKzxuiJ+dOvaFQ/A92zLGz5JLN+0fA7riYFrNKZ9Y=", - "h1:To9R3LyIlT+S/6d/tcgQgxaKB3UWoaaNBAhbWs5wYGM=", - "h1:UQpT+MqZyPRX5usq1u77GUDTW5UFVcvh+fkDItGZfzM=", - "h1:bLlVOzfu3fje3M81Whny1yQiOC89r4x4Akw6JNzy2fk=", - "h1:hQasTcIA5yfYrgbelOcceatBDj8//shVvNfUAZyOOAc=", - 
"h1:xXlBF8c3btnkAIgBiipcpsvbMK6n2+/8OqNAGxBhni8=", + "h1:CUdva/dYmpT8++N6Ga2r4z592keQCFLnjfHPbNjegtQ=", + "h1:Ue1niRFNomhn2QRuXLc39gYs9VR6blZm31vV4h5DKlw=", + "h1:Vra5UFH8yFTaa/xykLJ1XzUSmSsFyhtT4xsiZy2uJiY=", + "h1:eWQwYVxuB8JFt3w95fNMP3l8UfRNTtX9RwcmkG7YhNU=", + "h1:ouS981NXWByi4I15QpypXdqza6p5TmqEJKGqPbE2QBQ=", + "zh:0673b539594ed62a1510036da5b15bb477fcd1d997cc4fd7ec82227c5a4b2a26", + "zh:0bd6afcebeeeea3a463fe3e6b5537f2f046ec2b8ae3e842984d9e30e2cdfd8e6", "zh:0dde99e7b343fa01f8eefc378171fb8621bedb20f59157d6cc8e3d46c738105f", - "zh:27df5dd8cd7af79080e071d8c3ef81d792ee7665b569255ec4c931fa5595b7b5", - "zh:452f8ee8dabc0bdf8ec623d186061750a527bb02225c9810f412c46d142bb73a", - "zh:48512ed6362d537687a74f5ddd36ed288e36b8f47ad6ead78c71f5152912c777", - "zh:4e4b2be9cda9f866b47bf4c9b3a9d3c9c8a0c6006d66e9a33f54317694ea48a1", - "zh:5239ec2377e1d186b465b07bd1c71793d7a142a1bfe155abdf84d60d8189b1bc", - "zh:5e506b9e423ff1c65482bd8dcee629cef0789b0879d1b2d61c1e478c4506b2eb", - "zh:705762fa6bfc02ccab39a8b544f9090f63d6e49364c09ac4a2f29878ff53235f", - "zh:882ceb507151aff47ec57808adb2a7104686be46ee34963eecb0fabfd771caf8", - "zh:ab2ba4430a0eb433ee6d0b99c738c6addbcab6bd921b7ed660d55fc979515c29", - "zh:b26b03356b44e6eacaa84aa4170aee4b9afe78f18c48ec209a6a1c0bfe7f4d47", - "zh:b77ddbc99c4a77b1c2410ba7526498be6ee723bb3b42c5dd4e712408119224bf", - "zh:c808c34807aeb34aa0e66ee9c25b523b398925682fa7c0f6b0115dd0e91b2c33", - "zh:d76c00fddbc80140825757f71b22d1c3a64978a4792b52bfde3747ee93f8e7fa", - "zh:fb03cb807d9817693a001bed3334c5636fd61fb745d611b14b013bfdcf066068", + "zh:1f97c5435e58072e7369df8510251c94d832d98d1ee0bc3acfa9c2ed533b178d", + "zh:282dad21d39d81f64e1749797c57961eb04c020374e83e86b877d5866e22ba32", + "zh:38fa32343fe63779d4ed95600d12c589b9e49bf52cf0121b1b849e4a6ee75162", + "zh:65384b0f08cab580377aafc0d944bf853663dc116f0a453acd9d701ed856ec67", + "zh:6723184842d5e7cdffa5e8225ceccc4315b2a2624157d5b42b13f51d6916812b", + "zh:772b35cd5ee7900a8cae77580d10c10d4cb8c7cf99bf2fe2e906cbf3d554ecd5", + "zh:796eeabb73fb22d5996a7b846d5462477e90c08ba9eada194c8ad1d1eb9daeb2", + "zh:8575e1867a8d8410b9d7652511b57783f4e592db64f32ab2d53950d54b3df282", + "zh:cf56e99ce0ab2e09e75da3ed3e30712ebf32125f8a436e6ea120a45079161cbb", + "zh:e591c9fcd5e1b22bc928974655276e9cff0bf66c2d82712805b42e1a6dc9fdd9", + "zh:f095b3499b57c344aa8586e4ad4dbfc65ef74ca800470ce4a805e1858b632827", + "zh:f68014d78c1eec7ba0a12eeba0713ba7ee98446621649cd05452f358a4f8a9f9", ] } provider "registry.terraform.io/terraform-provider-openstack/openstack" { - version = "3.0.0" - constraints = "3.0.0" + version = "1.54.1" + constraints = "1.54.1" hashes = [ - "h1:5LLYtkab+Vdg8CE9pI1O8ufh3sTXqWlY3uKxko/y3X8=", - "h1:9/pRGncPl05UJquv3mXwSMU0NVa3ilwPtilcZcNgdys=", - "h1:K/LldMyf/FKaGOWUPAdpkMvKqE7tH+cmxLLhHKHjseQ=", - "h1:i7sj9flNOPDNraNq3QztV+cQ1JeqSQF30xQrY6Vi2Yk=", - "h1:sxq+0vFl4SfV9dlut5liwaH4FejyNIf4ClAjRgowxv8=", - "zh:01718f229597b34ed430236a230a407dacd6289543556d33910e151462e8cb8a", - "zh:2424c5347d35fe0de778d1c40dbe8d9b1278309c4d65cce31709e6fcbec139df", - "zh:2a785f9efd6d8c979031803dd78411a583bc0d9a572ddf5fb9e539cbbfc1ce43", - "zh:8ab18c7eb1fd04b34be75fcecfc461888bd37ff017973f46745abeabd21b3fda", - "zh:98e805318292b58d9692bfe6d3d82e0db0f8044e588a38b239309221198aa92d", - "zh:9a99c9801f96dc69e7c76c5ddc0e2800b77a333becacae530d7a3acd18855347", - "zh:c5aa6690c094be211d2700d7ea44ffe937763e4dd566506c87eb99d6d8330b52", - "zh:d06fc3a148a49aab059a1f08ceadbfe1a5c82c7b80b960169987603dddaaaf58", - "zh:d079da24a9f2cc0d6fec9616e7ebe994245a3d98da629f012069c26d650edb05", - 
"zh:d1d2b63dba9045a4ff3869c65ddcaf2703d993254b58b2a0230d067f5c036de2", - "zh:d2b9d09c47e7eca08091da825cdf0982dc30089ee401888ce8704b79c7636e95", - "zh:dd6be78f98772bfc1ebf022fc36e5d68ef0f165f9e87476430d4e0dc3f1cc57f", - "zh:e7743c11dd0c83a5c1905f04ffd490b4471941218b4c841b760223fcf416affd", - "zh:f234aefac77f4e9a2b05877a00d322ee165009713d79a8d8e19039c6c6f7cf1b", + "h1:Cqk18+r4bJF/sIusEK9lM0gc841RwsJ8AMhWyiU7lig=", + "h1:Cs9sP2V0MssWIQo+gur9soaNAAQleRaWdnvFP61s0Y0=", + "h1:JC0mScAPBs1MlHeEIPMZTQGhTA5aIG3iEuKMSPpR31E=", + "h1:jx2WdbttenKA2gWZDil6ffQT2CcY/TZ46pG0FlbNPuY=", + "h1:xt7LbO3lAXcDUjDxPHrQtgv4mO2GKvSOFMF1uPsK4vE=", + "zh:45ba84df17f94b15af7aab7007241e035dde8a5b46aeb761259d937058a80f71", + "zh:493b1deb7be9b600e5b1f5da2a9dfd3bce5df0c6d38090614dbe4ed05ade8441", + "zh:53551401fba8c1d5b27a08ee307552b84b1d0c1218f3717a4b766ec701b3e016", + "zh:53629bebb48ce5220f7601d776c2ac1485b6c860cb695f150fb716f5be8aa86d", + "zh:5a20f32cca767bef70b79bc8ecbd10fec3dc8696183e2d29631aa510947cb70d", + "zh:653693f630777e4aa3f410976a5169cf0f2a301516a820b3860de116054ae30a", + "zh:70f2d7bd5f5940f4fc3f023a01468890fbd9d704d0256bc65f7c64fb2cbcd4e4", + "zh:9cc22af51e5124dd5c2e0f1adefb1b08dcff3138aba9c92961cef36b1641d7aa", + "zh:9df45e893f215266159733dbc120809bc3d313188e121532dc6e2d10165e9899", + "zh:cb3e240992069cd6160f5b5cbbd50b70948f25bb337a75e780a0648461505d3f", + "zh:cb8343c0cf1bf5ca4d060826a8b68e3e5935b4a65974c76ac9c071c5a510e67e", + "zh:cc2060f93c66276dff6366b48e3a0e619874e3d939e0d2a39fc6ce10ca91232d", + "zh:d495b3051977018696113eded89c2cddfae0570f2adbdf7e9097c189ba41903e", + "zh:dfad1be943769780d5e948c06db957ce45f98b057a774964da0b82130c22f139", ] } diff --git a/terraform/infrastructure/openstack/main.tf b/terraform/infrastructure/openstack/main.tf index 325279818..9fc228bdc 100644 --- a/terraform/infrastructure/openstack/main.tf +++ b/terraform/infrastructure/openstack/main.tf @@ -2,17 +2,17 @@ terraform { required_providers { openstack = { source = "terraform-provider-openstack/openstack" - version = "3.0.0" + version = "1.54.1" } stackit = { source = "stackitcloud/stackit" - version = "0.53.0" + version = "0.15.1" } random = { source = "hashicorp/random" - version = "3.7.2" + version = "3.6.0" } } } @@ -22,7 +22,7 @@ provider "openstack" { } provider "stackit" { - default_region = "eu01" + region = "eu01" } @@ -43,11 +43,10 @@ locals { { name = "recovery", port = "9999", health_check = "TCP" }, { name = "join", port = "30090", health_check = "TCP" }, var.debug ? [{ name = "debugd", port = "4000", health_check = "TCP" }] : [], - var.emergency_ssh ? [{ name = "ssh", port = "22", health_check = "TCP" }] : [], ]) cidr_vpc_subnet_nodes = "192.168.178.0/24" cidr_vpc_subnet_lbs = "192.168.177.0/24" - tags = concat(["constellation-uid-${local.uid}"], var.additional_tags) + tags = ["constellation-uid-${local.uid}"] identity_service = [ for entry in data.openstack_identity_auth_scope_v3.scope.service_catalog : entry if entry.type == "identity" @@ -60,13 +59,6 @@ locals { cloudsyaml_path = length(var.openstack_clouds_yaml_path) > 0 ? 
var.openstack_clouds_yaml_path : "~/.config/openstack/clouds.yaml" cloudsyaml = yamldecode(file(pathexpand(local.cloudsyaml_path))) cloudyaml = local.cloudsyaml.clouds[var.cloud] - revision = 1 -} - -# A way to force replacement of resources if the provider does not want to replace them -# see: https://developer.hashicorp.com/terraform/language/resources/terraform-data#example-usage-data-for-replace_triggered_by -resource "terraform_data" "replacement" { - input = local.revision } resource "random_id" "uid" { @@ -250,7 +242,6 @@ module "instance_group" { openstack_username = local.cloudyaml["auth"]["username"] openstack_password = local.cloudyaml["auth"]["password"] openstack_user_domain_name = local.cloudyaml["auth"]["user_domain_name"] - openstack_region_name = local.cloudyaml["region_name"] openstack_load_balancer_endpoint = openstack_networking_floatingip_v2.public_ip.address } diff --git a/terraform/infrastructure/openstack/modules/instance_group/main.tf b/terraform/infrastructure/openstack/modules/instance_group/main.tf index 5f027fdfa..b104506d5 100644 --- a/terraform/infrastructure/openstack/modules/instance_group/main.tf +++ b/terraform/infrastructure/openstack/modules/instance_group/main.tf @@ -2,7 +2,7 @@ terraform { required_providers { openstack = { source = "terraform-provider-openstack/openstack" - version = "3.0.0" + version = "1.54.1" } } } @@ -80,7 +80,6 @@ resource "openstack_compute_instance_v2" "instance_group_member" { openstack-username = var.openstack_username openstack-password = var.openstack_password openstack-user-domain-name = var.openstack_user_domain_name - openstack-region-name = var.openstack_region_name openstack-load-balancer-endpoint = var.openstack_load_balancer_endpoint }) availability_zone_hints = length(var.availability_zone) > 0 ? var.availability_zone : null diff --git a/terraform/infrastructure/openstack/modules/instance_group/variables.tf b/terraform/infrastructure/openstack/modules/instance_group/variables.tf index 991985c97..74f0f9e28 100644 --- a/terraform/infrastructure/openstack/modules/instance_group/variables.tf +++ b/terraform/infrastructure/openstack/modules/instance_group/variables.tf @@ -97,11 +97,6 @@ variable "openstack_password" { description = "OpenStack password." } -variable "openstack_region_name" { - type = string - description = "OpenStack region name." -} - variable "openstack_load_balancer_endpoint" { type = string description = "OpenStack load balancer endpoint." 
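The `terraform_data.replacement` resource removed from the OpenStack `main.tf` above implements the `replace_triggered_by` pattern described in the HashiCorp documentation linked in its comment. A minimal sketch of how such a pattern is typically wired up; the consuming `openstack_compute_instance_v2.example` resource and all of its arguments are hypothetical illustrations, not taken from this module:

```hcl
locals {
  revision = 1
}

# Bumping local.revision changes the terraform_data input, which in turn forces
# replacement of any resource that lists it in replace_triggered_by.
resource "terraform_data" "replacement" {
  input = local.revision
}

resource "openstack_compute_instance_v2" "example" {
  name      = "example-node"       # hypothetical resource, for illustration only
  flavor_id = "example-flavor-id"  # hypothetical
  image_id  = "example-image-id"   # hypothetical

  lifecycle {
    replace_triggered_by = [terraform_data.replacement]
  }
}
```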
diff --git a/terraform/infrastructure/openstack/modules/loadbalancer/main.tf b/terraform/infrastructure/openstack/modules/loadbalancer/main.tf index f86399612..a10a43d8f 100644 --- a/terraform/infrastructure/openstack/modules/loadbalancer/main.tf +++ b/terraform/infrastructure/openstack/modules/loadbalancer/main.tf @@ -2,7 +2,7 @@ terraform { required_providers { openstack = { source = "terraform-provider-openstack/openstack" - version = "3.0.0" + version = "1.54.1" } } } diff --git a/terraform/infrastructure/openstack/modules/stackit_loadbalancer/main.tf b/terraform/infrastructure/openstack/modules/stackit_loadbalancer/main.tf index 50179a534..a3afe6491 100644 --- a/terraform/infrastructure/openstack/modules/stackit_loadbalancer/main.tf +++ b/terraform/infrastructure/openstack/modules/stackit_loadbalancer/main.tf @@ -2,7 +2,7 @@ terraform { required_providers { stackit = { source = "stackitcloud/stackit" - version = "0.53.0" + version = "0.15.1" } } } diff --git a/terraform/infrastructure/openstack/outputs.tf b/terraform/infrastructure/openstack/outputs.tf index 810b11977..be45ec065 100644 --- a/terraform/infrastructure/openstack/outputs.tf +++ b/terraform/infrastructure/openstack/outputs.tf @@ -36,11 +36,6 @@ output "ip_cidr_node" { description = "CIDR block of the node network." } -output "loadbalancer_address" { - value = openstack_networking_floatingip_v2.public_ip.address - description = "Public loadbalancer address." -} - # OpenStack-specific outputs output "network_id" { diff --git a/terraform/infrastructure/openstack/variables.tf b/terraform/infrastructure/openstack/variables.tf index 3afa8c97a..98714a200 100644 --- a/terraform/infrastructure/openstack/variables.tf +++ b/terraform/infrastructure/openstack/variables.tf @@ -59,21 +59,9 @@ variable "floating_ip_pool_id" { description = "Pool (network name) to use for floating IPs." } -variable "additional_tags" { - type = list(any) - default = [] - description = "Additional tags that should be applied to created resources." -} - # STACKIT-specific variables variable "stackit_project_id" { type = string description = "STACKIT project ID." } - -variable "emergency_ssh" { - type = bool - default = false - description = "Wether to expose the SSH port through the public load balancer." -} diff --git a/terraform/infrastructure/qemu/.terraform.lock.hcl b/terraform/infrastructure/qemu/.terraform.lock.hcl index 1796c477f..fe10c6d34 100644 --- a/terraform/infrastructure/qemu/.terraform.lock.hcl +++ b/terraform/infrastructure/qemu/.terraform.lock.hcl @@ -2,99 +2,77 @@ # Manual edits may be lost in future updates. 
provider "registry.terraform.io/dmacvicar/libvirt" { - version = "0.8.3" - constraints = "0.8.3" + version = "0.7.1" + constraints = "0.7.1" hashes = [ - "h1:4R2L9SlbXt5VVZtQ0vPe1s65UygQT3aKTWpsGKqTnvM=", - "h1:AGOjd7NzTBQAiOjjlUdq5JjuYdWBL6jfrNf+qO7Z+cg=", - "h1:HuCu3y7gzhI1YLYWY7L9c3f5lhJyo5/Peozd50hoj4w=", - "h1:SE2o8WY/6vqSTow2nds6P2+IoJln0TvuF794xBjDIeE=", - "h1:Tttxr3E9O75MM+dDmq5sYHQEw29PwtIj+XDj/5drdfE=", - "h1:Wqa218+IMxnn4psH5r4FlvEL2/LKZRHc4tFm63uOrVc=", - "h1:Z+uoKz1rZ/z0xwEvycBhuDGDwQDKhHUDxlAfKkoGCko=", - "h1:bKZooqVcZ2vCovziB23j4exHuGg8s/FyxjoZ0NIY36Y=", - "h1:dSnCr6ib3qdJot4gXPMe8DW9pHwRPTkN+jkCbG/w4uI=", - "h1:fg6FmBx96CV7M1osSsZ215N47M3TGFeaoy2XVSmh2/Y=", - "h1:h7F4ON38TMmy+8fAoYBaKDOFWv7ZVUnI0J+h7jSL2T8=", - "h1:mymyoJNy7FTe3lkYzgt2Gqgrehn1m1EiqNLrpx5nerk=", - "h1:x44dqohhlnP6MxVsv0c07CcjOERfOBt79QCaoPgtFic=", - "h1:zEKrqI+ape4+ySL7RAZf2PFQ5jBqgfr239zdkyr6U+U=", - "zh:06ff0169beafd1891dc5a30616983abd32004a4f570d1d3dbb5851d84bd1c007", - "zh:2dbdd726d0987cda73b56ecdfbcb98a67485e86a7a44aec976c0081b7239d89d", - "zh:2e195a7bbdfcc13c45460571a5ba848a5c1e746b477c8381058767560f0ac93b", - "zh:3952da13080018c5aec498b73e343c4c22ad884afb8c983138fb7255617aa991", - "zh:478841bcf57df938726ddb90f55c7953fad09db4f6348747519afe7fc84b403b", - "zh:53bce78b03a82c4782acfe1f32c2b46a68fa5fb2fb90d4a5392c90b436b44244", - "zh:5c157f23e9768c67cddf9e847a571adca441607cb5adfb96dbfdd626ceadf92c", - "zh:6bc78d631959fb695664966851308e140c38f3f5cf648dd89756320c2d91765d", - "zh:8605d7d6915190836802654920a8eea3d751ae437273c4f4476dc0ebb9167a1d", - "zh:8b66a22b97331c2a56aed092fd39152d06ad957fd4810aa3f0c4ade0f9b15755", - "zh:92586a47a04082f70bb33f722672127a287caeed109beaaca2668e2e1d6a9caf", - "zh:99a9ee414f5c4268e287660ce8edec2efcba1f79351f83791b64c7e5ab04f569", - "zh:b7cff09fe74b0eb63b5b9aa94de5b33dadbd006d6d5b9578ac476039ea20b062", - "zh:d4188a343ff32c0e03ff28c7e84abce0f43cad2fdbcd9046eaafc247429039ff", + "h1:1yEJVPVFkRkbRY63+sFRAWau/eJ0xlecHWLCV8spkWU=", + "h1:AJn6IML1iiq9oIUdDQTDApMvsfSKfMncF4RoKnhpNaY=", + "h1:G114r+ESpxpMCnBxFXZZ3+HktoNK4WXAJ5M3GRwvgBQ=", + "h1:ZG+KVAKVm++wfWnGdc8QIFn1LHRycUnmYibMg4REQyk=", + "h1:rDm9KgxNWuhdTCJpfepeTzCB/b24bKrOMN57637RZtU=", + "zh:1c59f2ab68da6326637ee8b03433e84af76b3e3562f251a7f2aa239a7b262a8d", + "zh:236e24ecf036e99d9d1e2081a39dc9cb4b8993850a37141a1449f20750f883d6", + "zh:4519c22b1f00c1d37d60ac6c2cb7ad5ab9dbcd44a80b4f61e68aacb54eae017d", + "zh:54de4e3c979c32af1dc71ec2846912f669a28bdb0990e8a3c1fb8fea4ede7b61", + "zh:6270a757bcf4e1f9efe47726cf0caefba30a25e59d151103cf03d1656325783c", + "zh:68b8586d5b29c0a1cb7c608a309b38db911449c072d60eee9e40e01881f1c23a", + "zh:724ba2290fea704714378e9363541420c36091e790c7f39150cde8987d4e0754", + "zh:7b6860c92376cdad98273aab4bea62546622e08f50733e4b2e58a7a859d3b49d", + "zh:986a0a4f8d9511c64bcac8010337deb43110b4c2f91969b2491fd9edc290b60e", + "zh:aff0f6f24d69cd97a44cd6059edaf355769fbb8a7643a6db4d52c9a94f98e194", + "zh:c46ca3f8384d06c13a7ed3d4b83c65b4f8dccbf9d5f624843b68d176add5c5c2", + "zh:ef310534e7d38153aca4ce31655b52a6e6c4d76f32e49732c96b62e9de1ee843", + "zh:f1566b094f4267ef2674889d874962dd41e0cba55251645e16d003c77ca8a19c", + "zh:f2e019df7b537069828c5537c481e5b7f41d2404eef6fe5c86702c20900b303d", ] } provider "registry.terraform.io/hashicorp/random" { - version = "3.7.2" - constraints = "3.7.2" + version = "3.6.0" + constraints = "3.6.0" hashes = [ - "h1:0hcNr59VEJbhZYwuDE/ysmyTS0evkfcLarlni+zATPM=", - "h1:356j/3XnXEKr9nyicLUufzoF4Yr6hRy481KIxRVpK0c=", - "h1:Def/iHM4HihJCIxQ8AYoxtoVL5lVlYx0V7bX91pxwgM=", - 
"h1:KG4NuIBl1mRWU0KD/BGfCi1YN/j3F7H4YgeeM7iSdNs=", - "h1:Lmv2TxyKKm9Vt4uxcPZHw1uf0Ax/yYizJlilbLSZN8E=", - "h1:hkKSY5xI4R1H4Yrg10HHbtOoxZif2dXa9HFPSbaVg5o=", - "h1:khu3pu9zeUMd6Ev+yH6cQ1S4+xpzx8wqwwFwADYGeRI=", - "h1:l35vnL76rzaOjhhJQiaWviW0noK2YzHeHN0/vIXJnHk=", - "h1:nWZjMYzp+nsqD3xslcihzq1Enxv33a7iC8/I8CTBcHA=", - "h1:pSMn/cwmyHB6V67lToGmCHfJFfzA711vV+E1cGP0LBg=", - "h1:w+NoF7vNMFS+qrU2XUEm0/wnuIZxPC733qOOfLVOdfk=", - "zh:14829603a32e4bc4d05062f059e545a91e27ff033756b48afbae6b3c835f508f", - "zh:1527fb07d9fea400d70e9e6eb4a2b918d5060d604749b6f1c361518e7da546dc", - "zh:1e86bcd7ebec85ba336b423ba1db046aeaa3c0e5f921039b3f1a6fc2f978feab", - "zh:24536dec8bde66753f4b4030b8f3ef43c196d69cccbea1c382d01b222478c7a3", - "zh:29f1786486759fad9b0ce4fdfbbfece9343ad47cd50119045075e05afe49d212", - "zh:4d701e978c2dd8604ba1ce962b047607701e65c078cb22e97171513e9e57491f", + "h1:5KeoKSVKVHJW308uiTgslxFbjQAdWzBGUFK68vgMRWY=", + "h1:I8MBeauYA8J8yheLJ8oSMWqB0kovn16dF/wKZ1QTdkk=", + "h1:R5Ucn26riKIEijcsiOMBR3uOAjuOMfI1x7XvH4P6B1w=", + "h1:p6WG1IPHnqx1fnJVKNjv733FBaArIugqy58HRZnpPCk=", + "h1:t0mRdJzegohRKhfdoQEJnv3JRISSezJRblN0HIe67vo=", + "zh:03360ed3ecd31e8c5dac9c95fe0858be50f3e9a0d0c654b5e504109c2159287d", + "zh:1c67ac51254ba2a2bb53a25e8ae7e4d076103483f55f39b426ec55e47d1fe211", + "zh:24a17bba7f6d679538ff51b3a2f378cedadede97af8a1db7dad4fd8d6d50f829", + "zh:30ffb297ffd1633175d6545d37c2217e2cef9545a6e03946e514c59c0859b77d", + "zh:454ce4b3dbc73e6775f2f6605d45cee6e16c3872a2e66a2c97993d6e5cbd7055", "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", - "zh:7b8434212eef0f8c83f5a90c6d76feaf850f6502b61b53c329e85b3b281cba34", - "zh:ac8a23c212258b7976e1621275e3af7099e7e4a3d4478cf8d5d2a27f3bc3e967", - "zh:b516ca74431f3df4c6cf90ddcdb4042c626e026317a33c53f0b445a3d93b720d", - "zh:dc76e4326aec2490c1600d6871a95e78f9050f9ce427c71707ea412a2f2f1a62", - "zh:eac7b63e86c749c7d48f527671c7aee5b4e26c10be6ad7232d6860167f99dbb0", + "zh:91df0a9fab329aff2ff4cf26797592eb7a3a90b4a0c04d64ce186654e0cc6e17", + "zh:aa57384b85622a9f7bfb5d4512ca88e61f22a9cea9f30febaa4c98c68ff0dc21", + "zh:c4a3e329ba786ffb6f2b694e1fd41d413a7010f3a53c20b432325a94fa71e839", + "zh:e2699bc9116447f96c53d55f2a00570f982e6f9935038c3810603572693712d0", + "zh:e747c0fd5d7684e5bfad8aa0ca441903f15ae7a98a737ff6aca24ba223207e2c", + "zh:f1ca75f417ce490368f047b63ec09fd003711ae48487fba90b4aba2ccf71920e", ] } provider "registry.terraform.io/kreuzwerker/docker" { - version = "3.5.0" - constraints = "3.5.0" + version = "3.0.2" + constraints = "3.0.2" hashes = [ - "h1:2m9zj6qkquTHzwXFp+8NpA6e7n6RgckdzvLcBfCCaX4=", - "h1:5fbzStQvS4Ec+/QKefUVDf/pabuwGBpVgLYeigOpMe8=", - "h1:5ykMfyIfMooAmowvmPXc7sVdkhkATrOSkq98Ca0T4ZQ=", - "h1:CIiLFn0SJ+1b2ArewLFjDThJ6rxFf54u9FATgZD9Kac=", - "h1:GSPQAzIJs+b3Ypy9YUTvF4/RrA8Uawx+liA+cQ7RaP4=", - "h1:TYbKlCFkOOI9hh63nbWT6yvd/0PfBONtMO4jkGvu8Us=", - "h1:TzYSjwiML0arIOHzYWD7dLMZZEg0jLHTiSNNNB9c0H8=", - "h1:bU1vBUeUoyaGMkforubnY6O1udOLwYWReXTnneZ8sDk=", - "h1:d+4qP/GCkGnPyfzSa19M49n+6FkIe1BmuEl+HXSDYXs=", - "h1:f7cuN7GkUm4ucgZzr0DwzdbbXrRUmKvmUHkKL40Zg7U=", - "h1:j0REIQY6zq1P0iDUdo5pGZQZy85/EK5ckvRqqzYMMLQ=", - "h1:q00T95a38TRhPJ90y8okMMGQtbOQPROMr+sCbU/mP8Y=", - "h1:qXkfTd57rY0sp++8oU+qSeSMLRLLOZu7jWEJONOkwek=", - "zh:01fa6b2f9c44fc0f286f27941d3866adb749e34235f25b9d731b4f45f9c8a601", - "zh:0f945042448360b3f44662db4dceb7287236a950cf936bc973735acc7cded206", - "zh:44f1e6b5b451d072e84dffe61dbf2c3fbbb4a9f8e98f921a99025ac19c6adbb6", - "zh:5724f8af7c2bbd0f019114421dd46e7c8983ec7ea9444a03fc3a770510b47f03", - 
"zh:790d0b9bc941d1953a739da5426b6a5efa4db97a9c4bb30c52019536abc40661", - "zh:7b89c7cf131a33604a9b8c3fd7a94b28446beba633bbf52e6e37ea3fef643421", - "zh:7e59eb91e253a3f04b40e1e0b45b027fed0e69fed78cd996788a23d92f02137d", - "zh:9caf1c13d1c6ed6a3b7aa11a61b56623b6bd30a6a1e396558c3dbe82370f34f5", - "zh:ba5b46ae242953fbec32f9f93dc5a81fd2e6962df68f17c670c0533040c74b42", - "zh:cae081982d5931ad265223c09824f2b3cd58a1fe307ccc701188d688ad217e6e", - "zh:cda2545056b6028d84505b18aaf20e7c769f6665fe27e16d2c52e35d23840689", - "zh:e41f2b9901202cb2cf7929005340ffc498ab1ca813005e364af4016a85634b5e", - "zh:e84272077a0287bc68523134292d9b12af54e200e413a89ae37f5788c063de5d", + "h1:DcRxJArfX6EiATluWeCBW7HoD6usz9fMoTK2U3dmyPk=", + "h1:XjdpVL61KtTsuPE8swok3GY8A+Bu3TZs8T2DOEpyiXo=", + "h1:cT2ccWOtlfKYBUE60/v2/4Q6Stk1KYTNnhxSck+VPlU=", + "h1:os8pBi4rbtFJJtzNWlcGhOVsz5V9UPJvo+L0wNQFYE8=", + "h1:tMiDR/3WQYAwE4Z7Xr1iqJN23z2GNr1ARis9yutVgjw=", + "zh:15b0a2b2b563d8d40f62f83057d91acb02cd0096f207488d8b4298a59203d64f", + "zh:23d919de139f7cd5ebfd2ff1b94e6d9913f0977fcfc2ca02e1573be53e269f95", + "zh:38081b3fe317c7e9555b2aaad325ad3fa516a886d2dfa8605ae6a809c1072138", + "zh:4a9c5065b178082f79ad8160243369c185214d874ff5048556d48d3edd03c4da", + "zh:5438ef6afe057945f28bce43d76c4401254073de01a774760169ac1058830ac2", + "zh:60b7fadc287166e5c9873dfe53a7976d98244979e0ab66428ea0dea1ebf33e06", + "zh:61c5ec1cb94e4c4a4fb1e4a24576d5f39a955f09afb17dab982de62b70a9bdd1", + "zh:a38fe9016ace5f911ab00c88e64b156ebbbbfb72a51a44da3c13d442cd214710", + "zh:c2c4d2b1fd9ebb291c57f524b3bf9d0994ff3e815c0cd9c9bcb87166dc687005", + "zh:d567bb8ce483ab2cf0602e07eae57027a1a53994aba470fa76095912a505533d", + "zh:e83bf05ab6a19dd8c43547ce9a8a511f8c331a124d11ac64687c764ab9d5a792", + "zh:e90c934b5cd65516fbcc454c89a150bfa726e7cf1fe749790c7480bbeb19d387", + "zh:f05f167d2eaf913045d8e7b88c13757e3cf595dd5cd333057fdafc7c4b7fed62", + "zh:fcc9c1cea5ce85e8bcb593862e699a881bd36dffd29e2e367f82d15368659c3d", ] } diff --git a/terraform/infrastructure/qemu/main.tf b/terraform/infrastructure/qemu/main.tf index 3c01a07a9..047e9dbdc 100644 --- a/terraform/infrastructure/qemu/main.tf +++ b/terraform/infrastructure/qemu/main.tf @@ -2,15 +2,11 @@ terraform { required_providers { libvirt = { source = "dmacvicar/libvirt" - version = "0.8.3" + version = "0.7.1" } docker = { source = "kreuzwerker/docker" - version = "3.5.0" - } - random = { - source = "hashicorp/random" - version = "3.7.2" + version = "3.0.2" } } } @@ -27,13 +23,6 @@ locals { cidr_vpc_subnet_nodes = "10.42.0.0/22" cidr_vpc_subnet_control_planes = "10.42.1.0/24" cidr_vpc_subnet_worker = "10.42.2.0/24" - revision = 1 -} - -# A way to force replacement of resources if the provider does not want to replace them -# see: https://developer.hashicorp.com/terraform/language/resources/terraform-data#example-usage-data-for-replace_triggered_by -resource "terraform_data" "replacement" { - input = local.revision } resource "random_password" "init_secret" { diff --git a/terraform/infrastructure/qemu/modules/instance_group/main.tf b/terraform/infrastructure/qemu/modules/instance_group/main.tf index 00c83c6df..75ecbf2b8 100644 --- a/terraform/infrastructure/qemu/modules/instance_group/main.tf +++ b/terraform/infrastructure/qemu/modules/instance_group/main.tf @@ -2,11 +2,11 @@ terraform { required_providers { libvirt = { source = "dmacvicar/libvirt" - version = "0.8.3" + version = "0.7.1" } random = { source = "hashicorp/random" - version = "3.7.2" + version = "3.6.0" } } } diff --git a/terraform/legacy-module/README.md 
b/terraform/legacy-module/README.md new file mode 100644 index 000000000..c755b0a54 --- /dev/null +++ b/terraform/legacy-module/README.md @@ -0,0 +1,6 @@ +## Constellation Terraform Modules + +> [!WARNING] +> The Constellation Terraform modules are deprecated, and support will be discontinued in v2.15.0. +> To continue managing Constellation clusters through Terraform, you can use the [Constellation Terraform provider](https://docs.edgeless.systems/constellation/workflows/terraform-provider). +> Clusters created through the Constellation Terraform modules can also be [imported](https://registry.terraform.io/providers/edgelesssys/constellation/latest/docs/resources/cluster#import) to the Constellation Terraform provider. diff --git a/terraform/legacy-module/aws-constellation/main.tf b/terraform/legacy-module/aws-constellation/main.tf new file mode 100644 index 000000000..de89f7a68 --- /dev/null +++ b/terraform/legacy-module/aws-constellation/main.tf @@ -0,0 +1,67 @@ +locals { + region = substr(var.zone, 0, length(var.zone) - 1) +} + +module "aws_iam" { + source = "../../infrastructure/iam/aws" + name_prefix = var.name_prefix + region = local.region +} + +resource "null_resource" "ensure_yq" { + provisioner "local-exec" { + command = < "image.txt" + + if [ '${var.csp}' = 'azure' ]; then + sed -i 's/CommunityGalleries/communityGalleries/g' image.txt + sed -i 's/Images/images/g' image.txt + sed -i 's/Versions/versions/g' image.txt + fi + EOT +} + + +resource "null_resource" "fetch_image" { + provisioner "local-exec" { + command = local.fetch_image_command + + environment = { + attestation_variant = var.attestation_variant + } + } + provisioner "local-exec" { + when = destroy + command = "rm image.txt" + } + triggers = { + always_run = "${timestamp()}" + } +} + +data "local_file" "image" { + filename = "image.txt" + depends_on = [null_resource.fetch_image] +} diff --git a/terraform/legacy-module/common/fetch-image/output.tf b/terraform/legacy-module/common/fetch-image/output.tf new file mode 100644 index 000000000..8fcdc030f --- /dev/null +++ b/terraform/legacy-module/common/fetch-image/output.tf @@ -0,0 +1,4 @@ +output "image" { + description = "The resolved image ID of the CSP." + value = data.local_file.image.content +} diff --git a/terraform/legacy-module/common/fetch-image/variables.tf b/terraform/legacy-module/common/fetch-image/variables.tf new file mode 100644 index 000000000..25b88bd1b --- /dev/null +++ b/terraform/legacy-module/common/fetch-image/variables.tf @@ -0,0 +1,20 @@ +variable "csp" { + description = "The cloud service provider to fetch image data for." + type = string +} + +variable "attestation_variant" { + description = "The attestation variant to fetch image data for." + type = string +} + +variable "region" { + description = "The region to fetch image data for." + type = string + default = "" +} + +variable "image" { + description = "The image reference or semantical release version to fetch image data for." + type = string +} diff --git a/terraform/legacy-module/common/install-yq.sh b/terraform/legacy-module/common/install-yq.sh new file mode 100755 index 000000000..14c375fd9 --- /dev/null +++ b/terraform/legacy-module/common/install-yq.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash +version="v4.35.2" +if [[ -f ./yq ]] && ./yq --version | grep -q "${version}"; then + echo "yq is already available and up to date." + exit 0 +fi +if [[ -f ./yq ]]; then + echo "yq is already available but not at the required version. Replacing with ${version}." 
+ rm -f yq +fi + +echo "Fetching yq ${version}" +os=$(uname -s) +arch=$(uname -m) +url="" + +if [[ ${os} == "Darwin" ]]; then + if [[ ${arch} == "arm64" ]]; then + url="https://github.com/mikefarah/yq/releases/download/${version}/yq_darwin_arm64" + elif [[ ${arch} == "x86_64" ]]; then + url="https://github.com/mikefarah/yq/releases/download/${version}/yq_darwin_amd64" + fi +elif [[ ${os} == "Linux" ]]; then + if [[ ${arch} == "x86_64" ]]; then + url="https://github.com/mikefarah/yq/releases/download/${version}/yq_linux_amd64" + elif [[ ${arch} == "arm64" ]]; then + url="https://github.com/mikefarah/yq/releases/download/${version}/yq_linux_arm64" + fi +fi + +if [[ -z ${url} ]]; then + echo "os \"${os}\" and/or architecture \"${arch}\" is not supported." + exit 1 +else + echo "Downloading yq from ${url}" + curl -o yq -L "${url}" + chmod +x ./yq + ./yq --version + if ! ./yq --version | grep -q "${version}"; then # check that yq was installed correctly + echo "Version is incorrect" + exit 1 + fi +fi diff --git a/terraform/legacy-module/constellation-cluster/install-constellation.sh b/terraform/legacy-module/constellation-cluster/install-constellation.sh new file mode 100755 index 000000000..b056db21a --- /dev/null +++ b/terraform/legacy-module/constellation-cluster/install-constellation.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash +if [[ -f ./constellation ]]; then + echo "constellation CLI is already available." + exit 0 +fi + +os=$(uname -s) +arch=$(uname -m) +version=$1 +url="" + +echo "Fetching constellation ${version}" + +if [[ ${os} == "Darwin" ]]; then + if [[ ${arch} == "arm64" ]]; then + url="https://github.com/edgelesssys/constellation/releases/${version}/download/constellation-darwin-arm64" + elif [[ ${arch} == "x86_64" ]]; then + url="https://github.com/edgelesssys/constellation/releases/${version}/download/constellation-darwin-amd64" + fi +elif [[ ${os} == "Linux" ]]; then + if [[ ${arch} == "x86_64" ]]; then + url="https://github.com/edgelesssys/constellation/releases/${version}/download/constellation-linux-amd64" + elif [[ ${arch} == "arm64" ]]; then + url="https://github.com/edgelesssys/constellation/releases/${version}/download/constellation-linux-arm64" + fi +fi + +if [[ -z ${url} ]]; then + echo "os \"${os}\" and/or architecture \"${arch}\" is not supported." 
+ exit 1 +else + curl -o constellation -L "${url}" + chmod +x constellation +fi diff --git a/terraform/legacy-module/constellation-cluster/main.tf b/terraform/legacy-module/constellation-cluster/main.tf new file mode 100644 index 000000000..f84406f16 --- /dev/null +++ b/terraform/legacy-module/constellation-cluster/main.tf @@ -0,0 +1,191 @@ +locals { + yq_node_groups = join("\n", flatten([ + for name, group in var.node_groups : [ + "./yq eval '.nodeGroups.${name}.role = \"${group.role}\"' -i constellation-conf.yaml", + "./yq eval '.nodeGroups.${name}.zone = \"${group.zone}\"' -i constellation-conf.yaml", + "./yq eval '.nodeGroups.${name}.instanceType = \"${group.instance_type}\"' -i constellation-conf.yaml", + "./yq eval '.nodeGroups.${name}.stateDiskSizeGB = ${group.disk_size}' -i constellation-conf.yaml", + "./yq eval '.nodeGroups.${name}.stateDiskType = \"${group.disk_type}\"' -i constellation-conf.yaml", + "./yq eval '.nodeGroups.${name}.initialCount = ${group.initial_count}' -i constellation-conf.yaml" + ] + ])) + gcp_sa_file_path = "service_account_file.json" +} + +resource "null_resource" "ensure_cli" { + provisioner "local-exec" { + command = < "${local.gcp_sa_file_path}" + EOT + } + provisioner "local-exec" { + when = destroy + command = "rm ${self.triggers.file_path}" + } + triggers = { + always_run = timestamp() + file_path = local.gcp_sa_file_path + } +} + +resource "null_resource" "gcp_config" { + count = var.gcp_config != null ? 1 : 0 + provisioner "local-exec" { + command = <